diff --git a/app.yaml b/app.yaml
deleted file mode 100644
index 65c69dc..0000000
--- a/app.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-entrypoint: n
-env: flex
-runtime: python
diff --git a/app/app.yaml b/app/app.yaml
new file mode 100644
index 0000000..c3254ed
--- /dev/null
+++ b/app/app.yaml
@@ -0,0 +1,21 @@
+runtime: python
+env: flex
+
+runtime_config:
+  python_version: 3
+api_version: 1
+threadsafe: true
+
+entrypoint: gunicorn -b :$PORT main:app
+
+env_variables:
+  # Replace user, password, database, and instance connection name with the values obtained
+  # when configuring your Cloud SQL instance.
+  SQLALCHEMY_DATABASE_URI: >-
+    postgresql+psycopg2://postgres:SoftwareEngineering!420@/35.184.149.32/boswe?host=/cloudsql/boswemian-rhapsody:us-central1:boswe
+beta_settings:
+  cloud_sql_instances: boswemian-rhapsody:us-central1:boswe
+
+handlers:
+- url: /.*
+  script: main.app
diff --git a/app/appengine_config.py b/app/appengine_config.py
new file mode 100644
index 0000000..71abeed
--- /dev/null
+++ b/app/appengine_config.py
@@ -0,0 +1,4 @@
+from google.appengine.ext import vendor
+
+# Add any libraries installed in the "lib" folder.
+vendor.add('lib')
\ No newline at end of file
diff --git a/app/cloud_sql_proxy b/app/cloud_sql_proxy
new file mode 100755
index 0000000..c8f2c8d
Binary files /dev/null and b/app/cloud_sql_proxy differ
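Note: the `entrypoint` above assumes a `main.py` module exposing a Flask object named `app`, but that module is not part of this diff, so the following is an illustrative sketch only. It shows how such a module could pick up the `SQLALCHEMY_DATABASE_URI` exported by the `env_variables` block, with dependencies resolved from the vendored `app/lib` tree (populated with something like `pip install -t lib Flask Flask-SQLAlchemy`):

    # main.py -- hypothetical module loaded by "gunicorn -b :$PORT main:app".
    # Only app.yaml above is part of this change; everything here is a sketch.
    import os

    from flask import Flask
    from flask_sqlalchemy import SQLAlchemy

    app = Flask(__name__)

    # app.yaml's env_variables block puts the connection string into the
    # process environment; fall back to a local SQLite file for development.
    app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(
        'SQLALCHEMY_DATABASE_URI', 'sqlite:////tmp/boswe-dev.db')
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

    db = SQLAlchemy(app)


    @app.route('/_ah/health')
    def health():
        # The App Engine flexible environment polls this endpoint by default.
        return 'ok', 200


    if __name__ == '__main__':
        # Local run; in production gunicorn serves the "app" object directly.
        app.run(host='127.0.0.1', port=8080, debug=True)

For local development, the bundled `cloud_sql_proxy` binary can stand in for the production Cloud SQL socket, e.g. `./cloud_sql_proxy -instances=boswemian-rhapsody:us-central1:boswe=tcp:5432`.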
diff --git a/app/lib/Flask-0.12.1.dist-info/DESCRIPTION.rst b/app/lib/Flask-0.12.1.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000..239dbda
--- /dev/null
+++ b/app/lib/Flask-0.12.1.dist-info/DESCRIPTION.rst
@@ -0,0 +1,46 @@
+Flask
+-----
+
+Flask is a microframework for Python based on Werkzeug, Jinja 2 and good
+intentions. And before you ask: It's BSD licensed!
+
+Flask is Fun
+````````````
+
+Save in a hello.py:
+
+.. code:: python
+
+    from flask import Flask
+    app = Flask(__name__)
+
+    @app.route("/")
+    def hello():
+        return "Hello World!"
+
+    if __name__ == "__main__":
+        app.run()
+
+And Easy to Setup
+`````````````````
+
+And run it:
+
+.. code:: bash
+
+    $ pip install Flask
+    $ python hello.py
+    * Running on http://localhost:5000/
+
+    Ready for production? `Read this first `.
+
+Links
+`````
+
+* `website `_
+* `documentation `_
+* `development version
+  `_
+
+
+
diff --git a/app/lib/Flask-0.12.1.dist-info/LICENSE.txt b/app/lib/Flask-0.12.1.dist-info/LICENSE.txt
new file mode 100644
index 0000000..a7da10e
--- /dev/null
+++ b/app/lib/Flask-0.12.1.dist-info/LICENSE.txt
@@ -0,0 +1,33 @@
+Copyright (c) 2015 by Armin Ronacher and contributors. See AUTHORS
+for more details.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms of the software as well
+as documentation, with or without modification, are permitted provided
+that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above
+  copyright notice, this list of conditions and the following
+  disclaimer in the documentation and/or other materials provided
+  with the distribution.
+
+* The names of the contributors may not be used to endorse or
+  promote products derived from this software without specific
+  prior written permission.
+
+THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
+NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
diff --git a/app/lib/Flask-0.12.1.dist-info/METADATA b/app/lib/Flask-0.12.1.dist-info/METADATA
new file mode 100644
index 0000000..56a5a10
--- /dev/null
+++ b/app/lib/Flask-0.12.1.dist-info/METADATA
@@ -0,0 +1,75 @@
+Metadata-Version: 2.0
+Name: Flask
+Version: 0.12.1
+Summary: A microframework based on Werkzeug, Jinja2 and good intentions
+Home-page: http://github.com/pallets/flask/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Platform: any
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Dist: Jinja2 (>=2.4)
+Requires-Dist: Werkzeug (>=0.7)
+Requires-Dist: click (>=2.0)
+Requires-Dist: itsdangerous (>=0.21)
+
+Flask
+-----
+
+Flask is a microframework for Python based on Werkzeug, Jinja 2 and good
+intentions. And before you ask: It's BSD licensed!
+
+Flask is Fun
+````````````
+
+Save in a hello.py:
+
+.. code:: python
+
+    from flask import Flask
+    app = Flask(__name__)
+
+    @app.route("/")
+    def hello():
+        return "Hello World!"
+
+    if __name__ == "__main__":
+        app.run()
+
+And Easy to Setup
+`````````````````
+
+And run it:
+
+.. code:: bash
+
+    $ pip install Flask
+    $ python hello.py
+    * Running on http://localhost:5000/
+
+    Ready for production? `Read this first `.
+ +Links +````` + +* `website `_ +* `documentation `_ +* `development version + `_ + + + diff --git a/app/lib/Flask-0.12.1.dist-info/RECORD b/app/lib/Flask-0.12.1.dist-info/RECORD new file mode 100644 index 0000000..a7e9077 --- /dev/null +++ b/app/lib/Flask-0.12.1.dist-info/RECORD @@ -0,0 +1,51 @@ +Flask-0.12.1.dist-info/DESCRIPTION.rst,sha256=DmJm8IBlBjl3wkm0Ly23jYvWbvK_mCuE5oUseYCijbI,810 +Flask-0.12.1.dist-info/LICENSE.txt,sha256=hLgKluMRHSnxG-L0EmrqjmKgG5cHlff6pIh3rCNINeI,1582 +Flask-0.12.1.dist-info/METADATA,sha256=SG7efIuuoa51GNPZ0gD4PnB-X-US_yk7xlKBkoMAnAI,1948 +Flask-0.12.1.dist-info/RECORD,, +Flask-0.12.1.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 +Flask-0.12.1.dist-info/entry_points.txt,sha256=jzk2Wy2h30uEcqqzd4CVnlzsMXB-vaD5GXjuPMXmTmI,60 +Flask-0.12.1.dist-info/metadata.json,sha256=6nWCwJZ8_KE2QOFkfrcgOc7vfLsJVJie2GP6eFRdQG0,1389 +Flask-0.12.1.dist-info/top_level.txt,sha256=dvi65F6AeGWVU0TBpYiC04yM60-FX1gJFkK31IKQr5c,6 +flask/__init__.py,sha256=PyjiUdcIkBrx6NAW441uD4d2lsjHgGPyh96TBRvzZ0w,1673 +flask/__main__.py,sha256=cldbNi5zpjE68XzIWI8uYHNWwBHHVJmwtlXWk6P4CO4,291 +flask/_compat.py,sha256=VlfjUuLjufsTHJIjr_ZsnnOesSbAXIslBBgRe5tfOok,2802 +flask/app.py,sha256=6DPjtb5jUJWgL5fXksG5boA49EB3l-k9pWyftitbNNk,83169 +flask/blueprints.py,sha256=6HVasMcPcaq7tk36kCrgX4bnhTkky4G5WIWCyyJL8HY,16872 +flask/cli.py,sha256=2NXEdCOu5-4ymklxX4Lf6bjb-89I4VHYeP6xScR3i8E,18328 +flask/config.py,sha256=Ym5Jenyu6zAZ1fdVLeKekY9-EsKmq8183qnRgauwCMY,9905 +flask/ctx.py,sha256=UPA0YwoIlHP0txOGanC9lQLSGv6eCqV5Fmw2cVJRmgQ,14739 +flask/debughelpers.py,sha256=z-uQavKIymOZl0WQDLXsnacA00ERIlCx3S3Tnb_OYsE,6024 +flask/exthook.py,sha256=SvXs5jwpcOjogwJ7SNquiWTxowoN1-MHFoqAejWnk2o,5762 +flask/globals.py,sha256=I3m_4RssLhWW1R11zuEI8oFryHUHX3NQwjMkGXOZzg8,1645 +flask/helpers.py,sha256=LK4CQkN0LViGYH4FP5GLsS74SwedsHz4zO5awHOtzaQ,38449 +flask/json.py,sha256=1zPM-NPLiWoOfGd0P14FxnEkeKtjtUZxMC9pyYyDBYI,9183 +flask/logging.py,sha256=UG-77jPkRClk9w1B-_ArjjXPuj9AmZz9mG0IRGvptW0,2751 +flask/sessions.py,sha256=QBKXVYKJ-HKbx9m6Yb5yan_EPq84a5yevVLgAzNKFQY,14394 +flask/signals.py,sha256=MfZk5qTRj_R_O3aGYlTEnx2g3SvlZncz8Ii73eKK59g,2209 +flask/templating.py,sha256=u7FbN6j56H_q6CrdJJyJ6gZtqaMa0vh1_GP12gEHRQQ,4912 +flask/testing.py,sha256=II8EO_NjOT1LvL8Hh_SdIFL_BdlwVPcB9yot5pbltxE,5630 +flask/views.py,sha256=6OPv7gwu3h14JhqpeeMRWwrxoGHsUr4_nOGSyTRAxAI,5630 +flask/wrappers.py,sha256=1S_5mmuA1Tlx7D9lXV6xMblrg-PdAauNWahe-henMEE,7612 +flask/ext/__init__.py,sha256=UEezCApsG4ZJWqwUnX9YmWcNN4OVENgph_9L05n0eOM,842 +/var/folders/z7/wt4t05zs4q743qnrmn9c0wt80000gn/T/tmpecgcxziq/bin/flask,sha256=rgY52XXrY6_puZAlhZpvDMtH1XYpBdo649SRncGNcTs,256 +flask/__pycache__/signals.cpython-34.pyc,, +flask/__pycache__/wrappers.cpython-34.pyc,, +flask/__pycache__/_compat.cpython-34.pyc,, +flask/__pycache__/__init__.cpython-34.pyc,, +flask/__pycache__/ctx.cpython-34.pyc,, +flask/__pycache__/views.cpython-34.pyc,, +flask/__pycache__/cli.cpython-34.pyc,, +flask/__pycache__/logging.cpython-34.pyc,, +flask/__pycache__/__main__.cpython-34.pyc,, +flask/__pycache__/exthook.cpython-34.pyc,, +flask/__pycache__/json.cpython-34.pyc,, +flask/__pycache__/blueprints.cpython-34.pyc,, +flask/ext/__pycache__/__init__.cpython-34.pyc,, +flask/__pycache__/sessions.cpython-34.pyc,, +flask/__pycache__/debughelpers.cpython-34.pyc,, +flask/__pycache__/config.cpython-34.pyc,, +flask/__pycache__/testing.cpython-34.pyc,, +flask/__pycache__/app.cpython-34.pyc,, +flask/__pycache__/helpers.cpython-34.pyc,, +flask/__pycache__/globals.cpython-34.pyc,, 
+flask/__pycache__/templating.cpython-34.pyc,, diff --git a/app/lib/Flask-0.12.1.dist-info/WHEEL b/app/lib/Flask-0.12.1.dist-info/WHEEL new file mode 100644 index 0000000..8b6dd1b --- /dev/null +++ b/app/lib/Flask-0.12.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/app/lib/Flask-0.12.1.dist-info/entry_points.txt b/app/lib/Flask-0.12.1.dist-info/entry_points.txt new file mode 100644 index 0000000..14adf18 --- /dev/null +++ b/app/lib/Flask-0.12.1.dist-info/entry_points.txt @@ -0,0 +1,4 @@ + + [console_scripts] + flask=flask.cli:main + \ No newline at end of file diff --git a/app/lib/Flask-0.12.1.dist-info/metadata.json b/app/lib/Flask-0.12.1.dist-info/metadata.json new file mode 100644 index 0000000..e854c06 --- /dev/null +++ b/app/lib/Flask-0.12.1.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules"], "extensions": {"python.commands": {"wrap_console": {"flask": "flask.cli:main"}}, "python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://github.com/pallets/flask/"}}, "python.exports": {"console_scripts": {"flask": "flask.cli:main"}}}, "extras": [], "generator": "bdist_wheel (0.29.0)", "license": "BSD", "metadata_version": "2.0", "name": "Flask", "platform": "any", "run_requires": [{"requires": ["Jinja2 (>=2.4)", "Werkzeug (>=0.7)", "click (>=2.0)", "itsdangerous (>=0.21)"]}], "summary": "A microframework based on Werkzeug, Jinja2 and good intentions", "version": "0.12.1"} \ No newline at end of file diff --git a/app/lib/Flask-0.12.1.dist-info/top_level.txt b/app/lib/Flask-0.12.1.dist-info/top_level.txt new file mode 100644 index 0000000..7e10602 --- /dev/null +++ b/app/lib/Flask-0.12.1.dist-info/top_level.txt @@ -0,0 +1 @@ +flask diff --git a/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/PKG-INFO b/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/PKG-INFO new file mode 100644 index 0000000..34ff3bc --- /dev/null +++ b/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/PKG-INFO @@ -0,0 +1,43 @@ +Metadata-Version: 1.1 +Name: Flask-Restless +Version: 0.17.0 +Summary: A Flask extension for easy ReSTful API generation +Home-page: http://github.com/jfinkels/flask-restless +Author: Jeffrey Finkelstein +Author-email: jeffrey.finkelstein@gmail.com +License: GNU AGPLv3+ or BSD +Download-URL: http://pypi.python.org/pypi/Flask-Restless +Description: + Flask-Restless + ~~~~~~~~~~~~~~ + + Flask-Restless is a `Flask `_ extension which + facilitates the creation of ReSTful JSON APIs. It is compatible with models + which have been defined using `SQLAlchemy `_ or + `FLask-SQLAlchemy `_. + + For more information, check the World Wide Web! 
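As the description above says, Flask-Restless generates ReSTful JSON APIs from SQLAlchemy models. A minimal, hypothetical sketch of the 0.17-era usage, reusing the illustrative `app` and `db` objects from the earlier `main.py` sketch (none of this code is in the diff):

    # Hypothetical wiring for the vendored Flask-Restless 0.17.
    import flask_restless

    from main import app, db  # sketch objects, not part of this diff


    class Person(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        name = db.Column(db.Unicode, unique=True)


    db.create_all()

    # Exposes GET/POST/DELETE endpoints at /api/person, backed by the model.
    manager = flask_restless.APIManager(app, flask_sqlalchemy_db=db)
    manager.create_api(Person, methods=['GET', 'POST', 'DELETE'])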
+ + * `Documentation `_ + * `PyPI listing `_ + * `Source code repository `_ + + +Keywords: ReST,API,Flask +Platform: any +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+) +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Topic :: Database :: Front-Ends +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: Software Development :: Libraries :: Python Modules diff --git a/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/SOURCES.txt b/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/SOURCES.txt new file mode 100644 index 0000000..9bc472e --- /dev/null +++ b/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/SOURCES.txt @@ -0,0 +1,14 @@ +README +setup.cfg +setup.py +Flask_Restless.egg-info/PKG-INFO +Flask_Restless.egg-info/SOURCES.txt +Flask_Restless.egg-info/dependency_links.txt +Flask_Restless.egg-info/not-zip-safe +Flask_Restless.egg-info/requires.txt +Flask_Restless.egg-info/top_level.txt +flask_restless/__init__.py +flask_restless/helpers.py +flask_restless/manager.py +flask_restless/search.py +flask_restless/views.py \ No newline at end of file diff --git a/src/lib/wtforms/ext/appengine/__init__.py b/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/dependency_links.txt similarity index 100% rename from src/lib/wtforms/ext/appengine/__init__.py rename to app/lib/Flask_Restless-0.17.0-py3.4.egg-info/dependency_links.txt diff --git a/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/installed-files.txt b/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/installed-files.txt new file mode 100644 index 0000000..7ba8994 --- /dev/null +++ b/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/installed-files.txt @@ -0,0 +1,17 @@ +../flask_restless/__init__.py +../flask_restless/helpers.py +../flask_restless/manager.py +../flask_restless/search.py +../flask_restless/views.py +../flask_restless/__pycache__/__init__.cpython-34.pyc +../flask_restless/__pycache__/helpers.cpython-34.pyc +../flask_restless/__pycache__/manager.cpython-34.pyc +../flask_restless/__pycache__/search.cpython-34.pyc +../flask_restless/__pycache__/views.cpython-34.pyc +./ +dependency_links.txt +not-zip-safe +PKG-INFO +requires.txt +SOURCES.txt +top_level.txt diff --git a/src/lib/wtforms/ext/i18n/__init__.py b/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/not-zip-safe similarity index 100% rename from src/lib/wtforms/ext/i18n/__init__.py rename to app/lib/Flask_Restless-0.17.0-py3.4.egg-info/not-zip-safe diff --git a/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/requires.txt b/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/requires.txt new file mode 100644 index 0000000..632b7fa --- /dev/null +++ b/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/requires.txt @@ -0,0 +1,4 @@ +flask>=0.10 +sqlalchemy>=0.8 +python-dateutil>2.0 +mimerender>=0.5.2 diff --git a/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/top_level.txt b/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/top_level.txt new file mode 100644 index 0000000..ba5d48c --- /dev/null +++ 
b/app/lib/Flask_Restless-0.17.0-py3.4.egg-info/top_level.txt @@ -0,0 +1 @@ +flask_restless diff --git a/app/lib/Flask_SQLAlchemy-2.2.dist-info/DESCRIPTION.rst b/app/lib/Flask_SQLAlchemy-2.2.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..c9db5b5 --- /dev/null +++ b/app/lib/Flask_SQLAlchemy-2.2.dist-info/DESCRIPTION.rst @@ -0,0 +1,15 @@ + +Flask-SQLAlchemy +---------------- + +Adds SQLAlchemy support to your Flask application. + +Links +````` + +* `documentation `_ +* `development version + `_ + + + diff --git a/app/lib/Flask_SQLAlchemy-2.2.dist-info/LICENSE.txt b/app/lib/Flask_SQLAlchemy-2.2.dist-info/LICENSE.txt new file mode 100644 index 0000000..49fcac3 --- /dev/null +++ b/app/lib/Flask_SQLAlchemy-2.2.dist-info/LICENSE.txt @@ -0,0 +1,31 @@ +Copyright (c) 2014 by Armin Ronacher. + +Some rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +* The names of the contributors may not be used to endorse or + promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
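Flask-SQLAlchemy, vendored here, is what connects the `SQLALCHEMY_DATABASE_URI` set in app.yaml to SQLAlchemy sessions. A small usage sketch, again assuming the hypothetical `app` and `db` from the `main.py` sketch above:

    # Hypothetical model/query usage with the vendored Flask-SQLAlchemy 2.2.
    from main import app, db  # sketch objects, not part of this diff


    class Review(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        title = db.Column(db.String(120), nullable=False)
        body = db.Column(db.Text)


    with app.app_context():
        db.create_all()  # emits CREATE TABLE against the configured URI
        db.session.add(Review(title='First!', body='...'))
        db.session.commit()
        print(Review.query.filter_by(title='First!').count())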
diff --git a/app/lib/Flask_SQLAlchemy-2.2.dist-info/METADATA b/app/lib/Flask_SQLAlchemy-2.2.dist-info/METADATA new file mode 100644 index 0000000..bdbad4a --- /dev/null +++ b/app/lib/Flask_SQLAlchemy-2.2.dist-info/METADATA @@ -0,0 +1,42 @@ +Metadata-Version: 2.0 +Name: Flask-SQLAlchemy +Version: 2.2 +Summary: Adds SQLAlchemy support to your Flask application +Home-page: http://github.com/mitsuhiko/flask-sqlalchemy +Author: Phil Howell +Author-email: phil@quae.co.uk +License: BSD +Platform: any +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Requires-Dist: Flask (>=0.10) +Requires-Dist: SQLAlchemy (>=0.8.0) + + +Flask-SQLAlchemy +---------------- + +Adds SQLAlchemy support to your Flask application. + +Links +````` + +* `documentation `_ +* `development version + `_ + + + diff --git a/app/lib/Flask_SQLAlchemy-2.2.dist-info/RECORD b/app/lib/Flask_SQLAlchemy-2.2.dist-info/RECORD new file mode 100644 index 0000000..761bba7 --- /dev/null +++ b/app/lib/Flask_SQLAlchemy-2.2.dist-info/RECORD @@ -0,0 +1,11 @@ +Flask_SQLAlchemy-2.2.dist-info/DESCRIPTION.rst,sha256=Mp4bpckSjf082xflOARFwzWLTnUszq7JxcY0dR9vD2w,273 +Flask_SQLAlchemy-2.2.dist-info/LICENSE.txt,sha256=2smrI3hNiP6c5TcX0fa6fqODgsdJVLC166X0kVxei9A,1457 +Flask_SQLAlchemy-2.2.dist-info/METADATA,sha256=hIgP0kudClmQDnErllZMdlp_WjajcBmlILbSKzVMovI,1348 +Flask_SQLAlchemy-2.2.dist-info/RECORD,, +Flask_SQLAlchemy-2.2.dist-info/WHEEL,sha256=5wvfB7GvgZAbKBSE9uX9Zbi6LCL-_KgezgHblXhCRnM,113 +Flask_SQLAlchemy-2.2.dist-info/metadata.json,sha256=mU6wJVocytpfiF75Utnlrh36Z2ZzFeF9h7oTVtiTy50,1219 +Flask_SQLAlchemy-2.2.dist-info/top_level.txt,sha256=w2K4fNNoTh4HItoFfz2FRQShSeLcvHYrzU_sZov21QU,17 +flask_sqlalchemy/__init__.py,sha256=U0HkvloC9KPZpgT8zzLQEyRpLAhVkG5AUt_HzrzTVeY,36641 +flask_sqlalchemy/_compat.py,sha256=wVdOzefs6BI6Fi5mu21YctjQjrkIDdkPfgsWkye1v-4,623 +flask_sqlalchemy/__pycache__/_compat.cpython-34.pyc,, +flask_sqlalchemy/__pycache__/__init__.cpython-34.pyc,, diff --git a/app/lib/Flask_SQLAlchemy-2.2.dist-info/WHEEL b/app/lib/Flask_SQLAlchemy-2.2.dist-info/WHEEL new file mode 100644 index 0000000..7bf9daa --- /dev/null +++ b/app/lib/Flask_SQLAlchemy-2.2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.30.0.a0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/app/lib/Flask_SQLAlchemy-2.2.dist-info/metadata.json b/app/lib/Flask_SQLAlchemy-2.2.dist-info/metadata.json new file mode 100644 index 0000000..49f19aa --- /dev/null +++ b/app/lib/Flask_SQLAlchemy-2.2.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", 
"Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6"], "extensions": {"python.details": {"contacts": [{"email": "phil@quae.co.uk", "name": "Phil Howell", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://github.com/mitsuhiko/flask-sqlalchemy"}}}, "extras": [], "generator": "bdist_wheel (0.30.0.a0)", "license": "BSD", "metadata_version": "2.0", "name": "Flask-SQLAlchemy", "platform": "any", "run_requires": [{"requires": ["Flask (>=0.10)", "SQLAlchemy (>=0.8.0)"]}], "summary": "Adds SQLAlchemy support to your Flask application", "version": "2.2"} \ No newline at end of file diff --git a/app/lib/Flask_SQLAlchemy-2.2.dist-info/top_level.txt b/app/lib/Flask_SQLAlchemy-2.2.dist-info/top_level.txt new file mode 100644 index 0000000..8a5538e --- /dev/null +++ b/app/lib/Flask_SQLAlchemy-2.2.dist-info/top_level.txt @@ -0,0 +1 @@ +flask_sqlalchemy diff --git a/app/lib/Jinja2-2.9.6.dist-info/DESCRIPTION.rst b/app/lib/Jinja2-2.9.6.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..4421f04 --- /dev/null +++ b/app/lib/Jinja2-2.9.6.dist-info/DESCRIPTION.rst @@ -0,0 +1,36 @@ +Jinja2 +~~~~~~ + +Jinja2 is a template engine written in pure Python. It provides a +`Django`_ inspired non-XML syntax but supports inline expressions and +an optional `sandboxed`_ environment. + +Nutshell +-------- + +Here a small example of a Jinja template:: + + {% extends 'base.html' %} + {% block title %}Memberlist{% endblock %} + {% block content %} + + {% endblock %} + +Philosophy +---------- + +Application logic is for the controller but don't try to make the life +for the template designer too hard by giving him too few functionality. + +For more informations visit the new `Jinja2 webpage`_ and `documentation`_. + +.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security) +.. _Django: http://www.djangoproject.com/ +.. _Jinja2 webpage: http://jinja.pocoo.org/ +.. _documentation: http://jinja.pocoo.org/2/documentation/ + + diff --git a/app/lib/Jinja2-2.9.6.dist-info/LICENSE.txt b/app/lib/Jinja2-2.9.6.dist-info/LICENSE.txt new file mode 100644 index 0000000..10145a2 --- /dev/null +++ b/app/lib/Jinja2-2.9.6.dist-info/LICENSE.txt @@ -0,0 +1,31 @@ +Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details. + +Some rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * The names of the contributors may not be used to endorse or + promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
diff --git a/app/lib/Jinja2-2.9.6.dist-info/LICENSE.txt b/app/lib/Jinja2-2.9.6.dist-info/LICENSE.txt
new file mode 100644
index 0000000..10145a2
--- /dev/null
+++ b/app/lib/Jinja2-2.9.6.dist-info/LICENSE.txt
@@ -0,0 +1,31 @@
+Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+  * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following
+    disclaimer in the documentation and/or other materials provided
+    with the distribution.
+
+  * The names of the contributors may not be used to endorse or
+    promote products derived from this software without specific
+    prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/app/lib/Jinja2-2.9.6.dist-info/METADATA b/app/lib/Jinja2-2.9.6.dist-info/METADATA
new file mode 100644
index 0000000..ea69de1
--- /dev/null
+++ b/app/lib/Jinja2-2.9.6.dist-info/METADATA
@@ -0,0 +1,65 @@
+Metadata-Version: 2.0
+Name: Jinja2
+Version: 2.9.6
+Summary: A small but fast and easy to use stand-alone template engine written in pure python.
+Home-page: http://jinja.pocoo.org/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Dist: MarkupSafe (>=0.23)
+Provides-Extra: i18n
+Requires-Dist: Babel (>=0.8); extra == 'i18n'
+
+Jinja2
+~~~~~~
+
+Jinja2 is a template engine written in pure Python. It provides a
+`Django`_ inspired non-XML syntax but supports inline expressions and
+an optional `sandboxed`_ environment.
+
+Nutshell
+--------
+
+Here a small example of a Jinja template::
+
+    {% extends 'base.html' %}
+    {% block title %}Memberlist{% endblock %}
+    {% block content %}
+      <ul>
+      {% for user in users %}
+        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+      {% endfor %}
+      </ul>
+    {% endblock %}
+
+Philosophy
+----------
+
+Application logic is for the controller but don't try to make the life
+for the template designer too hard by giving him too few functionality.
+
+For more informations visit the new `Jinja2 webpage`_ and `documentation`_.
+
+.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)
+.. _Django: http://www.djangoproject.com/
+.. _Jinja2 webpage: http://jinja.pocoo.org/
+..
_documentation: http://jinja.pocoo.org/2/documentation/ + + diff --git a/app/lib/Jinja2-2.9.6.dist-info/RECORD b/app/lib/Jinja2-2.9.6.dist-info/RECORD new file mode 100644 index 0000000..4c81f8d --- /dev/null +++ b/app/lib/Jinja2-2.9.6.dist-info/RECORD @@ -0,0 +1,58 @@ +jinja2/__init__.py,sha256=Cx_UnJO4i_GqvKQsOu__mvGE_eMJSsBqITa26irtg5A,2565 +jinja2/_compat.py,sha256=xP60CE5Qr8FTYcDE1f54tbZLKGvMwYml4-8T7Q4KG9k,2596 +jinja2/_stringdefs.py,sha256=PYtqTmmWIhjXlFBoH-eE6fJkQvlu7nxUyQ2YlFB97VA,589381 +jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144 +jinja2/asyncsupport.py,sha256=ZJO1Fdd9R93sDLrk6TZNuMQGgtuDmpTlENNRkLwZF7c,7765 +jinja2/bccache.py,sha256=0xoVw0R9nj3vtzPl9g-zB5BKTLFJ7FFMq2ABbn1IkCI,12793 +jinja2/compiler.py,sha256=lE5owyPwT1cGGZxWyzQtZLW7Uj1g3Vw1oVtBU8Uc_uM,62929 +jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626 +jinja2/debug.py,sha256=UqEbKb4zofBABwvyA77Kr0-5IAQawKqC9t8ZeTIzpGU,12038 +jinja2/defaults.py,sha256=GvVEQqIRvRMCbQF2NZSr0mlEN8lxvGixU5wIIAeRe1A,1323 +jinja2/environment.py,sha256=z91L_efdYs-KNs6DBxQWDyYncOwOqn_0J4M5CfFj0Q8,50848 +jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428 +jinja2/ext.py,sha256=9xq8fd_QPBIe4Z7hE1XawB7f1EDHrVZjpb2JiRTiG94,23867 +jinja2/filters.py,sha256=1OYGhyN84yVmFUIOwJNRV_StqTCfPhnRLfJTmWbEe_8,33424 +jinja2/idtracking.py,sha256=HHcCOMsQhCrrjwYAmikKqq_XetXLovCjXAThh9WbRAc,8760 +jinja2/lexer.py,sha256=W4A830e-fj12zRT6rL7H91F4D6xwED5LjR8iMxjWuVQ,28238 +jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382 +jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340 +jinja2/nodes.py,sha256=4_Ucxbkohtj4BAlpV0w_MpVmIxJNaVXDTBb4EHBA2JI,29392 +jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722 +jinja2/parser.py,sha256=3tc82qO1Ovs9och_PjirbAmnWNT77n4wWjIQ8pEVKvU,35465 +jinja2/runtime.py,sha256=axkTQXg2-oc_Cm35NEMDDas3Jbq3ATxNrDOEa5v3wIw,26835 +jinja2/sandbox.py,sha256=Jx4MTxly8KvdkSWyui_kHY1_ZZ0RAQL4ojAy1KDRyK0,16707 +jinja2/tests.py,sha256=iFuUTbUYv7TFffq2aTswCRdIhQ6wyrby1YevChVPqkE,4428 +jinja2/utils.py,sha256=BIFqeXXsCUSjWx6MEwYhY6V4tXzVNs9WRXfB60MA9HY,19941 +jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316 +Jinja2-2.9.6.dist-info/DESCRIPTION.rst,sha256=CXIS1UnPSk5_lZBS6Lb8ko-3lqGfjsiUwNBLXCTj2lc,975 +Jinja2-2.9.6.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72 +Jinja2-2.9.6.dist-info/LICENSE.txt,sha256=JvzUNv3Io51EiWrAPm8d_SXjhJnEjyDYvB3Tvwqqils,1554 +Jinja2-2.9.6.dist-info/METADATA,sha256=53LSXlqC86JTyLSPsDyAOmyV4pXIzzmmZoUXz7ogytA,2172 +Jinja2-2.9.6.dist-info/metadata.json,sha256=vzvX25T4hwMOe1EIOBo9rpfiZerOB_KVLcplGG_qYtE,1394 +Jinja2-2.9.6.dist-info/RECORD,, +Jinja2-2.9.6.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7 +Jinja2-2.9.6.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 +jinja2/__pycache__/exceptions.cpython-34.pyc,, +jinja2/__pycache__/filters.cpython-34.pyc,, +jinja2/__pycache__/runtime.cpython-34.pyc,, +jinja2/__pycache__/meta.cpython-34.pyc,, +jinja2/__pycache__/loaders.cpython-34.pyc,, +jinja2/__pycache__/_compat.cpython-34.pyc,, +jinja2/__pycache__/ext.cpython-34.pyc,, +jinja2/__pycache__/__init__.cpython-34.pyc,, +jinja2/__pycache__/nodes.cpython-34.pyc,, +jinja2/__pycache__/environment.cpython-34.pyc,, +jinja2/__pycache__/parser.cpython-34.pyc,, +jinja2/__pycache__/defaults.cpython-34.pyc,, +jinja2/__pycache__/visitor.cpython-34.pyc,, +jinja2/__pycache__/utils.cpython-34.pyc,, 
+jinja2/__pycache__/idtracking.cpython-34.pyc,, +jinja2/__pycache__/sandbox.cpython-34.pyc,, +jinja2/__pycache__/debug.cpython-34.pyc,, +jinja2/__pycache__/_stringdefs.cpython-34.pyc,, +jinja2/__pycache__/tests.cpython-34.pyc,, +jinja2/__pycache__/bccache.cpython-34.pyc,, +jinja2/__pycache__/compiler.cpython-34.pyc,, +jinja2/__pycache__/optimizer.cpython-34.pyc,, +jinja2/__pycache__/lexer.cpython-34.pyc,, +jinja2/__pycache__/constants.cpython-34.pyc,, diff --git a/app/lib/Jinja2-2.9.6.dist-info/WHEEL b/app/lib/Jinja2-2.9.6.dist-info/WHEEL new file mode 100644 index 0000000..9dff69d --- /dev/null +++ b/app/lib/Jinja2-2.9.6.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.24.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/app/lib/Jinja2-2.9.6.dist-info/entry_points.txt b/app/lib/Jinja2-2.9.6.dist-info/entry_points.txt new file mode 100644 index 0000000..32e6b75 --- /dev/null +++ b/app/lib/Jinja2-2.9.6.dist-info/entry_points.txt @@ -0,0 +1,4 @@ + + [babel.extractors] + jinja2 = jinja2.ext:babel_extract[i18n] + \ No newline at end of file diff --git a/app/lib/Jinja2-2.9.6.dist-info/metadata.json b/app/lib/Jinja2-2.9.6.dist-info/metadata.json new file mode 100644 index 0000000..9bbf942 --- /dev/null +++ b/app/lib/Jinja2-2.9.6.dist-info/metadata.json @@ -0,0 +1 @@ +{"license": "BSD", "name": "Jinja2", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "A small but fast and easy to use stand-alone template engine written in pure python.", "run_requires": [{"requires": ["Babel (>=0.8)"], "extra": "i18n"}, {"requires": ["MarkupSafe (>=0.23)"]}], "version": "2.9.6", "extensions": {"python.details": {"project_urls": {"Home": "http://jinja.pocoo.org/"}, "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "contacts": [{"role": "author", "email": "armin.ronacher@active-4.com", "name": "Armin Ronacher"}]}, "python.exports": {"babel.extractors": {"jinja2": "jinja2.ext:babel_extract [i18n]"}}}, "classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "extras": ["i18n"]} \ No newline at end of file diff --git a/app/lib/Jinja2-2.9.6.dist-info/top_level.txt b/app/lib/Jinja2-2.9.6.dist-info/top_level.txt new file mode 100644 index 0000000..7f7afbf --- /dev/null +++ b/app/lib/Jinja2-2.9.6.dist-info/top_level.txt @@ -0,0 +1 @@ +jinja2 diff --git a/app/lib/MarkupSafe-1.0-py3.4.egg-info/PKG-INFO b/app/lib/MarkupSafe-1.0-py3.4.egg-info/PKG-INFO new file mode 100644 index 0000000..6f2568f --- /dev/null +++ b/app/lib/MarkupSafe-1.0-py3.4.egg-info/PKG-INFO @@ -0,0 +1,133 @@ +Metadata-Version: 1.1 +Name: MarkupSafe +Version: 1.0 +Summary: Implements a XML/HTML/XHTML Markup safe string for Python +Home-page: http://github.com/pallets/markupsafe +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +License: BSD +Description: MarkupSafe + ========== + + Implements a unicode 
subclass that supports HTML strings:
+
+        .. code-block:: python
+
+            >>> from markupsafe import Markup, escape
+            >>> escape("<script>alert(document.cookie);</script>")
+            Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+            >>> tmpl = Markup("<em>%s</em>")
+            >>> tmpl % "Peter > Lustig"
+            Markup(u'<em>Peter &gt; Lustig</em>')
+
+        If you want to make an object unicode that is not yet unicode
+        but don't want to lose the taint information, you can use the
+        ``soft_unicode`` function. (On Python 3 you can also use ``soft_str`` which
+        is a different name for the same function).
+
+        .. code-block:: python
+
+            >>> from markupsafe import soft_unicode
+            >>> soft_unicode(42)
+            u'42'
+            >>> soft_unicode(Markup('foo'))
+            Markup(u'foo')
+
+        HTML Representations
+        --------------------
+
+        Objects can customize their HTML markup equivalent by overriding
+        the ``__html__`` function:
+
+        .. code-block:: python
+
+            >>> class Foo(object):
+            ...  def __html__(self):
+            ...   return 'Nice'
+            ...
+            >>> escape(Foo())
+            Markup(u'Nice')
+            >>> Markup(Foo())
+            Markup(u'Nice')
+
+        Silent Escapes
+        --------------
+
+        Since MarkupSafe 0.10 there is now also a separate escape function
+        called ``escape_silent`` that returns an empty string for ``None`` for
+        consistency with other systems that return empty strings for ``None``
+        when escaping (for instance Pylons' webhelpers).
+
+        If you also want to use this for the escape method of the Markup
+        object, you can create your own subclass that does that:
+
+        .. code-block:: python
+
+            from markupsafe import Markup, escape_silent as escape
+
+            class SilentMarkup(Markup):
+                __slots__ = ()
+
+                @classmethod
+                def escape(cls, s):
+                    return cls(escape(s))
+
+        New-Style String Formatting
+        ---------------------------
+
+        Starting with MarkupSafe 0.21 new style string formats from Python 2.6 and
+        3.x are now fully supported. Previously the escape behavior of those
+        functions was spotty at best. The new implementations operates under the
+        following algorithm:
+
+        1. if an object has an ``__html_format__`` method it is called as
+           replacement for ``__format__`` with the format specifier. It either
+           has to return a string or markup object.
+        2. if an object has an ``__html__`` method it is called.
+        3. otherwise the default format system of Python kicks in and the result
+           is HTML escaped.
+
+        Here is how you can implement your own formatting:
+
+        .. code-block:: python
+
+            class User(object):
+
+                def __init__(self, id, username):
+                    self.id = id
+                    self.username = username
+
+                def __html_format__(self, format_spec):
+                    if format_spec == 'link':
+                        return Markup('<a href="/user/{0}">{1}</a>').format(
+                            self.id,
+                            self.__html__(),
+                        )
+                    elif format_spec:
+                        raise ValueError('Invalid format spec')
+                    return self.__html__()
+
+                def __html__(self):
+                    return Markup('<span class=user>{0}</span>').format(self.username)
+
+        And to format that user:
+
+        .. code-block:: python
+
+            >>> user = User(1, 'foo')
+            >>> Markup('<p>User: {0:link}').format(user)
+            Markup(u'<p>User: <a href="/user/1"><span class=user>foo</span></a>')
+
+        Markupsafe supports Python 2.6, 2.7 and Python 3.3 and higher.
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
diff --git a/app/lib/MarkupSafe-1.0-py3.4.egg-info/SOURCES.txt b/app/lib/MarkupSafe-1.0-py3.4.egg-info/SOURCES.txt
new file mode 100644
index 0000000..210b339
--- /dev/null
+++ b/app/lib/MarkupSafe-1.0-py3.4.egg-info/SOURCES.txt
@@ -0,0 +1,18 @@
+AUTHORS
+CHANGES
+LICENSE
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+tests.py
+MarkupSafe.egg-info/PKG-INFO
+MarkupSafe.egg-info/SOURCES.txt
+MarkupSafe.egg-info/dependency_links.txt
+MarkupSafe.egg-info/not-zip-safe
+MarkupSafe.egg-info/top_level.txt
+markupsafe/__init__.py
+markupsafe/_compat.py
+markupsafe/_constants.py
+markupsafe/_native.py
+markupsafe/_speedups.c
\ No newline at end of file
diff --git a/app/lib/MarkupSafe-1.0-py3.4.egg-info/dependency_links.txt b/app/lib/MarkupSafe-1.0-py3.4.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/app/lib/MarkupSafe-1.0-py3.4.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/app/lib/MarkupSafe-1.0-py3.4.egg-info/installed-files.txt b/app/lib/MarkupSafe-1.0-py3.4.egg-info/installed-files.txt
new file mode 100644
index 0000000..f430f15
--- /dev/null
+++ b/app/lib/MarkupSafe-1.0-py3.4.egg-info/installed-files.txt
@@ -0,0 +1,16 @@
+../markupsafe/__init__.py
+../markupsafe/_compat.py
+../markupsafe/_constants.py
+../markupsafe/_native.py
+../markupsafe/_speedups.c
+../markupsafe/__pycache__/__init__.cpython-34.pyc
+../markupsafe/__pycache__/_compat.cpython-34.pyc
+../markupsafe/__pycache__/_constants.cpython-34.pyc
+../markupsafe/__pycache__/_native.cpython-34.pyc
+../markupsafe/_speedups.so
+./
+dependency_links.txt
+not-zip-safe
+PKG-INFO
+SOURCES.txt
+top_level.txt
diff --git a/app/lib/MarkupSafe-1.0-py3.4.egg-info/not-zip-safe b/app/lib/MarkupSafe-1.0-py3.4.egg-info/not-zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/app/lib/MarkupSafe-1.0-py3.4.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/app/lib/MarkupSafe-1.0-py3.4.egg-info/top_level.txt b/app/lib/MarkupSafe-1.0-py3.4.egg-info/top_level.txt
new file mode 100644
index 0000000..75bf729
--- /dev/null
+++ b/app/lib/MarkupSafe-1.0-py3.4.egg-info/top_level.txt
@@ -0,0 +1 @@
+markupsafe
diff --git a/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/PKG-INFO b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/PKG-INFO
new file mode 100644
index 0000000..5475a48
--- /dev/null
+++ b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/PKG-INFO
@@ -0,0 +1,155 @@
+Metadata-Version: 1.1
+Name: SQLAlchemy
+Version: 1.1.9
+Summary: Database Abstraction Library
+Home-page: http://www.sqlalchemy.org
+Author: Mike Bayer
+Author-email: mike_mp@zzzcomputing.com
+License: MIT License
+Description: SQLAlchemy
+        ==========
+
+        The Python SQL Toolkit and Object Relational Mapper
+
+        Introduction
+        -------------
+
+        SQLAlchemy is the Python SQL toolkit and Object Relational Mapper
+        that gives application developers the full power and
+        flexibility of SQL.
SQLAlchemy provides a full suite + of well known enterprise-level persistence patterns, + designed for efficient and high-performing database + access, adapted into a simple and Pythonic domain + language. + + Major SQLAlchemy features include: + + * An industrial strength ORM, built + from the core on the identity map, unit of work, + and data mapper patterns. These patterns + allow transparent persistence of objects + using a declarative configuration system. + Domain models + can be constructed and manipulated naturally, + and changes are synchronized with the + current transaction automatically. + * A relationally-oriented query system, exposing + the full range of SQL's capabilities + explicitly, including joins, subqueries, + correlation, and most everything else, + in terms of the object model. + Writing queries with the ORM uses the same + techniques of relational composition you use + when writing SQL. While you can drop into + literal SQL at any time, it's virtually never + needed. + * A comprehensive and flexible system + of eager loading for related collections and objects. + Collections are cached within a session, + and can be loaded on individual access, all + at once using joins, or by query per collection + across the full result set. + * A Core SQL construction system and DBAPI + interaction layer. The SQLAlchemy Core is + separate from the ORM and is a full database + abstraction layer in its own right, and includes + an extensible Python-based SQL expression + language, schema metadata, connection pooling, + type coercion, and custom types. + * All primary and foreign key constraints are + assumed to be composite and natural. Surrogate + integer primary keys are of course still the + norm, but SQLAlchemy never assumes or hardcodes + to this model. + * Database introspection and generation. Database + schemas can be "reflected" in one step into + Python structures representing database metadata; + those same structures can then generate + CREATE statements right back out - all within + the Core, independent of the ORM. + + SQLAlchemy's philosophy: + + * SQL databases behave less and less like object + collections the more size and performance start to + matter; object collections behave less and less like + tables and rows the more abstraction starts to matter. + SQLAlchemy aims to accommodate both of these + principles. + * An ORM doesn't need to hide the "R". A relational + database provides rich, set-based functionality + that should be fully exposed. SQLAlchemy's + ORM provides an open-ended set of patterns + that allow a developer to construct a custom + mediation layer between a domain model and + a relational schema, turning the so-called + "object relational impedance" issue into + a distant memory. + * The developer, in all cases, makes all decisions + regarding the design, structure, and naming conventions + of both the object model as well as the relational + schema. SQLAlchemy only provides the means + to automate the execution of these decisions. + * With SQLAlchemy, there's no such thing as + "the ORM generated a bad query" - you + retain full control over the structure of + queries, including how joins are organized, + how subqueries and correlation is used, what + columns are requested. Everything SQLAlchemy + does is ultimately the result of a developer- + initiated decision. + * Don't use an ORM if the problem doesn't need one. + SQLAlchemy consists of a Core and separate ORM + component. 
The Core offers a full SQL expression + language that allows Pythonic construction + of SQL constructs that render directly to SQL + strings for a target database, returning + result sets that are essentially enhanced DBAPI + cursors. + * Transactions should be the norm. With SQLAlchemy's + ORM, nothing goes to permanent storage until + commit() is called. SQLAlchemy encourages applications + to create a consistent means of delineating + the start and end of a series of operations. + * Never render a literal value in a SQL statement. + Bound parameters are used to the greatest degree + possible, allowing query optimizers to cache + query plans effectively and making SQL injection + attacks a non-issue. + + Documentation + ------------- + + Latest documentation is at: + + http://www.sqlalchemy.org/docs/ + + Installation / Requirements + --------------------------- + + Full documentation for installation is at + `Installation `_. + + Getting Help / Development / Bug reporting + ------------------------------------------ + + Please refer to the `SQLAlchemy Community Guide `_. + + License + ------- + + SQLAlchemy is distributed under the `MIT license + `_. + + +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: Jython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Database :: Front-Ends +Classifier: Operating System :: OS Independent diff --git a/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/SOURCES.txt b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/SOURCES.txt new file mode 100644 index 0000000..2de1f71 --- /dev/null +++ b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/SOURCES.txt @@ -0,0 +1,806 @@ +AUTHORS +CHANGES +LICENSE +MANIFEST.in +README.dialects.rst +README.rst +README.unittests.rst +setup.cfg +setup.py +sqla_nose.py +tox.ini +doc/contents.html +doc/copyright.html +doc/genindex.html +doc/glossary.html +doc/index.html +doc/intro.html +doc/search.html +doc/searchindex.js +doc/_images/sqla_arch_small.png +doc/_images/sqla_engine_arch.png +doc/_modules/index.html +doc/_modules/examples/adjacency_list/adjacency_list.html +doc/_modules/examples/association/basic_association.html +doc/_modules/examples/association/dict_of_sets_with_default.html +doc/_modules/examples/association/proxied_association.html +doc/_modules/examples/custom_attributes/active_column_defaults.html +doc/_modules/examples/custom_attributes/custom_management.html +doc/_modules/examples/custom_attributes/listen_for_events.html +doc/_modules/examples/dogpile_caching/advanced.html +doc/_modules/examples/dogpile_caching/caching_query.html +doc/_modules/examples/dogpile_caching/environment.html +doc/_modules/examples/dogpile_caching/fixture_data.html +doc/_modules/examples/dogpile_caching/helloworld.html +doc/_modules/examples/dogpile_caching/local_session_caching.html +doc/_modules/examples/dogpile_caching/model.html +doc/_modules/examples/dogpile_caching/relationship_caching.html +doc/_modules/examples/dynamic_dict/dynamic_dict.html +doc/_modules/examples/elementtree/adjacency_list.html +doc/_modules/examples/elementtree/optimized_al.html +doc/_modules/examples/elementtree/pickle.html 
+doc/_modules/examples/generic_associations/discriminator_on_association.html +doc/_modules/examples/generic_associations/generic_fk.html +doc/_modules/examples/generic_associations/table_per_association.html +doc/_modules/examples/generic_associations/table_per_related.html +doc/_modules/examples/graphs/directed_graph.html +doc/_modules/examples/inheritance/concrete.html +doc/_modules/examples/inheritance/joined.html +doc/_modules/examples/inheritance/single.html +doc/_modules/examples/join_conditions/cast.html +doc/_modules/examples/join_conditions/threeway.html +doc/_modules/examples/large_collection/large_collection.html +doc/_modules/examples/materialized_paths/materialized_paths.html +doc/_modules/examples/nested_sets/nested_sets.html +doc/_modules/examples/performance/__main__.html +doc/_modules/examples/performance/bulk_inserts.html +doc/_modules/examples/performance/bulk_updates.html +doc/_modules/examples/performance/large_resultsets.html +doc/_modules/examples/performance/short_selects.html +doc/_modules/examples/performance/single_inserts.html +doc/_modules/examples/postgis/postgis.html +doc/_modules/examples/sharding/attribute_shard.html +doc/_modules/examples/versioned_history/history_meta.html +doc/_modules/examples/versioned_history/test_versioning.html +doc/_modules/examples/versioned_rows/versioned_map.html +doc/_modules/examples/versioned_rows/versioned_rows.html +doc/_modules/examples/vertical/dictlike-polymorphic.html +doc/_modules/examples/vertical/dictlike.html +doc/_static/basic.css +doc/_static/changelog.css +doc/_static/comment-bright.png +doc/_static/comment-close.png +doc/_static/comment.png +doc/_static/detectmobile.js +doc/_static/docs.css +doc/_static/doctools.js +doc/_static/down-pressed.png +doc/_static/down.png +doc/_static/file.png +doc/_static/init.js +doc/_static/jquery-3.1.0.js +doc/_static/jquery.js +doc/_static/minus.png +doc/_static/plus.png +doc/_static/pygments.css +doc/_static/searchtools.js +doc/_static/sphinx_paramlinks.css +doc/_static/underscore-1.3.1.js +doc/_static/underscore.js +doc/_static/up-pressed.png +doc/_static/up.png +doc/_static/websupport.js +doc/build/Makefile +doc/build/conf.py +doc/build/contents.rst +doc/build/copyright.rst +doc/build/corrections.py +doc/build/glossary.rst +doc/build/index.rst +doc/build/intro.rst +doc/build/requirements.txt +doc/build/sqla_arch_small.png +doc/build/changelog/changelog_01.rst +doc/build/changelog/changelog_02.rst +doc/build/changelog/changelog_03.rst +doc/build/changelog/changelog_04.rst +doc/build/changelog/changelog_05.rst +doc/build/changelog/changelog_06.rst +doc/build/changelog/changelog_07.rst +doc/build/changelog/changelog_08.rst +doc/build/changelog/changelog_09.rst +doc/build/changelog/changelog_10.rst +doc/build/changelog/changelog_11.rst +doc/build/changelog/index.rst +doc/build/changelog/migration_04.rst +doc/build/changelog/migration_05.rst +doc/build/changelog/migration_06.rst +doc/build/changelog/migration_07.rst +doc/build/changelog/migration_08.rst +doc/build/changelog/migration_09.rst +doc/build/changelog/migration_10.rst +doc/build/changelog/migration_11.rst +doc/build/core/api_basics.rst +doc/build/core/compiler.rst +doc/build/core/connections.rst +doc/build/core/constraints.rst +doc/build/core/custom_types.rst +doc/build/core/ddl.rst +doc/build/core/defaults.rst +doc/build/core/dml.rst +doc/build/core/engines.rst +doc/build/core/engines_connections.rst +doc/build/core/event.rst +doc/build/core/events.rst +doc/build/core/exceptions.rst +doc/build/core/expression_api.rst 
+doc/build/core/functions.rst +doc/build/core/index.rst +doc/build/core/inspection.rst +doc/build/core/interfaces.rst +doc/build/core/internals.rst +doc/build/core/metadata.rst +doc/build/core/pooling.rst +doc/build/core/reflection.rst +doc/build/core/schema.rst +doc/build/core/selectable.rst +doc/build/core/serializer.rst +doc/build/core/sqla_engine_arch.png +doc/build/core/sqlelement.rst +doc/build/core/tutorial.rst +doc/build/core/type_api.rst +doc/build/core/type_basics.rst +doc/build/core/types.rst +doc/build/dialects/firebird.rst +doc/build/dialects/index.rst +doc/build/dialects/mssql.rst +doc/build/dialects/mysql.rst +doc/build/dialects/oracle.rst +doc/build/dialects/postgresql.rst +doc/build/dialects/sqlite.rst +doc/build/dialects/sybase.rst +doc/build/faq/connections.rst +doc/build/faq/index.rst +doc/build/faq/metadata_schema.rst +doc/build/faq/ormconfiguration.rst +doc/build/faq/performance.rst +doc/build/faq/sessions.rst +doc/build/faq/sqlexpressions.rst +doc/build/orm/backref.rst +doc/build/orm/basic_relationships.rst +doc/build/orm/cascades.rst +doc/build/orm/classical.rst +doc/build/orm/collections.rst +doc/build/orm/composites.rst +doc/build/orm/constructors.rst +doc/build/orm/contextual.rst +doc/build/orm/deprecated.rst +doc/build/orm/events.rst +doc/build/orm/examples.rst +doc/build/orm/exceptions.rst +doc/build/orm/extending.rst +doc/build/orm/index.rst +doc/build/orm/inheritance.rst +doc/build/orm/internals.rst +doc/build/orm/join_conditions.rst +doc/build/orm/loading.rst +doc/build/orm/loading_columns.rst +doc/build/orm/loading_objects.rst +doc/build/orm/loading_relationships.rst +doc/build/orm/mapped_attributes.rst +doc/build/orm/mapped_sql_expr.rst +doc/build/orm/mapper_config.rst +doc/build/orm/mapping_api.rst +doc/build/orm/mapping_columns.rst +doc/build/orm/mapping_styles.rst +doc/build/orm/nonstandard_mappings.rst +doc/build/orm/persistence_techniques.rst +doc/build/orm/query.rst +doc/build/orm/relationship_api.rst +doc/build/orm/relationship_persistence.rst +doc/build/orm/relationships.rst +doc/build/orm/scalar_mapping.rst +doc/build/orm/self_referential.rst +doc/build/orm/session.rst +doc/build/orm/session_api.rst +doc/build/orm/session_basics.rst +doc/build/orm/session_events.rst +doc/build/orm/session_state_management.rst +doc/build/orm/session_transaction.rst +doc/build/orm/tutorial.rst +doc/build/orm/versioning.rst +doc/build/orm/extensions/associationproxy.rst +doc/build/orm/extensions/automap.rst +doc/build/orm/extensions/baked.rst +doc/build/orm/extensions/horizontal_shard.rst +doc/build/orm/extensions/hybrid.rst +doc/build/orm/extensions/index.rst +doc/build/orm/extensions/indexable.rst +doc/build/orm/extensions/instrumentation.rst +doc/build/orm/extensions/mutable.rst +doc/build/orm/extensions/orderinglist.rst +doc/build/orm/extensions/declarative/api.rst +doc/build/orm/extensions/declarative/basic_use.rst +doc/build/orm/extensions/declarative/index.rst +doc/build/orm/extensions/declarative/inheritance.rst +doc/build/orm/extensions/declarative/mixins.rst +doc/build/orm/extensions/declarative/relationships.rst +doc/build/orm/extensions/declarative/table_config.rst +doc/build/texinputs/Makefile +doc/build/texinputs/sphinx.sty +doc/changelog/changelog_01.html +doc/changelog/changelog_02.html +doc/changelog/changelog_03.html +doc/changelog/changelog_04.html +doc/changelog/changelog_05.html +doc/changelog/changelog_06.html +doc/changelog/changelog_07.html +doc/changelog/changelog_08.html +doc/changelog/changelog_09.html +doc/changelog/changelog_10.html 
+doc/changelog/changelog_11.html +doc/changelog/index.html +doc/changelog/migration_04.html +doc/changelog/migration_05.html +doc/changelog/migration_06.html +doc/changelog/migration_07.html +doc/changelog/migration_08.html +doc/changelog/migration_09.html +doc/changelog/migration_10.html +doc/changelog/migration_11.html +doc/core/api_basics.html +doc/core/compiler.html +doc/core/connections.html +doc/core/constraints.html +doc/core/custom_types.html +doc/core/ddl.html +doc/core/defaults.html +doc/core/dml.html +doc/core/engines.html +doc/core/engines_connections.html +doc/core/event.html +doc/core/events.html +doc/core/exceptions.html +doc/core/expression_api.html +doc/core/functions.html +doc/core/index.html +doc/core/inspection.html +doc/core/interfaces.html +doc/core/internals.html +doc/core/metadata.html +doc/core/pooling.html +doc/core/reflection.html +doc/core/schema.html +doc/core/selectable.html +doc/core/serializer.html +doc/core/sqlelement.html +doc/core/tutorial.html +doc/core/type_api.html +doc/core/type_basics.html +doc/core/types.html +doc/dialects/firebird.html +doc/dialects/index.html +doc/dialects/mssql.html +doc/dialects/mysql.html +doc/dialects/oracle.html +doc/dialects/postgresql.html +doc/dialects/sqlite.html +doc/dialects/sybase.html +doc/faq/connections.html +doc/faq/index.html +doc/faq/metadata_schema.html +doc/faq/ormconfiguration.html +doc/faq/performance.html +doc/faq/sessions.html +doc/faq/sqlexpressions.html +doc/orm/backref.html +doc/orm/basic_relationships.html +doc/orm/cascades.html +doc/orm/classical.html +doc/orm/collections.html +doc/orm/composites.html +doc/orm/constructors.html +doc/orm/contextual.html +doc/orm/deprecated.html +doc/orm/events.html +doc/orm/examples.html +doc/orm/exceptions.html +doc/orm/extending.html +doc/orm/index.html +doc/orm/inheritance.html +doc/orm/internals.html +doc/orm/join_conditions.html +doc/orm/loading.html +doc/orm/loading_columns.html +doc/orm/loading_objects.html +doc/orm/loading_relationships.html +doc/orm/mapped_attributes.html +doc/orm/mapped_sql_expr.html +doc/orm/mapper_config.html +doc/orm/mapping_api.html +doc/orm/mapping_columns.html +doc/orm/mapping_styles.html +doc/orm/nonstandard_mappings.html +doc/orm/persistence_techniques.html +doc/orm/query.html +doc/orm/relationship_api.html +doc/orm/relationship_persistence.html +doc/orm/relationships.html +doc/orm/scalar_mapping.html +doc/orm/self_referential.html +doc/orm/session.html +doc/orm/session_api.html +doc/orm/session_basics.html +doc/orm/session_events.html +doc/orm/session_state_management.html +doc/orm/session_transaction.html +doc/orm/tutorial.html +doc/orm/versioning.html +doc/orm/extensions/associationproxy.html +doc/orm/extensions/automap.html +doc/orm/extensions/baked.html +doc/orm/extensions/horizontal_shard.html +doc/orm/extensions/hybrid.html +doc/orm/extensions/index.html +doc/orm/extensions/indexable.html +doc/orm/extensions/instrumentation.html +doc/orm/extensions/mutable.html +doc/orm/extensions/orderinglist.html +doc/orm/extensions/declarative/api.html +doc/orm/extensions/declarative/basic_use.html +doc/orm/extensions/declarative/index.html +doc/orm/extensions/declarative/inheritance.html +doc/orm/extensions/declarative/mixins.html +doc/orm/extensions/declarative/relationships.html +doc/orm/extensions/declarative/table_config.html +examples/__init__.py +examples/adjacency_list/__init__.py +examples/adjacency_list/adjacency_list.py +examples/association/__init__.py +examples/association/basic_association.py 
+examples/association/dict_of_sets_with_default.py +examples/association/proxied_association.py +examples/custom_attributes/__init__.py +examples/custom_attributes/active_column_defaults.py +examples/custom_attributes/custom_management.py +examples/custom_attributes/listen_for_events.py +examples/dogpile_caching/__init__.py +examples/dogpile_caching/advanced.py +examples/dogpile_caching/caching_query.py +examples/dogpile_caching/environment.py +examples/dogpile_caching/fixture_data.py +examples/dogpile_caching/helloworld.py +examples/dogpile_caching/local_session_caching.py +examples/dogpile_caching/model.py +examples/dogpile_caching/relationship_caching.py +examples/dynamic_dict/__init__.py +examples/dynamic_dict/dynamic_dict.py +examples/elementtree/__init__.py +examples/elementtree/adjacency_list.py +examples/elementtree/optimized_al.py +examples/elementtree/pickle.py +examples/elementtree/test.xml +examples/elementtree/test2.xml +examples/elementtree/test3.xml +examples/generic_associations/__init__.py +examples/generic_associations/discriminator_on_association.py +examples/generic_associations/generic_fk.py +examples/generic_associations/table_per_association.py +examples/generic_associations/table_per_related.py +examples/graphs/__init__.py +examples/graphs/directed_graph.py +examples/inheritance/__init__.py +examples/inheritance/concrete.py +examples/inheritance/joined.py +examples/inheritance/single.py +examples/join_conditions/__init__.py +examples/join_conditions/cast.py +examples/join_conditions/threeway.py +examples/large_collection/__init__.py +examples/large_collection/large_collection.py +examples/materialized_paths/__init__.py +examples/materialized_paths/materialized_paths.py +examples/nested_sets/__init__.py +examples/nested_sets/nested_sets.py +examples/performance/__init__.py +examples/performance/__main__.py +examples/performance/bulk_inserts.py +examples/performance/bulk_updates.py +examples/performance/large_resultsets.py +examples/performance/short_selects.py +examples/performance/single_inserts.py +examples/postgis/__init__.py +examples/postgis/postgis.py +examples/sharding/__init__.py +examples/sharding/attribute_shard.py +examples/versioned_history/__init__.py +examples/versioned_history/history_meta.py +examples/versioned_history/test_versioning.py +examples/versioned_rows/__init__.py +examples/versioned_rows/versioned_map.py +examples/versioned_rows/versioned_rows.py +examples/vertical/__init__.py +examples/vertical/dictlike-polymorphic.py +examples/vertical/dictlike.py +lib/SQLAlchemy.egg-info/PKG-INFO +lib/SQLAlchemy.egg-info/SOURCES.txt +lib/SQLAlchemy.egg-info/dependency_links.txt +lib/SQLAlchemy.egg-info/requires.txt +lib/SQLAlchemy.egg-info/top_level.txt +lib/sqlalchemy/__init__.py +lib/sqlalchemy/events.py +lib/sqlalchemy/exc.py +lib/sqlalchemy/inspection.py +lib/sqlalchemy/interfaces.py +lib/sqlalchemy/log.py +lib/sqlalchemy/pool.py +lib/sqlalchemy/processors.py +lib/sqlalchemy/schema.py +lib/sqlalchemy/types.py +lib/sqlalchemy/cextension/processors.c +lib/sqlalchemy/cextension/resultproxy.c +lib/sqlalchemy/cextension/utils.c +lib/sqlalchemy/connectors/__init__.py +lib/sqlalchemy/connectors/mxodbc.py +lib/sqlalchemy/connectors/pyodbc.py +lib/sqlalchemy/connectors/zxJDBC.py +lib/sqlalchemy/databases/__init__.py +lib/sqlalchemy/dialects/__init__.py +lib/sqlalchemy/dialects/type_migration_guidelines.txt +lib/sqlalchemy/dialects/firebird/__init__.py +lib/sqlalchemy/dialects/firebird/base.py +lib/sqlalchemy/dialects/firebird/fdb.py 
+lib/sqlalchemy/dialects/firebird/kinterbasdb.py +lib/sqlalchemy/dialects/mssql/__init__.py +lib/sqlalchemy/dialects/mssql/adodbapi.py +lib/sqlalchemy/dialects/mssql/base.py +lib/sqlalchemy/dialects/mssql/information_schema.py +lib/sqlalchemy/dialects/mssql/mxodbc.py +lib/sqlalchemy/dialects/mssql/pymssql.py +lib/sqlalchemy/dialects/mssql/pyodbc.py +lib/sqlalchemy/dialects/mssql/zxjdbc.py +lib/sqlalchemy/dialects/mysql/__init__.py +lib/sqlalchemy/dialects/mysql/base.py +lib/sqlalchemy/dialects/mysql/cymysql.py +lib/sqlalchemy/dialects/mysql/enumerated.py +lib/sqlalchemy/dialects/mysql/gaerdbms.py +lib/sqlalchemy/dialects/mysql/json.py +lib/sqlalchemy/dialects/mysql/mysqlconnector.py +lib/sqlalchemy/dialects/mysql/mysqldb.py +lib/sqlalchemy/dialects/mysql/oursql.py +lib/sqlalchemy/dialects/mysql/pymysql.py +lib/sqlalchemy/dialects/mysql/pyodbc.py +lib/sqlalchemy/dialects/mysql/reflection.py +lib/sqlalchemy/dialects/mysql/types.py +lib/sqlalchemy/dialects/mysql/zxjdbc.py +lib/sqlalchemy/dialects/oracle/__init__.py +lib/sqlalchemy/dialects/oracle/base.py +lib/sqlalchemy/dialects/oracle/cx_oracle.py +lib/sqlalchemy/dialects/oracle/zxjdbc.py +lib/sqlalchemy/dialects/postgresql/__init__.py +lib/sqlalchemy/dialects/postgresql/array.py +lib/sqlalchemy/dialects/postgresql/base.py +lib/sqlalchemy/dialects/postgresql/dml.py +lib/sqlalchemy/dialects/postgresql/ext.py +lib/sqlalchemy/dialects/postgresql/hstore.py +lib/sqlalchemy/dialects/postgresql/json.py +lib/sqlalchemy/dialects/postgresql/pg8000.py +lib/sqlalchemy/dialects/postgresql/psycopg2.py +lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py +lib/sqlalchemy/dialects/postgresql/pygresql.py +lib/sqlalchemy/dialects/postgresql/pypostgresql.py +lib/sqlalchemy/dialects/postgresql/ranges.py +lib/sqlalchemy/dialects/postgresql/zxjdbc.py +lib/sqlalchemy/dialects/sqlite/__init__.py +lib/sqlalchemy/dialects/sqlite/base.py +lib/sqlalchemy/dialects/sqlite/pysqlcipher.py +lib/sqlalchemy/dialects/sqlite/pysqlite.py +lib/sqlalchemy/dialects/sybase/__init__.py +lib/sqlalchemy/dialects/sybase/base.py +lib/sqlalchemy/dialects/sybase/mxodbc.py +lib/sqlalchemy/dialects/sybase/pyodbc.py +lib/sqlalchemy/dialects/sybase/pysybase.py +lib/sqlalchemy/engine/__init__.py +lib/sqlalchemy/engine/base.py +lib/sqlalchemy/engine/default.py +lib/sqlalchemy/engine/interfaces.py +lib/sqlalchemy/engine/reflection.py +lib/sqlalchemy/engine/result.py +lib/sqlalchemy/engine/strategies.py +lib/sqlalchemy/engine/threadlocal.py +lib/sqlalchemy/engine/url.py +lib/sqlalchemy/engine/util.py +lib/sqlalchemy/event/__init__.py +lib/sqlalchemy/event/api.py +lib/sqlalchemy/event/attr.py +lib/sqlalchemy/event/base.py +lib/sqlalchemy/event/legacy.py +lib/sqlalchemy/event/registry.py +lib/sqlalchemy/ext/__init__.py +lib/sqlalchemy/ext/associationproxy.py +lib/sqlalchemy/ext/automap.py +lib/sqlalchemy/ext/baked.py +lib/sqlalchemy/ext/compiler.py +lib/sqlalchemy/ext/horizontal_shard.py +lib/sqlalchemy/ext/hybrid.py +lib/sqlalchemy/ext/indexable.py +lib/sqlalchemy/ext/instrumentation.py +lib/sqlalchemy/ext/mutable.py +lib/sqlalchemy/ext/orderinglist.py +lib/sqlalchemy/ext/serializer.py +lib/sqlalchemy/ext/declarative/__init__.py +lib/sqlalchemy/ext/declarative/api.py +lib/sqlalchemy/ext/declarative/base.py +lib/sqlalchemy/ext/declarative/clsregistry.py +lib/sqlalchemy/orm/__init__.py +lib/sqlalchemy/orm/attributes.py +lib/sqlalchemy/orm/base.py +lib/sqlalchemy/orm/collections.py +lib/sqlalchemy/orm/dependency.py +lib/sqlalchemy/orm/deprecated_interfaces.py +lib/sqlalchemy/orm/descriptor_props.py 
+lib/sqlalchemy/orm/dynamic.py +lib/sqlalchemy/orm/evaluator.py +lib/sqlalchemy/orm/events.py +lib/sqlalchemy/orm/exc.py +lib/sqlalchemy/orm/identity.py +lib/sqlalchemy/orm/instrumentation.py +lib/sqlalchemy/orm/interfaces.py +lib/sqlalchemy/orm/loading.py +lib/sqlalchemy/orm/mapper.py +lib/sqlalchemy/orm/path_registry.py +lib/sqlalchemy/orm/persistence.py +lib/sqlalchemy/orm/properties.py +lib/sqlalchemy/orm/query.py +lib/sqlalchemy/orm/relationships.py +lib/sqlalchemy/orm/scoping.py +lib/sqlalchemy/orm/session.py +lib/sqlalchemy/orm/state.py +lib/sqlalchemy/orm/strategies.py +lib/sqlalchemy/orm/strategy_options.py +lib/sqlalchemy/orm/sync.py +lib/sqlalchemy/orm/unitofwork.py +lib/sqlalchemy/orm/util.py +lib/sqlalchemy/sql/__init__.py +lib/sqlalchemy/sql/annotation.py +lib/sqlalchemy/sql/base.py +lib/sqlalchemy/sql/compiler.py +lib/sqlalchemy/sql/crud.py +lib/sqlalchemy/sql/ddl.py +lib/sqlalchemy/sql/default_comparator.py +lib/sqlalchemy/sql/dml.py +lib/sqlalchemy/sql/elements.py +lib/sqlalchemy/sql/expression.py +lib/sqlalchemy/sql/functions.py +lib/sqlalchemy/sql/naming.py +lib/sqlalchemy/sql/operators.py +lib/sqlalchemy/sql/schema.py +lib/sqlalchemy/sql/selectable.py +lib/sqlalchemy/sql/sqltypes.py +lib/sqlalchemy/sql/type_api.py +lib/sqlalchemy/sql/util.py +lib/sqlalchemy/sql/visitors.py +lib/sqlalchemy/testing/__init__.py +lib/sqlalchemy/testing/assertions.py +lib/sqlalchemy/testing/assertsql.py +lib/sqlalchemy/testing/config.py +lib/sqlalchemy/testing/engines.py +lib/sqlalchemy/testing/entities.py +lib/sqlalchemy/testing/exclusions.py +lib/sqlalchemy/testing/fixtures.py +lib/sqlalchemy/testing/mock.py +lib/sqlalchemy/testing/pickleable.py +lib/sqlalchemy/testing/profiling.py +lib/sqlalchemy/testing/provision.py +lib/sqlalchemy/testing/replay_fixture.py +lib/sqlalchemy/testing/requirements.py +lib/sqlalchemy/testing/runner.py +lib/sqlalchemy/testing/schema.py +lib/sqlalchemy/testing/util.py +lib/sqlalchemy/testing/warnings.py +lib/sqlalchemy/testing/plugin/__init__.py +lib/sqlalchemy/testing/plugin/bootstrap.py +lib/sqlalchemy/testing/plugin/noseplugin.py +lib/sqlalchemy/testing/plugin/plugin_base.py +lib/sqlalchemy/testing/plugin/pytestplugin.py +lib/sqlalchemy/testing/suite/__init__.py +lib/sqlalchemy/testing/suite/test_ddl.py +lib/sqlalchemy/testing/suite/test_dialect.py +lib/sqlalchemy/testing/suite/test_insert.py +lib/sqlalchemy/testing/suite/test_reflection.py +lib/sqlalchemy/testing/suite/test_results.py +lib/sqlalchemy/testing/suite/test_select.py +lib/sqlalchemy/testing/suite/test_sequence.py +lib/sqlalchemy/testing/suite/test_types.py +lib/sqlalchemy/testing/suite/test_update_delete.py +lib/sqlalchemy/util/__init__.py +lib/sqlalchemy/util/_collections.py +lib/sqlalchemy/util/compat.py +lib/sqlalchemy/util/deprecations.py +lib/sqlalchemy/util/langhelpers.py +lib/sqlalchemy/util/queue.py +lib/sqlalchemy/util/topological.py +test/__init__.py +test/binary_data_one.dat +test/binary_data_two.dat +test/conftest.py +test/requirements.py +test/aaa_profiling/__init__.py +test/aaa_profiling/test_compiler.py +test/aaa_profiling/test_memusage.py +test/aaa_profiling/test_orm.py +test/aaa_profiling/test_pool.py +test/aaa_profiling/test_resultset.py +test/aaa_profiling/test_zoomark.py +test/aaa_profiling/test_zoomark_orm.py +test/base/__init__.py +test/base/test_dependency.py +test/base/test_events.py +test/base/test_except.py +test/base/test_inspect.py +test/base/test_tutorials.py +test/base/test_utils.py +test/dialect/__init__.py +test/dialect/test_firebird.py 
+test/dialect/test_mxodbc.py +test/dialect/test_oracle.py +test/dialect/test_pyodbc.py +test/dialect/test_sqlite.py +test/dialect/test_suite.py +test/dialect/test_sybase.py +test/dialect/mssql/__init__.py +test/dialect/mssql/test_compiler.py +test/dialect/mssql/test_engine.py +test/dialect/mssql/test_query.py +test/dialect/mssql/test_reflection.py +test/dialect/mssql/test_types.py +test/dialect/mysql/__init__.py +test/dialect/mysql/test_compiler.py +test/dialect/mysql/test_dialect.py +test/dialect/mysql/test_query.py +test/dialect/mysql/test_reflection.py +test/dialect/mysql/test_types.py +test/dialect/postgresql/__init__.py +test/dialect/postgresql/test_compiler.py +test/dialect/postgresql/test_dialect.py +test/dialect/postgresql/test_on_conflict.py +test/dialect/postgresql/test_query.py +test/dialect/postgresql/test_reflection.py +test/dialect/postgresql/test_types.py +test/engine/__init__.py +test/engine/test_bind.py +test/engine/test_ddlevents.py +test/engine/test_execute.py +test/engine/test_logging.py +test/engine/test_parseconnect.py +test/engine/test_pool.py +test/engine/test_processors.py +test/engine/test_reconnect.py +test/engine/test_reflection.py +test/engine/test_transaction.py +test/ext/__init__.py +test/ext/test_associationproxy.py +test/ext/test_automap.py +test/ext/test_baked.py +test/ext/test_compiler.py +test/ext/test_extendedattr.py +test/ext/test_horizontal_shard.py +test/ext/test_hybrid.py +test/ext/test_indexable.py +test/ext/test_mutable.py +test/ext/test_orderinglist.py +test/ext/test_serializer.py +test/ext/declarative/__init__.py +test/ext/declarative/test_basic.py +test/ext/declarative/test_clsregistry.py +test/ext/declarative/test_inheritance.py +test/ext/declarative/test_mixin.py +test/ext/declarative/test_reflection.py +test/orm/__init__.py +test/orm/_fixtures.py +test/orm/test_association.py +test/orm/test_assorted_eager.py +test/orm/test_attributes.py +test/orm/test_backref_mutations.py +test/orm/test_bind.py +test/orm/test_bulk.py +test/orm/test_bundle.py +test/orm/test_cascade.py +test/orm/test_collection.py +test/orm/test_compile.py +test/orm/test_composites.py +test/orm/test_cycles.py +test/orm/test_default_strategies.py +test/orm/test_defaults.py +test/orm/test_deferred.py +test/orm/test_deprecations.py +test/orm/test_descriptor.py +test/orm/test_dynamic.py +test/orm/test_eager_relations.py +test/orm/test_evaluator.py +test/orm/test_events.py +test/orm/test_expire.py +test/orm/test_froms.py +test/orm/test_generative.py +test/orm/test_hasparent.py +test/orm/test_immediate_load.py +test/orm/test_inspect.py +test/orm/test_instrumentation.py +test/orm/test_joins.py +test/orm/test_lazy_relations.py +test/orm/test_load_on_fks.py +test/orm/test_loading.py +test/orm/test_lockmode.py +test/orm/test_manytomany.py +test/orm/test_mapper.py +test/orm/test_merge.py +test/orm/test_naturalpks.py +test/orm/test_of_type.py +test/orm/test_onetoone.py +test/orm/test_options.py +test/orm/test_pickled.py +test/orm/test_query.py +test/orm/test_rel_fn.py +test/orm/test_relationships.py +test/orm/test_scoping.py +test/orm/test_selectable.py +test/orm/test_session.py +test/orm/test_subquery_relations.py +test/orm/test_sync.py +test/orm/test_transaction.py +test/orm/test_unitofwork.py +test/orm/test_unitofworkv2.py +test/orm/test_update_delete.py +test/orm/test_utils.py +test/orm/test_validators.py +test/orm/test_versioning.py +test/orm/inheritance/__init__.py +test/orm/inheritance/_poly_fixtures.py +test/orm/inheritance/test_abc_inheritance.py 
+test/orm/inheritance/test_abc_polymorphic.py +test/orm/inheritance/test_assorted_poly.py +test/orm/inheritance/test_basic.py +test/orm/inheritance/test_concrete.py +test/orm/inheritance/test_magazine.py +test/orm/inheritance/test_manytomany.py +test/orm/inheritance/test_poly_linked_list.py +test/orm/inheritance/test_poly_persistence.py +test/orm/inheritance/test_polymorphic_rel.py +test/orm/inheritance/test_productspec.py +test/orm/inheritance/test_relationship.py +test/orm/inheritance/test_selects.py +test/orm/inheritance/test_single.py +test/orm/inheritance/test_with_poly.py +test/perf/invalidate_stresstest.py +test/perf/orm2010.py +test/sql/__init__.py +test/sql/test_case_statement.py +test/sql/test_compiler.py +test/sql/test_constraints.py +test/sql/test_cte.py +test/sql/test_ddlemit.py +test/sql/test_defaults.py +test/sql/test_delete.py +test/sql/test_functions.py +test/sql/test_generative.py +test/sql/test_insert.py +test/sql/test_insert_exec.py +test/sql/test_inspect.py +test/sql/test_join_rewriting.py +test/sql/test_labels.py +test/sql/test_lateral.py +test/sql/test_metadata.py +test/sql/test_operators.py +test/sql/test_query.py +test/sql/test_quote.py +test/sql/test_resultset.py +test/sql/test_returning.py +test/sql/test_rowcount.py +test/sql/test_selectable.py +test/sql/test_tablesample.py +test/sql/test_text.py +test/sql/test_type_expressions.py +test/sql/test_types.py +test/sql/test_unicode.py +test/sql/test_update.py +test/sql/test_utils.py \ No newline at end of file diff --git a/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/dependency_links.txt b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/installed-files.txt b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/installed-files.txt new file mode 100644 index 0000000..d2681e6 --- /dev/null +++ b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/installed-files.txt @@ -0,0 +1,387 @@ +../sqlalchemy/__init__.py +../sqlalchemy/events.py +../sqlalchemy/exc.py +../sqlalchemy/inspection.py +../sqlalchemy/interfaces.py +../sqlalchemy/log.py +../sqlalchemy/pool.py +../sqlalchemy/processors.py +../sqlalchemy/schema.py +../sqlalchemy/types.py +../sqlalchemy/connectors/__init__.py +../sqlalchemy/connectors/mxodbc.py +../sqlalchemy/connectors/pyodbc.py +../sqlalchemy/connectors/zxJDBC.py +../sqlalchemy/databases/__init__.py +../sqlalchemy/dialects/__init__.py +../sqlalchemy/engine/__init__.py +../sqlalchemy/engine/base.py +../sqlalchemy/engine/default.py +../sqlalchemy/engine/interfaces.py +../sqlalchemy/engine/reflection.py +../sqlalchemy/engine/result.py +../sqlalchemy/engine/strategies.py +../sqlalchemy/engine/threadlocal.py +../sqlalchemy/engine/url.py +../sqlalchemy/engine/util.py +../sqlalchemy/event/__init__.py +../sqlalchemy/event/api.py +../sqlalchemy/event/attr.py +../sqlalchemy/event/base.py +../sqlalchemy/event/legacy.py +../sqlalchemy/event/registry.py +../sqlalchemy/ext/__init__.py +../sqlalchemy/ext/associationproxy.py +../sqlalchemy/ext/automap.py +../sqlalchemy/ext/baked.py +../sqlalchemy/ext/compiler.py +../sqlalchemy/ext/horizontal_shard.py +../sqlalchemy/ext/hybrid.py +../sqlalchemy/ext/indexable.py +../sqlalchemy/ext/instrumentation.py +../sqlalchemy/ext/mutable.py +../sqlalchemy/ext/orderinglist.py +../sqlalchemy/ext/serializer.py +../sqlalchemy/orm/__init__.py +../sqlalchemy/orm/attributes.py +../sqlalchemy/orm/base.py 
+../sqlalchemy/orm/collections.py +../sqlalchemy/orm/dependency.py +../sqlalchemy/orm/deprecated_interfaces.py +../sqlalchemy/orm/descriptor_props.py +../sqlalchemy/orm/dynamic.py +../sqlalchemy/orm/evaluator.py +../sqlalchemy/orm/events.py +../sqlalchemy/orm/exc.py +../sqlalchemy/orm/identity.py +../sqlalchemy/orm/instrumentation.py +../sqlalchemy/orm/interfaces.py +../sqlalchemy/orm/loading.py +../sqlalchemy/orm/mapper.py +../sqlalchemy/orm/path_registry.py +../sqlalchemy/orm/persistence.py +../sqlalchemy/orm/properties.py +../sqlalchemy/orm/query.py +../sqlalchemy/orm/relationships.py +../sqlalchemy/orm/scoping.py +../sqlalchemy/orm/session.py +../sqlalchemy/orm/state.py +../sqlalchemy/orm/strategies.py +../sqlalchemy/orm/strategy_options.py +../sqlalchemy/orm/sync.py +../sqlalchemy/orm/unitofwork.py +../sqlalchemy/orm/util.py +../sqlalchemy/sql/__init__.py +../sqlalchemy/sql/annotation.py +../sqlalchemy/sql/base.py +../sqlalchemy/sql/compiler.py +../sqlalchemy/sql/crud.py +../sqlalchemy/sql/ddl.py +../sqlalchemy/sql/default_comparator.py +../sqlalchemy/sql/dml.py +../sqlalchemy/sql/elements.py +../sqlalchemy/sql/expression.py +../sqlalchemy/sql/functions.py +../sqlalchemy/sql/naming.py +../sqlalchemy/sql/operators.py +../sqlalchemy/sql/schema.py +../sqlalchemy/sql/selectable.py +../sqlalchemy/sql/sqltypes.py +../sqlalchemy/sql/type_api.py +../sqlalchemy/sql/util.py +../sqlalchemy/sql/visitors.py +../sqlalchemy/testing/__init__.py +../sqlalchemy/testing/assertions.py +../sqlalchemy/testing/assertsql.py +../sqlalchemy/testing/config.py +../sqlalchemy/testing/engines.py +../sqlalchemy/testing/entities.py +../sqlalchemy/testing/exclusions.py +../sqlalchemy/testing/fixtures.py +../sqlalchemy/testing/mock.py +../sqlalchemy/testing/pickleable.py +../sqlalchemy/testing/profiling.py +../sqlalchemy/testing/provision.py +../sqlalchemy/testing/replay_fixture.py +../sqlalchemy/testing/requirements.py +../sqlalchemy/testing/runner.py +../sqlalchemy/testing/schema.py +../sqlalchemy/testing/util.py +../sqlalchemy/testing/warnings.py +../sqlalchemy/util/__init__.py +../sqlalchemy/util/_collections.py +../sqlalchemy/util/compat.py +../sqlalchemy/util/deprecations.py +../sqlalchemy/util/langhelpers.py +../sqlalchemy/util/queue.py +../sqlalchemy/util/topological.py +../sqlalchemy/dialects/firebird/__init__.py +../sqlalchemy/dialects/firebird/base.py +../sqlalchemy/dialects/firebird/fdb.py +../sqlalchemy/dialects/firebird/kinterbasdb.py +../sqlalchemy/dialects/mssql/__init__.py +../sqlalchemy/dialects/mssql/adodbapi.py +../sqlalchemy/dialects/mssql/base.py +../sqlalchemy/dialects/mssql/information_schema.py +../sqlalchemy/dialects/mssql/mxodbc.py +../sqlalchemy/dialects/mssql/pymssql.py +../sqlalchemy/dialects/mssql/pyodbc.py +../sqlalchemy/dialects/mssql/zxjdbc.py +../sqlalchemy/dialects/mysql/__init__.py +../sqlalchemy/dialects/mysql/base.py +../sqlalchemy/dialects/mysql/cymysql.py +../sqlalchemy/dialects/mysql/enumerated.py +../sqlalchemy/dialects/mysql/gaerdbms.py +../sqlalchemy/dialects/mysql/json.py +../sqlalchemy/dialects/mysql/mysqlconnector.py +../sqlalchemy/dialects/mysql/mysqldb.py +../sqlalchemy/dialects/mysql/oursql.py +../sqlalchemy/dialects/mysql/pymysql.py +../sqlalchemy/dialects/mysql/pyodbc.py +../sqlalchemy/dialects/mysql/reflection.py +../sqlalchemy/dialects/mysql/types.py +../sqlalchemy/dialects/mysql/zxjdbc.py +../sqlalchemy/dialects/oracle/__init__.py +../sqlalchemy/dialects/oracle/base.py +../sqlalchemy/dialects/oracle/cx_oracle.py +../sqlalchemy/dialects/oracle/zxjdbc.py 
+../sqlalchemy/dialects/postgresql/__init__.py +../sqlalchemy/dialects/postgresql/array.py +../sqlalchemy/dialects/postgresql/base.py +../sqlalchemy/dialects/postgresql/dml.py +../sqlalchemy/dialects/postgresql/ext.py +../sqlalchemy/dialects/postgresql/hstore.py +../sqlalchemy/dialects/postgresql/json.py +../sqlalchemy/dialects/postgresql/pg8000.py +../sqlalchemy/dialects/postgresql/psycopg2.py +../sqlalchemy/dialects/postgresql/psycopg2cffi.py +../sqlalchemy/dialects/postgresql/pygresql.py +../sqlalchemy/dialects/postgresql/pypostgresql.py +../sqlalchemy/dialects/postgresql/ranges.py +../sqlalchemy/dialects/postgresql/zxjdbc.py +../sqlalchemy/dialects/sqlite/__init__.py +../sqlalchemy/dialects/sqlite/base.py +../sqlalchemy/dialects/sqlite/pysqlcipher.py +../sqlalchemy/dialects/sqlite/pysqlite.py +../sqlalchemy/dialects/sybase/__init__.py +../sqlalchemy/dialects/sybase/base.py +../sqlalchemy/dialects/sybase/mxodbc.py +../sqlalchemy/dialects/sybase/pyodbc.py +../sqlalchemy/dialects/sybase/pysybase.py +../sqlalchemy/ext/declarative/__init__.py +../sqlalchemy/ext/declarative/api.py +../sqlalchemy/ext/declarative/base.py +../sqlalchemy/ext/declarative/clsregistry.py +../sqlalchemy/testing/plugin/__init__.py +../sqlalchemy/testing/plugin/bootstrap.py +../sqlalchemy/testing/plugin/noseplugin.py +../sqlalchemy/testing/plugin/plugin_base.py +../sqlalchemy/testing/plugin/pytestplugin.py +../sqlalchemy/testing/suite/__init__.py +../sqlalchemy/testing/suite/test_ddl.py +../sqlalchemy/testing/suite/test_dialect.py +../sqlalchemy/testing/suite/test_insert.py +../sqlalchemy/testing/suite/test_reflection.py +../sqlalchemy/testing/suite/test_results.py +../sqlalchemy/testing/suite/test_select.py +../sqlalchemy/testing/suite/test_sequence.py +../sqlalchemy/testing/suite/test_types.py +../sqlalchemy/testing/suite/test_update_delete.py +../sqlalchemy/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/__pycache__/events.cpython-34.pyc +../sqlalchemy/__pycache__/exc.cpython-34.pyc +../sqlalchemy/__pycache__/inspection.cpython-34.pyc +../sqlalchemy/__pycache__/interfaces.cpython-34.pyc +../sqlalchemy/__pycache__/log.cpython-34.pyc +../sqlalchemy/__pycache__/pool.cpython-34.pyc +../sqlalchemy/__pycache__/processors.cpython-34.pyc +../sqlalchemy/__pycache__/schema.cpython-34.pyc +../sqlalchemy/__pycache__/types.cpython-34.pyc +../sqlalchemy/connectors/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/connectors/__pycache__/mxodbc.cpython-34.pyc +../sqlalchemy/connectors/__pycache__/pyodbc.cpython-34.pyc +../sqlalchemy/connectors/__pycache__/zxJDBC.cpython-34.pyc +../sqlalchemy/databases/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/dialects/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/engine/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/engine/__pycache__/base.cpython-34.pyc +../sqlalchemy/engine/__pycache__/default.cpython-34.pyc +../sqlalchemy/engine/__pycache__/interfaces.cpython-34.pyc +../sqlalchemy/engine/__pycache__/reflection.cpython-34.pyc +../sqlalchemy/engine/__pycache__/result.cpython-34.pyc +../sqlalchemy/engine/__pycache__/strategies.cpython-34.pyc +../sqlalchemy/engine/__pycache__/threadlocal.cpython-34.pyc +../sqlalchemy/engine/__pycache__/url.cpython-34.pyc +../sqlalchemy/engine/__pycache__/util.cpython-34.pyc +../sqlalchemy/event/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/event/__pycache__/api.cpython-34.pyc +../sqlalchemy/event/__pycache__/attr.cpython-34.pyc +../sqlalchemy/event/__pycache__/base.cpython-34.pyc +../sqlalchemy/event/__pycache__/legacy.cpython-34.pyc 
+../sqlalchemy/event/__pycache__/registry.cpython-34.pyc +../sqlalchemy/ext/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/ext/__pycache__/associationproxy.cpython-34.pyc +../sqlalchemy/ext/__pycache__/automap.cpython-34.pyc +../sqlalchemy/ext/__pycache__/baked.cpython-34.pyc +../sqlalchemy/ext/__pycache__/compiler.cpython-34.pyc +../sqlalchemy/ext/__pycache__/horizontal_shard.cpython-34.pyc +../sqlalchemy/ext/__pycache__/hybrid.cpython-34.pyc +../sqlalchemy/ext/__pycache__/indexable.cpython-34.pyc +../sqlalchemy/ext/__pycache__/instrumentation.cpython-34.pyc +../sqlalchemy/ext/__pycache__/mutable.cpython-34.pyc +../sqlalchemy/ext/__pycache__/orderinglist.cpython-34.pyc +../sqlalchemy/ext/__pycache__/serializer.cpython-34.pyc +../sqlalchemy/orm/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/orm/__pycache__/attributes.cpython-34.pyc +../sqlalchemy/orm/__pycache__/base.cpython-34.pyc +../sqlalchemy/orm/__pycache__/collections.cpython-34.pyc +../sqlalchemy/orm/__pycache__/dependency.cpython-34.pyc +../sqlalchemy/orm/__pycache__/deprecated_interfaces.cpython-34.pyc +../sqlalchemy/orm/__pycache__/descriptor_props.cpython-34.pyc +../sqlalchemy/orm/__pycache__/dynamic.cpython-34.pyc +../sqlalchemy/orm/__pycache__/evaluator.cpython-34.pyc +../sqlalchemy/orm/__pycache__/events.cpython-34.pyc +../sqlalchemy/orm/__pycache__/exc.cpython-34.pyc +../sqlalchemy/orm/__pycache__/identity.cpython-34.pyc +../sqlalchemy/orm/__pycache__/instrumentation.cpython-34.pyc +../sqlalchemy/orm/__pycache__/interfaces.cpython-34.pyc +../sqlalchemy/orm/__pycache__/loading.cpython-34.pyc +../sqlalchemy/orm/__pycache__/mapper.cpython-34.pyc +../sqlalchemy/orm/__pycache__/path_registry.cpython-34.pyc +../sqlalchemy/orm/__pycache__/persistence.cpython-34.pyc +../sqlalchemy/orm/__pycache__/properties.cpython-34.pyc +../sqlalchemy/orm/__pycache__/query.cpython-34.pyc +../sqlalchemy/orm/__pycache__/relationships.cpython-34.pyc +../sqlalchemy/orm/__pycache__/scoping.cpython-34.pyc +../sqlalchemy/orm/__pycache__/session.cpython-34.pyc +../sqlalchemy/orm/__pycache__/state.cpython-34.pyc +../sqlalchemy/orm/__pycache__/strategies.cpython-34.pyc +../sqlalchemy/orm/__pycache__/strategy_options.cpython-34.pyc +../sqlalchemy/orm/__pycache__/sync.cpython-34.pyc +../sqlalchemy/orm/__pycache__/unitofwork.cpython-34.pyc +../sqlalchemy/orm/__pycache__/util.cpython-34.pyc +../sqlalchemy/sql/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/sql/__pycache__/annotation.cpython-34.pyc +../sqlalchemy/sql/__pycache__/base.cpython-34.pyc +../sqlalchemy/sql/__pycache__/compiler.cpython-34.pyc +../sqlalchemy/sql/__pycache__/crud.cpython-34.pyc +../sqlalchemy/sql/__pycache__/ddl.cpython-34.pyc +../sqlalchemy/sql/__pycache__/default_comparator.cpython-34.pyc +../sqlalchemy/sql/__pycache__/dml.cpython-34.pyc +../sqlalchemy/sql/__pycache__/elements.cpython-34.pyc +../sqlalchemy/sql/__pycache__/expression.cpython-34.pyc +../sqlalchemy/sql/__pycache__/functions.cpython-34.pyc +../sqlalchemy/sql/__pycache__/naming.cpython-34.pyc +../sqlalchemy/sql/__pycache__/operators.cpython-34.pyc +../sqlalchemy/sql/__pycache__/schema.cpython-34.pyc +../sqlalchemy/sql/__pycache__/selectable.cpython-34.pyc +../sqlalchemy/sql/__pycache__/sqltypes.cpython-34.pyc +../sqlalchemy/sql/__pycache__/type_api.cpython-34.pyc +../sqlalchemy/sql/__pycache__/util.cpython-34.pyc +../sqlalchemy/sql/__pycache__/visitors.cpython-34.pyc +../sqlalchemy/testing/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/testing/__pycache__/assertions.cpython-34.pyc 
+../sqlalchemy/testing/__pycache__/assertsql.cpython-34.pyc +../sqlalchemy/testing/__pycache__/config.cpython-34.pyc +../sqlalchemy/testing/__pycache__/engines.cpython-34.pyc +../sqlalchemy/testing/__pycache__/entities.cpython-34.pyc +../sqlalchemy/testing/__pycache__/exclusions.cpython-34.pyc +../sqlalchemy/testing/__pycache__/fixtures.cpython-34.pyc +../sqlalchemy/testing/__pycache__/mock.cpython-34.pyc +../sqlalchemy/testing/__pycache__/pickleable.cpython-34.pyc +../sqlalchemy/testing/__pycache__/profiling.cpython-34.pyc +../sqlalchemy/testing/__pycache__/provision.cpython-34.pyc +../sqlalchemy/testing/__pycache__/replay_fixture.cpython-34.pyc +../sqlalchemy/testing/__pycache__/requirements.cpython-34.pyc +../sqlalchemy/testing/__pycache__/runner.cpython-34.pyc +../sqlalchemy/testing/__pycache__/schema.cpython-34.pyc +../sqlalchemy/testing/__pycache__/util.cpython-34.pyc +../sqlalchemy/testing/__pycache__/warnings.cpython-34.pyc +../sqlalchemy/util/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/util/__pycache__/_collections.cpython-34.pyc +../sqlalchemy/util/__pycache__/compat.cpython-34.pyc +../sqlalchemy/util/__pycache__/deprecations.cpython-34.pyc +../sqlalchemy/util/__pycache__/langhelpers.cpython-34.pyc +../sqlalchemy/util/__pycache__/queue.cpython-34.pyc +../sqlalchemy/util/__pycache__/topological.cpython-34.pyc +../sqlalchemy/dialects/firebird/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/dialects/firebird/__pycache__/base.cpython-34.pyc +../sqlalchemy/dialects/firebird/__pycache__/fdb.cpython-34.pyc +../sqlalchemy/dialects/firebird/__pycache__/kinterbasdb.cpython-34.pyc +../sqlalchemy/dialects/mssql/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/dialects/mssql/__pycache__/adodbapi.cpython-34.pyc +../sqlalchemy/dialects/mssql/__pycache__/base.cpython-34.pyc +../sqlalchemy/dialects/mssql/__pycache__/information_schema.cpython-34.pyc +../sqlalchemy/dialects/mssql/__pycache__/mxodbc.cpython-34.pyc +../sqlalchemy/dialects/mssql/__pycache__/pymssql.cpython-34.pyc +../sqlalchemy/dialects/mssql/__pycache__/pyodbc.cpython-34.pyc +../sqlalchemy/dialects/mssql/__pycache__/zxjdbc.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/base.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/cymysql.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/enumerated.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/gaerdbms.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/json.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/mysqlconnector.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/mysqldb.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/oursql.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/pymysql.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/pyodbc.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/reflection.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/types.cpython-34.pyc +../sqlalchemy/dialects/mysql/__pycache__/zxjdbc.cpython-34.pyc +../sqlalchemy/dialects/oracle/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/dialects/oracle/__pycache__/base.cpython-34.pyc +../sqlalchemy/dialects/oracle/__pycache__/cx_oracle.cpython-34.pyc +../sqlalchemy/dialects/oracle/__pycache__/zxjdbc.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/array.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/base.cpython-34.pyc 
+../sqlalchemy/dialects/postgresql/__pycache__/dml.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/ext.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/hstore.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/json.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/pg8000.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/psycopg2.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/psycopg2cffi.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/pygresql.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/pypostgresql.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/ranges.cpython-34.pyc +../sqlalchemy/dialects/postgresql/__pycache__/zxjdbc.cpython-34.pyc +../sqlalchemy/dialects/sqlite/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/dialects/sqlite/__pycache__/base.cpython-34.pyc +../sqlalchemy/dialects/sqlite/__pycache__/pysqlcipher.cpython-34.pyc +../sqlalchemy/dialects/sqlite/__pycache__/pysqlite.cpython-34.pyc +../sqlalchemy/dialects/sybase/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/dialects/sybase/__pycache__/base.cpython-34.pyc +../sqlalchemy/dialects/sybase/__pycache__/mxodbc.cpython-34.pyc +../sqlalchemy/dialects/sybase/__pycache__/pyodbc.cpython-34.pyc +../sqlalchemy/dialects/sybase/__pycache__/pysybase.cpython-34.pyc +../sqlalchemy/ext/declarative/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/ext/declarative/__pycache__/api.cpython-34.pyc +../sqlalchemy/ext/declarative/__pycache__/base.cpython-34.pyc +../sqlalchemy/ext/declarative/__pycache__/clsregistry.cpython-34.pyc +../sqlalchemy/testing/plugin/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/testing/plugin/__pycache__/bootstrap.cpython-34.pyc +../sqlalchemy/testing/plugin/__pycache__/noseplugin.cpython-34.pyc +../sqlalchemy/testing/plugin/__pycache__/plugin_base.cpython-34.pyc +../sqlalchemy/testing/plugin/__pycache__/pytestplugin.cpython-34.pyc +../sqlalchemy/testing/suite/__pycache__/__init__.cpython-34.pyc +../sqlalchemy/testing/suite/__pycache__/test_ddl.cpython-34.pyc +../sqlalchemy/testing/suite/__pycache__/test_dialect.cpython-34.pyc +../sqlalchemy/testing/suite/__pycache__/test_insert.cpython-34.pyc +../sqlalchemy/testing/suite/__pycache__/test_reflection.cpython-34.pyc +../sqlalchemy/testing/suite/__pycache__/test_results.cpython-34.pyc +../sqlalchemy/testing/suite/__pycache__/test_select.cpython-34.pyc +../sqlalchemy/testing/suite/__pycache__/test_sequence.cpython-34.pyc +../sqlalchemy/testing/suite/__pycache__/test_types.cpython-34.pyc +../sqlalchemy/testing/suite/__pycache__/test_update_delete.cpython-34.pyc +../sqlalchemy/cprocessors.so +../sqlalchemy/cresultproxy.so +../sqlalchemy/cutils.so +./ +dependency_links.txt +PKG-INFO +requires.txt +SOURCES.txt +top_level.txt diff --git a/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/requires.txt b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/requires.txt new file mode 100644 index 0000000..f2a83ed --- /dev/null +++ b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/requires.txt @@ -0,0 +1,24 @@ + +[mssql_pymssql] +pymssql + +[mssql_pyodbc] +pyodbc + +[mysql] +mysqlclient + +[oracle] +cx_oracle + +[postgresql] +psycopg2 + +[postgresql_pg8000] +pg8000 + +[postgresql_psycopg2cffi] +psycopg2cffi + +[pymysql] +pymysql diff --git a/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/top_level.txt b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/top_level.txt new file mode 100644 index 0000000..39fb2be --- /dev/null +++ b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/top_level.txt @@ -0,0 +1 @@ 
+sqlalchemy diff --git a/app/lib/Werkzeug-0.12.1.dist-info/DESCRIPTION.rst b/app/lib/Werkzeug-0.12.1.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..3e3abb7 --- /dev/null +++ b/app/lib/Werkzeug-0.12.1.dist-info/DESCRIPTION.rst @@ -0,0 +1,54 @@ +Werkzeug +======== + +Werkzeug started as simple collection of various utilities for WSGI +applications and has become one of the most advanced WSGI utility +modules. It includes a powerful debugger, full featured request and +response objects, HTTP utilities to handle entity tags, cache control +headers, HTTP dates, cookie handling, file uploads, a powerful URL +routing system and a bunch of community contributed addon modules. + +Werkzeug is unicode aware and doesn't enforce a specific template +engine, database adapter or anything else. It doesn't even enforce +a specific way of handling requests and leaves all that up to the +developer. It's most useful for end user applications which should work +on as many server environments as possible (such as blogs, wikis, +bulletin boards, etc.). + +Details and example applications are available on the +`Werkzeug website `_. + + +Features +-------- + +- unicode awareness + +- request and response objects + +- various utility functions for dealing with HTTP headers such as + `Accept` and `Cache-Control` headers. + +- thread local objects with proper cleanup at request end + +- an interactive debugger + +- A simple WSGI server with support for threading and forking + with an automatic reloader. + +- a flexible URL routing system with REST support. + +- fully WSGI compatible + + +Development Version +------------------- + +The Werkzeug development version can be installed by cloning the git +repository from `github`_:: + + git clone git@github.com:pallets/werkzeug.git + +.. _github: http://github.com/pallets/werkzeug + + diff --git a/app/lib/Werkzeug-0.12.1.dist-info/LICENSE.txt b/app/lib/Werkzeug-0.12.1.dist-info/LICENSE.txt new file mode 100644 index 0000000..1c2e0b7 --- /dev/null +++ b/app/lib/Werkzeug-0.12.1.dist-info/LICENSE.txt @@ -0,0 +1,29 @@ +Copyright (c) 2014 by the Werkzeug Team, see AUTHORS for more details. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * The names of the contributors may not be used to endorse or + promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/app/lib/Werkzeug-0.12.1.dist-info/METADATA b/app/lib/Werkzeug-0.12.1.dist-info/METADATA new file mode 100644 index 0000000..86c5bdd --- /dev/null +++ b/app/lib/Werkzeug-0.12.1.dist-info/METADATA @@ -0,0 +1,83 @@ +Metadata-Version: 2.0 +Name: Werkzeug +Version: 0.12.1 +Summary: The Swiss Army knife of Python web development +Home-page: http://werkzeug.pocoo.org/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +License: BSD +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Provides-Extra: termcolor +Requires-Dist: termcolor; extra == 'termcolor' +Provides-Extra: watchdog +Requires-Dist: watchdog; extra == 'watchdog' + +Werkzeug +======== + +Werkzeug started as simple collection of various utilities for WSGI +applications and has become one of the most advanced WSGI utility +modules. It includes a powerful debugger, full featured request and +response objects, HTTP utilities to handle entity tags, cache control +headers, HTTP dates, cookie handling, file uploads, a powerful URL +routing system and a bunch of community contributed addon modules. + +Werkzeug is unicode aware and doesn't enforce a specific template +engine, database adapter or anything else. It doesn't even enforce +a specific way of handling requests and leaves all that up to the +developer. It's most useful for end user applications which should work +on as many server environments as possible (such as blogs, wikis, +bulletin boards, etc.). + +Details and example applications are available on the +`Werkzeug website `_. + + +Features +-------- + +- unicode awareness + +- request and response objects + +- various utility functions for dealing with HTTP headers such as + `Accept` and `Cache-Control` headers. + +- thread local objects with proper cleanup at request end + +- an interactive debugger + +- A simple WSGI server with support for threading and forking + with an automatic reloader. + +- a flexible URL routing system with REST support. + +- fully WSGI compatible + + +Development Version +------------------- + +The Werkzeug development version can be installed by cloning the git +repository from `github`_:: + + git clone git@github.com:pallets/werkzeug.git + +.. 
_github: http://github.com/pallets/werkzeug + + diff --git a/app/lib/Werkzeug-0.12.1.dist-info/RECORD b/app/lib/Werkzeug-0.12.1.dist-info/RECORD new file mode 100644 index 0000000..d6c4b77 --- /dev/null +++ b/app/lib/Werkzeug-0.12.1.dist-info/RECORD @@ -0,0 +1,94 @@ +werkzeug/posixemulation.py,sha256=xEF2Bxc-vUCPkiu4IbfWVd3LW7DROYAT-ExW6THqyzw,3519 +werkzeug/security.py,sha256=rbd9Q-Xga1NldPzuiVW5czvBGUXkhWARm_lxrT1Tx54,8990 +werkzeug/__init__.py,sha256=xQyZHWJtl_P88xARoyNi4QXv22yiApBfTRHsob__IFs,6864 +werkzeug/testapp.py,sha256=3HQRW1sHZKXuAjCvFMet4KXtQG3loYTFnvn6LWt-4zI,9396 +werkzeug/http.py,sha256=nrk-ASJzcKOuoBEz274TWA8jKt0CQSOBZuP_A0UASTA,36658 +werkzeug/routing.py,sha256=g25wg0GNfff8WcfRlc1ZxTGvz1KbVj09w2S7wxopseQ,66746 +werkzeug/utils.py,sha256=lkybtv_mq35zV1qhelvEcILTzrMUwZ9yon6E8XwapJE,22972 +werkzeug/exceptions.py,sha256=3wp95Hqj9FqV8MdikV99JRcHse_fSMn27V8tgP5Hw2c,20505 +werkzeug/_reloader.py,sha256=NkIXQCTa6b22wWLpXob_jIVUxux8LtAsfWehLkKt0iM,8816 +werkzeug/formparser.py,sha256=DxN53eOCb6i7PxqtldrF2Kv9Mx00BqW297N4t-RxkWE,21241 +werkzeug/_compat.py,sha256=8c4U9o6A_TR9nKCcTbpZNxpqCXcXDVIbFawwKM2s92c,6311 +werkzeug/datastructures.py,sha256=uajnldfkuVXEcE92qW-uo9YUAprO3Yh5dPDQfnUL9eE,89075 +werkzeug/wrappers.py,sha256=wceh1RhvhIZVzKuok3XMQ5jqjYYCEYv5JqKY3Nc_oRY,82986 +werkzeug/test.py,sha256=xnabNSpty66ftZiXHcoZaYFP1E4WUNxydw5Oe8Mjhoo,34795 +werkzeug/urls.py,sha256=fSbI4Gb29_p02Zk21VAZQRN1QdOVY9CNTgpb2rbajNQ,36710 +werkzeug/script.py,sha256=Jh9OAktqjLNc_IBBUatVM7uP5LDcbxaYA8n2ObnS4bo,11666 +werkzeug/useragents.py,sha256=Ck3G977Y0Rzdk9wFcLpL0PyOrONtdK1_d2Zexb78cX4,5640 +werkzeug/local.py,sha256=QdQhWV5L8p1Y1CJ1CDStwxaUs24SuN5aebHwjVD08C8,14553 +werkzeug/serving.py,sha256=bN5nO4zTqzs-QX4rBxbW1LVF5af0c_vCokhZHO4muUE,28961 +werkzeug/filesystem.py,sha256=hHWeWo_gqLMzTRfYt8-7n2wWcWUNTnDyudQDLOBEICE,2175 +werkzeug/wsgi.py,sha256=TjPo5ups3NI1RVVGdMvd3XaceqFtqlMX5X169gWWFrQ,42838 +werkzeug/_internal.py,sha256=sE2JbLnMzN9mRI1iipTYWrFAGEWaZVECqtHAiNEhqUE,13841 +werkzeug/contrib/fixers.py,sha256=gR06T-w71ur-tHQ_31kP_4jpOncPJ4Wc1dOqTvYusr8,10179 +werkzeug/contrib/limiter.py,sha256=iS8-ahPZ-JLRnmfIBzxpm7O_s3lPsiDMVWv7llAIDCI,1334 +werkzeug/contrib/__init__.py,sha256=f7PfttZhbrImqpr5Ezre8CXgwvcGUJK7zWNpO34WWrw,623 +werkzeug/contrib/testtools.py,sha256=G9xN-qeihJlhExrIZMCahvQOIDxdL9NiX874jiiHFMs,2453 +werkzeug/contrib/iterio.py,sha256=RlqDvGhz0RneTpzE8dVc-yWCUv4nkPl1jEc_EDp2fH0,10814 +werkzeug/contrib/cache.py,sha256=nyUUxsS0MTHiFmu-481y9PHd8NvWH5pzCoEX1yA0mHY,30341 +werkzeug/contrib/securecookie.py,sha256=bDsAJmslkwmXjycnPjEjWtfLBvhz0ud4z3k7tdezUVs,12174 +werkzeug/contrib/lint.py,sha256=qZlmqiWJ5tQJOEzLnPmHWA8eUEpcBIWkAb_V2RKJg4o,12558 +werkzeug/contrib/profiler.py,sha256=ISwCWvwVyGpDLRBRpLjo_qUWma6GXYBrTAco4PEQSHY,5151 +werkzeug/contrib/wrappers.py,sha256=v7OYlz7wQtDlS9fey75UiRZ1IkUWqCpzbhsLy4k14Hw,10398 +werkzeug/contrib/atom.py,sha256=qqfJcfIn2RYY-3hO3Oz0aLq9YuNubcPQ_KZcNsDwVJo,15575 +werkzeug/contrib/jsrouting.py,sha256=QTmgeDoKXvNK02KzXgx9lr3cAH6fAzpwF5bBdPNvJPs,8564 +werkzeug/contrib/sessions.py,sha256=39LVNvLbm5JWpbxM79WC2l87MJFbqeISARjwYbkJatw,12577 +werkzeug/debug/console.py,sha256=n3-dsKk1TsjnN-u4ZgmuWCU_HO0qw5IA7ttjhyyMM6I,5607 +werkzeug/debug/repr.py,sha256=bKqstDYGfECpeLerd48s_hxuqK4b6UWnjMu3d_DHO8I,9340 +werkzeug/debug/__init__.py,sha256=GTsOsjE3PqUAlsUVm2Mgc_KWA2kjjSsUz0JsM7Qu41w,17266 +werkzeug/debug/tbtools.py,sha256=rBudXCmkVdAKIcdhxANxgf09g6kQjJWW9_5bjSpr4OY,18451 +werkzeug/debug/shared/less.png,sha256=-4-kNRaXJSONVLahrQKUxMwXGm9R4OnZ9SxDGpHlIR4,191 
+werkzeug/debug/shared/source.png,sha256=RoGcBTE4CyCB85GBuDGTFlAnUqxwTBiIfDqW15EpnUQ,818 +werkzeug/debug/shared/debugger.js,sha256=PKPVYuyO4SX1hkqLOwCLvmIEO5154WatFYaXE-zIfKI,6264 +werkzeug/debug/shared/style.css,sha256=IEO0PC2pWmh2aEyGCaN--txuWsRCliuhlbEhPDFwh0A,6270 +werkzeug/debug/shared/console.png,sha256=bxax6RXXlvOij_KeqvSNX0ojJf83YbnZ7my-3Gx9w2A,507 +werkzeug/debug/shared/FONT_LICENSE,sha256=LwAVEI1oYnvXiNMT9SnCH_TaLCxCpeHziDrMg0gPkAI,4673 +werkzeug/debug/shared/jquery.js,sha256=7LkWEzqTdpEfELxcZZlS6wAx5Ff13zZ83lYO2_ujj7g,95957 +werkzeug/debug/shared/ubuntu.ttf,sha256=1eaHFyepmy4FyDvjLVzpITrGEBu_CZYY94jE0nED1c0,70220 +werkzeug/debug/shared/more.png,sha256=GngN7CioHQoV58rH6ojnkYi8c_qED2Aka5FO5UXrReY,200 +Werkzeug-0.12.1.dist-info/DESCRIPTION.rst,sha256=z9r9xqJ0fYSAn1Tz7KRBdFGDerL2y4pHWSW_72pUgTc,1591 +Werkzeug-0.12.1.dist-info/metadata.json,sha256=gzIVhjk_QMpO2SldxIcRtgkXI3U-x92yMDpyuMy8-WM,1276 +Werkzeug-0.12.1.dist-info/RECORD,, +Werkzeug-0.12.1.dist-info/top_level.txt,sha256=QRyj2VjwJoQkrwjwFIOlB8Xg3r9un0NtqVHQF-15xaw,9 +Werkzeug-0.12.1.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 +Werkzeug-0.12.1.dist-info/LICENSE.txt,sha256=F84h8-PZAuC-Hq-_252D3yhH6mqIc-WUbXUPbfOtjXM,1532 +Werkzeug-0.12.1.dist-info/METADATA,sha256=QqCGwUjZaX70SDkofJYsMxbG6UfcXLb6pkvSMGKN5DA,2738 +werkzeug/__pycache__/testapp.cpython-34.pyc,, +werkzeug/debug/__pycache__/console.cpython-34.pyc,, +werkzeug/__pycache__/formparser.cpython-34.pyc,, +werkzeug/__pycache__/test.cpython-34.pyc,, +werkzeug/__pycache__/wsgi.cpython-34.pyc,, +werkzeug/contrib/__pycache__/iterio.cpython-34.pyc,, +werkzeug/__pycache__/urls.cpython-34.pyc,, +werkzeug/debug/__pycache__/tbtools.cpython-34.pyc,, +werkzeug/__pycache__/wrappers.cpython-34.pyc,, +werkzeug/debug/__pycache__/__init__.cpython-34.pyc,, +werkzeug/__pycache__/_reloader.cpython-34.pyc,, +werkzeug/contrib/__pycache__/securecookie.cpython-34.pyc,, +werkzeug/contrib/__pycache__/lint.cpython-34.pyc,, +werkzeug/__pycache__/security.cpython-34.pyc,, +werkzeug/contrib/__pycache__/profiler.cpython-34.pyc,, +werkzeug/contrib/__pycache__/atom.cpython-34.pyc,, +werkzeug/__pycache__/filesystem.cpython-34.pyc,, +werkzeug/contrib/__pycache__/fixers.cpython-34.pyc,, +werkzeug/__pycache__/exceptions.cpython-34.pyc,, +werkzeug/contrib/__pycache__/sessions.cpython-34.pyc,, +werkzeug/__pycache__/utils.cpython-34.pyc,, +werkzeug/__pycache__/routing.cpython-34.pyc,, +werkzeug/contrib/__pycache__/wrappers.cpython-34.pyc,, +werkzeug/__pycache__/posixemulation.cpython-34.pyc,, +werkzeug/__pycache__/useragents.cpython-34.pyc,, +werkzeug/contrib/__pycache__/limiter.cpython-34.pyc,, +werkzeug/__pycache__/_compat.cpython-34.pyc,, +werkzeug/__pycache__/http.cpython-34.pyc,, +werkzeug/__pycache__/local.cpython-34.pyc,, +werkzeug/__pycache__/script.cpython-34.pyc,, +werkzeug/__pycache__/datastructures.cpython-34.pyc,, +werkzeug/contrib/__pycache__/__init__.cpython-34.pyc,, +werkzeug/contrib/__pycache__/cache.cpython-34.pyc,, +werkzeug/debug/__pycache__/repr.cpython-34.pyc,, +werkzeug/contrib/__pycache__/testtools.cpython-34.pyc,, +werkzeug/__pycache__/serving.cpython-34.pyc,, +werkzeug/__pycache__/__init__.cpython-34.pyc,, +werkzeug/contrib/__pycache__/jsrouting.cpython-34.pyc,, +werkzeug/__pycache__/_internal.cpython-34.pyc,, diff --git a/app/lib/Werkzeug-0.12.1.dist-info/WHEEL b/app/lib/Werkzeug-0.12.1.dist-info/WHEEL new file mode 100644 index 0000000..9dff69d --- /dev/null +++ b/app/lib/Werkzeug-0.12.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: 
bdist_wheel (0.24.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/app/lib/Werkzeug-0.12.1.dist-info/metadata.json b/app/lib/Werkzeug-0.12.1.dist-info/metadata.json new file mode 100644 index 0000000..4fc2cf2 --- /dev/null +++ b/app/lib/Werkzeug-0.12.1.dist-info/metadata.json @@ -0,0 +1 @@ +{"license": "BSD", "name": "Werkzeug", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "The Swiss Army knife of Python web development", "platform": "any", "run_requires": [{"requires": ["watchdog"], "extra": "watchdog"}, {"requires": ["termcolor"], "extra": "termcolor"}], "version": "0.12.1", "extensions": {"python.details": {"project_urls": {"Home": "http://werkzeug.pocoo.org/"}, "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "contacts": [{"role": "author", "email": "armin.ronacher@active-4.com", "name": "Armin Ronacher"}]}}, "classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules"], "extras": ["termcolor", "watchdog"]} \ No newline at end of file diff --git a/app/lib/Werkzeug-0.12.1.dist-info/top_level.txt b/app/lib/Werkzeug-0.12.1.dist-info/top_level.txt new file mode 100644 index 0000000..6fe8da8 --- /dev/null +++ b/app/lib/Werkzeug-0.12.1.dist-info/top_level.txt @@ -0,0 +1 @@ +werkzeug diff --git a/app/lib/click-6.7.dist-info/DESCRIPTION.rst b/app/lib/click-6.7.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..e118723 --- /dev/null +++ b/app/lib/click-6.7.dist-info/DESCRIPTION.rst @@ -0,0 +1,3 @@ +UNKNOWN + + diff --git a/app/lib/click-6.7.dist-info/METADATA b/app/lib/click-6.7.dist-info/METADATA new file mode 100644 index 0000000..1f10885 --- /dev/null +++ b/app/lib/click-6.7.dist-info/METADATA @@ -0,0 +1,16 @@ +Metadata-Version: 2.0 +Name: click +Version: 6.7 +Summary: A simple wrapper around optparse for powerful command line utilities. 
+Home-page: http://github.com/mitsuhiko/click +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +License: UNKNOWN +Platform: UNKNOWN +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 + +UNKNOWN + + diff --git a/app/lib/click-6.7.dist-info/RECORD b/app/lib/click-6.7.dist-info/RECORD new file mode 100644 index 0000000..841d846 --- /dev/null +++ b/app/lib/click-6.7.dist-info/RECORD @@ -0,0 +1,40 @@ +click/__init__.py,sha256=k8R00cFKWI8dhDVKQeLBlAdNh1CxerMEDRiGnr32gdw,2858 +click/_bashcomplete.py,sha256=82rMiibtEurdwBq60NHXVCBuGXJHDpblFO9o2YxJDF0,2423 +click/_compat.py,sha256=j59MpzxYGE-fTGj0A5sg8UI8GhHod1XMojiCA0jvbL0,21011 +click/_termui_impl.py,sha256=Ol1JJhvBRw3l8j1WIU0tOWjQtxxmwGE44lFDbzDqzoA,16395 +click/_textwrap.py,sha256=gwS4m7bdQiJnzaDG8osFcRb-5vn4t4l2qSCy-5csCEc,1198 +click/_unicodefun.py,sha256=A3UOzJw6lEZyol2SBg3fNXgweTutaOzkJ61OB7vik3Y,4204 +click/_winconsole.py,sha256=MzG46DEYPoRyx4SO7EIhFuFZHESgooAfJLIukbB6p5c,7790 +click/core.py,sha256=M0nJ6Kkye7XZXYG7HCbkJWSfy14WHV6bQmGLACrOhKw,70254 +click/decorators.py,sha256=y7CX2needh8iRWafj-QS_hGQFsN24eyXAhx5Y2ATwas,10941 +click/exceptions.py,sha256=rOa0pP3PbSy0_AAPOW9irBEM8AJ3BySN-4z2VUwFVo4,6788 +click/formatting.py,sha256=eh-cypTUAhpI3HD-K4ZpR3vCiURIO62xXvKkR3tNUTM,8889 +click/globals.py,sha256=PAgnKvGxq4YuEIldw3lgYOGBLYwsyxnm1IByBX3BFXo,1515 +click/parser.py,sha256=i01xgYuIA6AwQWEXjshwHSwnTR3gUep4FxJIfyW4ta4,15510 +click/termui.py,sha256=Bp99MSWQtyoWe1_7HggDmA77n--3KLxu7NsZMFMaCUo,21008 +click/testing.py,sha256=kJ9mjtJgwNAlkgKcFf9-ISxufmaPDbbuOHVC9WIvKdY,11002 +click/types.py,sha256=ZGb2lmFs5Vwd9loTRIMbGcqhPVOql8mGoBhWBRT6V4E,18864 +click/utils.py,sha256=1jalPlkUU28JReTEQeeSFtbJd-SirYWBNfjtELBKzT4,14916 +click-6.7.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10 +click-6.7.dist-info/METADATA,sha256=l6lAyogIUXiHKUK_rWguef-EMcvO5C6bXzFCNCcblbQ,424 +click-6.7.dist-info/RECORD,, +click-6.7.dist-info/WHEEL,sha256=5wvfB7GvgZAbKBSE9uX9Zbi6LCL-_KgezgHblXhCRnM,113 +click-6.7.dist-info/metadata.json,sha256=qg0uO6amNHkIkOxnmWX7Xa_DNQMQ62Q6drivuP9Gh1c,571 +click-6.7.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6 +click/__pycache__/_bashcomplete.cpython-34.pyc,, +click/__pycache__/types.cpython-34.pyc,, +click/__pycache__/parser.cpython-34.pyc,, +click/__pycache__/testing.cpython-34.pyc,, +click/__pycache__/_textwrap.cpython-34.pyc,, +click/__pycache__/core.cpython-34.pyc,, +click/__pycache__/_winconsole.cpython-34.pyc,, +click/__pycache__/decorators.cpython-34.pyc,, +click/__pycache__/_compat.cpython-34.pyc,, +click/__pycache__/termui.cpython-34.pyc,, +click/__pycache__/_unicodefun.cpython-34.pyc,, +click/__pycache__/__init__.cpython-34.pyc,, +click/__pycache__/_termui_impl.cpython-34.pyc,, +click/__pycache__/globals.cpython-34.pyc,, +click/__pycache__/exceptions.cpython-34.pyc,, +click/__pycache__/utils.cpython-34.pyc,, +click/__pycache__/formatting.cpython-34.pyc,, diff --git a/app/lib/click-6.7.dist-info/WHEEL b/app/lib/click-6.7.dist-info/WHEEL new file mode 100644 index 0000000..7bf9daa --- /dev/null +++ b/app/lib/click-6.7.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.30.0.a0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/app/lib/click-6.7.dist-info/metadata.json b/app/lib/click-6.7.dist-info/metadata.json new file mode 100644 index 0000000..0a4cfb1 --- /dev/null +++ 
b/app/lib/click-6.7.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["License :: OSI Approved :: BSD License", "Programming Language :: Python", "Programming Language :: Python :: 3"], "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://github.com/mitsuhiko/click"}}}, "generator": "bdist_wheel (0.30.0.a0)", "metadata_version": "2.0", "name": "click", "summary": "A simple wrapper around optparse for powerful command line utilities.", "version": "6.7"} \ No newline at end of file diff --git a/app/lib/click-6.7.dist-info/top_level.txt b/app/lib/click-6.7.dist-info/top_level.txt new file mode 100644 index 0000000..dca9a90 --- /dev/null +++ b/app/lib/click-6.7.dist-info/top_level.txt @@ -0,0 +1 @@ +click diff --git a/app/lib/click/__init__.py b/app/lib/click/__init__.py new file mode 100644 index 0000000..971e55d --- /dev/null +++ b/app/lib/click/__init__.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +""" + click + ~~~~~ + + Click is a simple Python module that wraps the stdlib's optparse to make + writing command line scripts fun. Unlike other modules, it's based around + a simple API that does not come with too much magic and is composable. + + In case optparse ever gets removed from the stdlib, it will be shipped by + this module. + + :copyright: (c) 2014 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +# Core classes +from .core import Context, BaseCommand, Command, MultiCommand, Group, \ + CommandCollection, Parameter, Option, Argument + +# Globals +from .globals import get_current_context + +# Decorators +from .decorators import pass_context, pass_obj, make_pass_decorator, \ + command, group, argument, option, confirmation_option, \ + password_option, version_option, help_option + +# Types +from .types import ParamType, File, Path, Choice, IntRange, Tuple, \ + STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED + +# Utilities +from .utils import echo, get_binary_stream, get_text_stream, open_file, \ + format_filename, get_app_dir, get_os_args + +# Terminal functions +from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \ + progressbar, clear, style, unstyle, secho, edit, launch, getchar, \ + pause + +# Exceptions +from .exceptions import ClickException, UsageError, BadParameter, \ + FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \ + MissingParameter + +# Formatting +from .formatting import HelpFormatter, wrap_text + +# Parsing +from .parser import OptionParser + + +__all__ = [ + # Core classes + 'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group', + 'CommandCollection', 'Parameter', 'Option', 'Argument', + + # Globals + 'get_current_context', + + # Decorators + 'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group', + 'argument', 'option', 'confirmation_option', 'password_option', + 'version_option', 'help_option', + + # Types + 'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', 'STRING', + 'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED', + + # Utilities + 'echo', 'get_binary_stream', 'get_text_stream', 'open_file', + 'format_filename', 'get_app_dir', 'get_os_args', + + # Terminal functions + 'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager', + 'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch', + 'getchar', 'pause', + + # Exceptions + 'ClickException', 'UsageError', 'BadParameter', 'FileError', + 
'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage', + 'MissingParameter', + + # Formatting + 'HelpFormatter', 'wrap_text', + + # Parsing + 'OptionParser', +] + + +# Controls if click should emit the warning about the use of unicode +# literals. +disable_unicode_literals_warning = False + + +__version__ = '6.7' diff --git a/app/lib/click/_bashcomplete.py b/app/lib/click/_bashcomplete.py new file mode 100644 index 0000000..d9d26d2 --- /dev/null +++ b/app/lib/click/_bashcomplete.py @@ -0,0 +1,83 @@ +import os +import re +from .utils import echo +from .parser import split_arg_string +from .core import MultiCommand, Option + + +COMPLETION_SCRIPT = ''' +%(complete_func)s() { + COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\ + COMP_CWORD=$COMP_CWORD \\ + %(autocomplete_var)s=complete $1 ) ) + return 0 +} + +complete -F %(complete_func)s -o default %(script_names)s +''' + +_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]') + + +def get_completion_script(prog_name, complete_var): + cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_')) + return (COMPLETION_SCRIPT % { + 'complete_func': '_%s_completion' % cf_name, + 'script_names': prog_name, + 'autocomplete_var': complete_var, + }).strip() + ';' + + +def resolve_ctx(cli, prog_name, args): + ctx = cli.make_context(prog_name, args, resilient_parsing=True) + while ctx.protected_args + ctx.args and isinstance(ctx.command, MultiCommand): + a = ctx.protected_args + ctx.args + cmd = ctx.command.get_command(ctx, a[0]) + if cmd is None: + return None + ctx = cmd.make_context(a[0], a[1:], parent=ctx, resilient_parsing=True) + return ctx + + +def get_choices(cli, prog_name, args, incomplete): + ctx = resolve_ctx(cli, prog_name, args) + if ctx is None: + return + + choices = [] + if incomplete and not incomplete[:1].isalnum(): + for param in ctx.command.params: + if not isinstance(param, Option): + continue + choices.extend(param.opts) + choices.extend(param.secondary_opts) + elif isinstance(ctx.command, MultiCommand): + choices.extend(ctx.command.list_commands(ctx)) + + for item in choices: + if item.startswith(incomplete): + yield item + + +def do_complete(cli, prog_name): + cwords = split_arg_string(os.environ['COMP_WORDS']) + cword = int(os.environ['COMP_CWORD']) + args = cwords[1:cword] + try: + incomplete = cwords[cword] + except IndexError: + incomplete = '' + + for item in get_choices(cli, prog_name, args, incomplete): + echo(item) + + return True + + +def bashcomplete(cli, prog_name, complete_var, complete_instr): + if complete_instr == 'source': + echo(get_completion_script(prog_name, complete_var)) + return True + elif complete_instr == 'complete': + return do_complete(cli, prog_name) + return False diff --git a/app/lib/click/_compat.py b/app/lib/click/_compat.py new file mode 100644 index 0000000..2b43412 --- /dev/null +++ b/app/lib/click/_compat.py @@ -0,0 +1,648 @@ +import re +import io +import os +import sys +import codecs +from weakref import WeakKeyDictionary + + +PY2 = sys.version_info[0] == 2 +WIN = sys.platform.startswith('win') +DEFAULT_COLUMNS = 80 + + +_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])') + + +def get_filesystem_encoding(): + return sys.getfilesystemencoding() or sys.getdefaultencoding() + + +def _make_text_stream(stream, encoding, errors): + if encoding is None: + encoding = get_best_encoding(stream) + if errors is None: + errors = 'replace' + return _NonClosingTextIOWrapper(stream, encoding, errors, + line_buffering=True) + + +def is_ascii_encoding(encoding): + """Checks if a given 
encoding is ascii."""
+    try:
+        return codecs.lookup(encoding).name == 'ascii'
+    except LookupError:
+        return False
+
+
+def get_best_encoding(stream):
+    """Returns the default stream encoding if not found."""
+    rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
+    if is_ascii_encoding(rv):
+        return 'utf-8'
+    return rv
+
+
+class _NonClosingTextIOWrapper(io.TextIOWrapper):
+
+    def __init__(self, stream, encoding, errors, **extra):
+        self._stream = stream = _FixupStream(stream)
+        io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
+
+    # The io module is a place where the Python 3 text behavior
+    # was forced upon Python 2, so we need to unbreak
+    # it to look like Python 2.
+    if PY2:
+        def write(self, x):
+            if isinstance(x, str) or is_bytes(x):
+                try:
+                    self.flush()
+                except Exception:
+                    pass
+                return self.buffer.write(str(x))
+            return io.TextIOWrapper.write(self, x)
+
+        def writelines(self, lines):
+            for line in lines:
+                self.write(line)
+
+    def __del__(self):
+        try:
+            self.detach()
+        except Exception:
+            pass
+
+    def isatty(self):
+        # https://bitbucket.org/pypy/pypy/issue/1803
+        return self._stream.isatty()
+
+
+class _FixupStream(object):
+    """The new io interface needs more from streams than streams
+    traditionally implement. As such, this fix-up code is necessary in
+    some circumstances.
+    """
+
+    def __init__(self, stream):
+        self._stream = stream
+
+    def __getattr__(self, name):
+        return getattr(self._stream, name)
+
+    def read1(self, size):
+        f = getattr(self._stream, 'read1', None)
+        if f is not None:
+            return f(size)
+        # We only dispatch to readline instead of read in Python 2 as we
+        # do not want to cause problems with the different implementation
+        # of line buffering.
+        if PY2:
+            return self._stream.readline(size)
+        return self._stream.read(size)
+
+    def readable(self):
+        x = getattr(self._stream, 'readable', None)
+        if x is not None:
+            return x()
+        try:
+            self._stream.read(0)
+        except Exception:
+            return False
+        return True
+
+    def writable(self):
+        x = getattr(self._stream, 'writable', None)
+        if x is not None:
+            return x()
+        try:
+            self._stream.write('')
+        except Exception:
+            try:
+                self._stream.write(b'')
+            except Exception:
+                return False
+        return True
+
+    def seekable(self):
+        x = getattr(self._stream, 'seekable', None)
+        if x is not None:
+            return x()
+        try:
+            self._stream.seek(self._stream.tell())
+        except Exception:
+            return False
+        return True
+
+
+if PY2:
+    text_type = unicode
+    bytes = str
+    raw_input = raw_input
+    string_types = (str, unicode)
+    iteritems = lambda x: x.iteritems()
+    range_type = xrange
+
+    def is_bytes(x):
+        return isinstance(x, (buffer, bytearray))
+
+    _identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
+
+    # For Windows, we need to force stdout/stdin/stderr to binary if it's
+    # fetched for that. This obviously is not the most correct way to do
+    # it as it changes global state. Unfortunately, there does not seem to
+    # be a clear better way to do it as just reopening the file in binary
+    # mode does not change anything.
+    #
+    # An option would be to do what Python 3 does and to open the file as
+    # binary only, patch it back to the system, and then use a wrapper
+    # stream that converts newlines. It's not quite clear what's the
+    # correct option here.
+    #
+    # This code also lives in _winconsole for the fallback to the console
+    # emulation stream.
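+    #
+    # Illustration (hypothetical caller, not part of the module): the binary
+    # helpers defined below are consumed like this, and the payload passes
+    # through without newline or encoding translation:
+    #
+    #     out = get_binary_stdout()
+    #     out.write(b'\x00\x01 raw bytes')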
+    #
+    # There are also Windows environments where the `msvcrt` module is not
+    # available (which is why we use try-catch instead of the WIN variable
+    # here), such as the Google App Engine development server on Windows. In
+    # those cases there is just nothing we can do.
+    try:
+        import msvcrt
+    except ImportError:
+        set_binary_mode = lambda x: x
+    else:
+        def set_binary_mode(f):
+            try:
+                fileno = f.fileno()
+            except Exception:
+                pass
+            else:
+                msvcrt.setmode(fileno, os.O_BINARY)
+            return f
+
+    def isidentifier(x):
+        return _identifier_re.search(x) is not None
+
+    def get_binary_stdin():
+        return set_binary_mode(sys.stdin)
+
+    def get_binary_stdout():
+        return set_binary_mode(sys.stdout)
+
+    def get_binary_stderr():
+        return set_binary_mode(sys.stderr)
+
+    def get_text_stdin(encoding=None, errors=None):
+        rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+        if rv is not None:
+            return rv
+        return _make_text_stream(sys.stdin, encoding, errors)
+
+    def get_text_stdout(encoding=None, errors=None):
+        rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+        if rv is not None:
+            return rv
+        return _make_text_stream(sys.stdout, encoding, errors)
+
+    def get_text_stderr(encoding=None, errors=None):
+        rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+        if rv is not None:
+            return rv
+        return _make_text_stream(sys.stderr, encoding, errors)
+
+    def filename_to_ui(value):
+        if isinstance(value, bytes):
+            value = value.decode(get_filesystem_encoding(), 'replace')
+        return value
+else:
+    import io
+    text_type = str
+    raw_input = input
+    string_types = (str,)
+    range_type = range
+    isidentifier = lambda x: x.isidentifier()
+    iteritems = lambda x: iter(x.items())
+
+    def is_bytes(x):
+        return isinstance(x, (bytes, memoryview, bytearray))
+
+    def _is_binary_reader(stream, default=False):
+        try:
+            return isinstance(stream.read(0), bytes)
+        except Exception:
+            return default
+            # This happens in some cases where the stream was already
+            # closed. In this case, we assume the default.
+
+    def _is_binary_writer(stream, default=False):
+        try:
+            stream.write(b'')
+        except Exception:
+            try:
+                stream.write('')
+                return False
+            except Exception:
+                pass
+            return default
+        return True
+
+    def _find_binary_reader(stream):
+        # We need to figure out if the given stream is already binary.
+        # This can happen because the official docs recommend detaching
+        # the streams to get binary streams. Some code might do this, so
+        # we need to deal with this case explicitly.
+        if _is_binary_reader(stream, False):
+            return stream
+
+        buf = getattr(stream, 'buffer', None)
+
+        # Same situation here; this time we assume that the buffer is
+        # actually binary in case it's closed.
+        if buf is not None and _is_binary_reader(buf, True):
+            return buf
+
+    def _find_binary_writer(stream):
+        # We need to figure out if the given stream is already binary.
+        # This can happen because the official docs recommend detaching
+        # the streams to get binary streams. Some code might do this, so
+        # we need to deal with this case explicitly.
+        if _is_binary_writer(stream, False):
+            return stream
+
+        buf = getattr(stream, 'buffer', None)
+
+        # Same situation here; this time we assume that the buffer is
+        # actually binary in case it's closed.
+        if buf is not None and _is_binary_writer(buf, True):
+            return buf
+
+    def _stream_is_misconfigured(stream):
+        """A stream is misconfigured if its encoding is ASCII."""
+        # If the stream does not have an encoding set, we assume it's set
+        # to ASCII.
This appears to happen in certain unittest + # environments. It's not quite clear what the correct behavior is + # but this at least will force Click to recover somehow. + return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii') + + def _is_compatible_text_stream(stream, encoding, errors): + stream_encoding = getattr(stream, 'encoding', None) + stream_errors = getattr(stream, 'errors', None) + + # Perfect match. + if stream_encoding == encoding and stream_errors == errors: + return True + + # Otherwise, it's only a compatible stream if we did not ask for + # an encoding. + if encoding is None: + return stream_encoding is not None + + return False + + def _force_correct_text_reader(text_reader, encoding, errors): + if _is_binary_reader(text_reader, False): + binary_reader = text_reader + else: + # If there is no target encoding set, we need to verify that the + # reader is not actually misconfigured. + if encoding is None and not _stream_is_misconfigured(text_reader): + return text_reader + + if _is_compatible_text_stream(text_reader, encoding, errors): + return text_reader + + # If the reader has no encoding, we try to find the underlying + # binary reader for it. If that fails because the environment is + # misconfigured, we silently go with the same reader because this + # is too common to happen. In that case, mojibake is better than + # exceptions. + binary_reader = _find_binary_reader(text_reader) + if binary_reader is None: + return text_reader + + # At this point, we default the errors to replace instead of strict + # because nobody handles those errors anyways and at this point + # we're so fundamentally fucked that nothing can repair it. + if errors is None: + errors = 'replace' + return _make_text_stream(binary_reader, encoding, errors) + + def _force_correct_text_writer(text_writer, encoding, errors): + if _is_binary_writer(text_writer, False): + binary_writer = text_writer + else: + # If there is no target encoding set, we need to verify that the + # writer is not actually misconfigured. + if encoding is None and not _stream_is_misconfigured(text_writer): + return text_writer + + if _is_compatible_text_stream(text_writer, encoding, errors): + return text_writer + + # If the writer has no encoding, we try to find the underlying + # binary writer for it. If that fails because the environment is + # misconfigured, we silently go with the same writer because this + # is too common to happen. In that case, mojibake is better than + # exceptions. + binary_writer = _find_binary_writer(text_writer) + if binary_writer is None: + return text_writer + + # At this point, we default the errors to replace instead of strict + # because nobody handles those errors anyways and at this point + # we're so fundamentally fucked that nothing can repair it. 
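+        #
+        # Illustration (hypothetical caller, not part of the module): the
+        # public wrappers built on this machinery keep working even when
+        # sys.stdout is misconfigured to ASCII:
+        #
+        #     out = get_text_stdout(encoding='utf-8')
+        #     out.write(u'\u2603')  # no UnicodeEncodeError
+        #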
+ if errors is None: + errors = 'replace' + return _make_text_stream(binary_writer, encoding, errors) + + def get_binary_stdin(): + reader = _find_binary_reader(sys.stdin) + if reader is None: + raise RuntimeError('Was not able to determine binary ' + 'stream for sys.stdin.') + return reader + + def get_binary_stdout(): + writer = _find_binary_writer(sys.stdout) + if writer is None: + raise RuntimeError('Was not able to determine binary ' + 'stream for sys.stdout.') + return writer + + def get_binary_stderr(): + writer = _find_binary_writer(sys.stderr) + if writer is None: + raise RuntimeError('Was not able to determine binary ' + 'stream for sys.stderr.') + return writer + + def get_text_stdin(encoding=None, errors=None): + rv = _get_windows_console_stream(sys.stdin, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_reader(sys.stdin, encoding, errors) + + def get_text_stdout(encoding=None, errors=None): + rv = _get_windows_console_stream(sys.stdout, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_writer(sys.stdout, encoding, errors) + + def get_text_stderr(encoding=None, errors=None): + rv = _get_windows_console_stream(sys.stderr, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_writer(sys.stderr, encoding, errors) + + def filename_to_ui(value): + if isinstance(value, bytes): + value = value.decode(get_filesystem_encoding(), 'replace') + else: + value = value.encode('utf-8', 'surrogateescape') \ + .decode('utf-8', 'replace') + return value + + +def get_streerror(e, default=None): + if hasattr(e, 'strerror'): + msg = e.strerror + else: + if default is not None: + msg = default + else: + msg = str(e) + if isinstance(msg, bytes): + msg = msg.decode('utf-8', 'replace') + return msg + + +def open_stream(filename, mode='r', encoding=None, errors='strict', + atomic=False): + # Standard streams first. These are simple because they don't need + # special handling for the atomic flag. It's entirely ignored. + if filename == '-': + if 'w' in mode: + if 'b' in mode: + return get_binary_stdout(), False + return get_text_stdout(encoding=encoding, errors=errors), False + if 'b' in mode: + return get_binary_stdin(), False + return get_text_stdin(encoding=encoding, errors=errors), False + + # Non-atomic writes directly go out through the regular open functions. + if not atomic: + if encoding is None: + return open(filename, mode), True + return io.open(filename, mode, encoding=encoding, errors=errors), True + + # Some usability stuff for atomic writes + if 'a' in mode: + raise ValueError( + 'Appending to an existing file is not supported, because that ' + 'would involve an expensive `copy`-operation to a temporary ' + 'file. Open the file in normal `w`-mode and copy explicitly ' + 'if that\'s what you\'re after.' + ) + if 'x' in mode: + raise ValueError('Use the `overwrite`-parameter instead.') + if 'w' not in mode: + raise ValueError('Atomic writes only make sense with `w`-mode.') + + # Atomic writes are more complicated. They work by opening a file + # as a proxy in the same folder and then using the fdopen + # functionality to wrap it in a Python file. Then we wrap it in an + # atomic file that moves the file over on close. 
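+    #
+    # Usage sketch (hypothetical file name, not part of the module): the
+    # caller sees an ordinary writable object plus a should-close flag.
+    #
+    #     f, should_close = open_stream('out.txt', 'w', atomic=True)
+    #     f.write('all or nothing')
+    #     f.close()  # the temporary file is renamed over out.txt here
+    #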
+ import tempfile + fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename), + prefix='.__atomic-write') + + if encoding is not None: + f = io.open(fd, mode, encoding=encoding, errors=errors) + else: + f = os.fdopen(fd, mode) + + return _AtomicFile(f, tmp_filename, filename), True + + +# Used in a destructor call, needs extra protection from interpreter cleanup. +if hasattr(os, 'replace'): + _replace = os.replace + _can_replace = True +else: + _replace = os.rename + _can_replace = not WIN + + +class _AtomicFile(object): + + def __init__(self, f, tmp_filename, real_filename): + self._f = f + self._tmp_filename = tmp_filename + self._real_filename = real_filename + self.closed = False + + @property + def name(self): + return self._real_filename + + def close(self, delete=False): + if self.closed: + return + self._f.close() + if not _can_replace: + try: + os.remove(self._real_filename) + except OSError: + pass + _replace(self._tmp_filename, self._real_filename) + self.closed = True + + def __getattr__(self, name): + return getattr(self._f, name) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + self.close(delete=exc_type is not None) + + def __repr__(self): + return repr(self._f) + + +auto_wrap_for_ansi = None +colorama = None +get_winterm_size = None + + +def strip_ansi(value): + return _ansi_re.sub('', value) + + +def should_strip_ansi(stream=None, color=None): + if color is None: + if stream is None: + stream = sys.stdin + return not isatty(stream) + return not color + + +# If we're on Windows, we provide transparent integration through +# colorama. This will make ANSI colors through the echo function +# work automatically. +if WIN: + # Windows has a smaller terminal + DEFAULT_COLUMNS = 79 + + from ._winconsole import _get_windows_console_stream + + def _get_argv_encoding(): + import locale + return locale.getpreferredencoding() + + if PY2: + def raw_input(prompt=''): + sys.stderr.flush() + if prompt: + stdout = _default_text_stdout() + stdout.write(prompt) + stdin = _default_text_stdin() + return stdin.readline().rstrip('\r\n') + + try: + import colorama + except ImportError: + pass + else: + _ansi_stream_wrappers = WeakKeyDictionary() + + def auto_wrap_for_ansi(stream, color=None): + """This function wraps a stream so that calls through colorama + are issued to the win32 console API to recolor on demand. It + also ensures to reset the colors if a write call is interrupted + to not destroy the console afterwards. 
+ """ + try: + cached = _ansi_stream_wrappers.get(stream) + except Exception: + cached = None + if cached is not None: + return cached + strip = should_strip_ansi(stream, color) + ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip) + rv = ansi_wrapper.stream + _write = rv.write + + def _safe_write(s): + try: + return _write(s) + except: + ansi_wrapper.reset_all() + raise + + rv.write = _safe_write + try: + _ansi_stream_wrappers[stream] = rv + except Exception: + pass + return rv + + def get_winterm_size(): + win = colorama.win32.GetConsoleScreenBufferInfo( + colorama.win32.STDOUT).srWindow + return win.Right - win.Left, win.Bottom - win.Top +else: + def _get_argv_encoding(): + return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding() + + _get_windows_console_stream = lambda *x: None + + +def term_len(x): + return len(strip_ansi(x)) + + +def isatty(stream): + try: + return stream.isatty() + except Exception: + return False + + +def _make_cached_stream_func(src_func, wrapper_func): + cache = WeakKeyDictionary() + def func(): + stream = src_func() + try: + rv = cache.get(stream) + except Exception: + rv = None + if rv is not None: + return rv + rv = wrapper_func() + try: + cache[stream] = rv + except Exception: + pass + return rv + return func + + +_default_text_stdin = _make_cached_stream_func( + lambda: sys.stdin, get_text_stdin) +_default_text_stdout = _make_cached_stream_func( + lambda: sys.stdout, get_text_stdout) +_default_text_stderr = _make_cached_stream_func( + lambda: sys.stderr, get_text_stderr) + + +binary_streams = { + 'stdin': get_binary_stdin, + 'stdout': get_binary_stdout, + 'stderr': get_binary_stderr, +} + +text_streams = { + 'stdin': get_text_stdin, + 'stdout': get_text_stdout, + 'stderr': get_text_stderr, +} diff --git a/app/lib/click/_termui_impl.py b/app/lib/click/_termui_impl.py new file mode 100644 index 0000000..7cfd3d5 --- /dev/null +++ b/app/lib/click/_termui_impl.py @@ -0,0 +1,547 @@ +""" + click._termui_impl + ~~~~~~~~~~~~~~~~~~ + + This module contains implementations for the termui module. To keep the + import time of Click down, some infrequently used functionality is placed + in this module and only imported as needed. + + :copyright: (c) 2014 by Armin Ronacher. + :license: BSD, see LICENSE for more details. 
+""" +import os +import sys +import time +import math +from ._compat import _default_text_stdout, range_type, PY2, isatty, \ + open_stream, strip_ansi, term_len, get_best_encoding, WIN +from .utils import echo +from .exceptions import ClickException + + +if os.name == 'nt': + BEFORE_BAR = '\r' + AFTER_BAR = '\n' +else: + BEFORE_BAR = '\r\033[?25l' + AFTER_BAR = '\033[?25h\n' + + +def _length_hint(obj): + """Returns the length hint of an object.""" + try: + return len(obj) + except (AttributeError, TypeError): + try: + get_hint = type(obj).__length_hint__ + except AttributeError: + return None + try: + hint = get_hint(obj) + except TypeError: + return None + if hint is NotImplemented or \ + not isinstance(hint, (int, long)) or \ + hint < 0: + return None + return hint + + +class ProgressBar(object): + + def __init__(self, iterable, length=None, fill_char='#', empty_char=' ', + bar_template='%(bar)s', info_sep=' ', show_eta=True, + show_percent=None, show_pos=False, item_show_func=None, + label=None, file=None, color=None, width=30): + self.fill_char = fill_char + self.empty_char = empty_char + self.bar_template = bar_template + self.info_sep = info_sep + self.show_eta = show_eta + self.show_percent = show_percent + self.show_pos = show_pos + self.item_show_func = item_show_func + self.label = label or '' + if file is None: + file = _default_text_stdout() + self.file = file + self.color = color + self.width = width + self.autowidth = width == 0 + + if length is None: + length = _length_hint(iterable) + if iterable is None: + if length is None: + raise TypeError('iterable or length is required') + iterable = range_type(length) + self.iter = iter(iterable) + self.length = length + self.length_known = length is not None + self.pos = 0 + self.avg = [] + self.start = self.last_eta = time.time() + self.eta_known = False + self.finished = False + self.max_width = None + self.entered = False + self.current_item = None + self.is_hidden = not isatty(self.file) + self._last_line = None + + def __enter__(self): + self.entered = True + self.render_progress() + return self + + def __exit__(self, exc_type, exc_value, tb): + self.render_finish() + + def __iter__(self): + if not self.entered: + raise RuntimeError('You need to use progress bars in a with block.') + self.render_progress() + return self + + def render_finish(self): + if self.is_hidden: + return + self.file.write(AFTER_BAR) + self.file.flush() + + @property + def pct(self): + if self.finished: + return 1.0 + return min(self.pos / (float(self.length) or 1), 1.0) + + @property + def time_per_iteration(self): + if not self.avg: + return 0.0 + return sum(self.avg) / float(len(self.avg)) + + @property + def eta(self): + if self.length_known and not self.finished: + return self.time_per_iteration * (self.length - self.pos) + return 0.0 + + def format_eta(self): + if self.eta_known: + t = self.eta + 1 + seconds = t % 60 + t /= 60 + minutes = t % 60 + t /= 60 + hours = t % 24 + t /= 24 + if t > 0: + days = t + return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds) + else: + return '%02d:%02d:%02d' % (hours, minutes, seconds) + return '' + + def format_pos(self): + pos = str(self.pos) + if self.length_known: + pos += '/%s' % self.length + return pos + + def format_pct(self): + return ('% 4d%%' % int(self.pct * 100))[1:] + + def format_progress_line(self): + show_percent = self.show_percent + + info_bits = [] + if self.length_known: + bar_length = int(self.pct * self.width) + bar = self.fill_char * bar_length + bar += self.empty_char * (self.width 
- bar_length) + if show_percent is None: + show_percent = not self.show_pos + else: + if self.finished: + bar = self.fill_char * self.width + else: + bar = list(self.empty_char * (self.width or 1)) + if self.time_per_iteration != 0: + bar[int((math.cos(self.pos * self.time_per_iteration) + / 2.0 + 0.5) * self.width)] = self.fill_char + bar = ''.join(bar) + + if self.show_pos: + info_bits.append(self.format_pos()) + if show_percent: + info_bits.append(self.format_pct()) + if self.show_eta and self.eta_known and not self.finished: + info_bits.append(self.format_eta()) + if self.item_show_func is not None: + item_info = self.item_show_func(self.current_item) + if item_info is not None: + info_bits.append(item_info) + + return (self.bar_template % { + 'label': self.label, + 'bar': bar, + 'info': self.info_sep.join(info_bits) + }).rstrip() + + def render_progress(self): + from .termui import get_terminal_size + nl = False + + if self.is_hidden: + buf = [self.label] + nl = True + else: + buf = [] + # Update width in case the terminal has been resized + if self.autowidth: + old_width = self.width + self.width = 0 + clutter_length = term_len(self.format_progress_line()) + new_width = max(0, get_terminal_size()[0] - clutter_length) + if new_width < old_width: + buf.append(BEFORE_BAR) + buf.append(' ' * self.max_width) + self.max_width = new_width + self.width = new_width + + clear_width = self.width + if self.max_width is not None: + clear_width = self.max_width + + buf.append(BEFORE_BAR) + line = self.format_progress_line() + line_len = term_len(line) + if self.max_width is None or self.max_width < line_len: + self.max_width = line_len + buf.append(line) + + buf.append(' ' * (clear_width - line_len)) + line = ''.join(buf) + + # Render the line only if it changed. 
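+        #
+        # Usage sketch (hypothetical work loop, not part of the module): this
+        # class is normally driven through click.progressbar():
+        #
+        #     with click.progressbar(range(100), label='processing') as bar:
+        #         for item in bar:
+        #             pass  # each step goes update(1) -> render_progress()
+        #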
+        if line != self._last_line:
+            self._last_line = line
+            echo(line, file=self.file, color=self.color, nl=nl)
+            self.file.flush()
+
+    def make_step(self, n_steps):
+        self.pos += n_steps
+        if self.length_known and self.pos >= self.length:
+            self.finished = True
+
+        if (time.time() - self.last_eta) < 1.0:
+            return
+
+        self.last_eta = time.time()
+        self.avg = self.avg[-6:] + [-(self.start - time.time()) / (self.pos)]
+
+        self.eta_known = self.length_known
+
+    def update(self, n_steps):
+        self.make_step(n_steps)
+        self.render_progress()
+
+    def finish(self):
+        self.eta_known = 0
+        self.current_item = None
+        self.finished = True
+
+    def next(self):
+        if self.is_hidden:
+            return next(self.iter)
+        try:
+            rv = next(self.iter)
+            self.current_item = rv
+        except StopIteration:
+            self.finish()
+            self.render_progress()
+            raise StopIteration()
+        else:
+            self.update(1)
+            return rv
+
+    if not PY2:
+        __next__ = next
+        del next
+
+
+def pager(text, color=None):
+    """Decide what method to use for paging through text."""
+    stdout = _default_text_stdout()
+    if not isatty(sys.stdin) or not isatty(stdout):
+        return _nullpager(stdout, text, color)
+    pager_cmd = (os.environ.get('PAGER', None) or '').strip()
+    if pager_cmd:
+        if WIN:
+            return _tempfilepager(text, pager_cmd, color)
+        return _pipepager(text, pager_cmd, color)
+    if os.environ.get('TERM') in ('dumb', 'emacs'):
+        return _nullpager(stdout, text, color)
+    if WIN or sys.platform.startswith('os2'):
+        return _tempfilepager(text, 'more <', color)
+    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
+        return _pipepager(text, 'less', color)
+
+    import tempfile
+    fd, filename = tempfile.mkstemp()
+    os.close(fd)
+    try:
+        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
+            return _pipepager(text, 'more', color)
+        return _nullpager(stdout, text, color)
+    finally:
+        os.unlink(filename)
+
+
+def _pipepager(text, cmd, color):
+    """Page through text by feeding it to another program. Invoking a
+    pager through this might support colors.
+    """
+    import subprocess
+    env = dict(os.environ)
+
+    # If we're piping to less we might support colors under the
+    # condition that the pager passes ANSI codes through, which we infer
+    # from the less flags checked below.
+    cmd_detail = cmd.rsplit('/', 1)[-1].split()
+    if color is None and cmd_detail[0] == 'less':
+        less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:])
+        if not less_flags:
+            env['LESS'] = '-R'
+            color = True
+        elif 'r' in less_flags or 'R' in less_flags:
+            color = True
+
+    if not color:
+        text = strip_ansi(text)
+
+    c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
+                         env=env)
+    encoding = get_best_encoding(c.stdin)
+    try:
+        c.stdin.write(text.encode(encoding, 'replace'))
+        c.stdin.close()
+    except (IOError, KeyboardInterrupt):
+        pass
+
+    # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
+    # search or other commands inside less).
+    #
+    # That means when the user hits ^C, the parent process (click) terminates,
+    # but less is still alive, paging the output and messing up the terminal.
+    #
+    # If the user wants to make the pager exit on ^C, they should set
+    # `LESS='-K'`. It's not our decision to make.
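+    #
+    # Illustration (hypothetical values, not part of the module): both knobs
+    # are read from the caller's environment at invocation time.
+    #
+    #     os.environ['PAGER'] = 'less'
+    #     os.environ['LESS'] = '-RK'  # -R passes colors through, -K exits on ^C
+    #     pager('\x1b[32mgreen\x1b[0m\n' * 500)
+    #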
+ while True: + try: + c.wait() + except KeyboardInterrupt: + pass + else: + break + + +def _tempfilepager(text, cmd, color): + """Page through text by invoking a program on a temporary file.""" + import tempfile + filename = tempfile.mktemp() + if not color: + text = strip_ansi(text) + encoding = get_best_encoding(sys.stdout) + with open_stream(filename, 'wb')[0] as f: + f.write(text.encode(encoding)) + try: + os.system(cmd + ' "' + filename + '"') + finally: + os.unlink(filename) + + +def _nullpager(stream, text, color): + """Simply print unformatted text. This is the ultimate fallback.""" + if not color: + text = strip_ansi(text) + stream.write(text) + + +class Editor(object): + + def __init__(self, editor=None, env=None, require_save=True, + extension='.txt'): + self.editor = editor + self.env = env + self.require_save = require_save + self.extension = extension + + def get_editor(self): + if self.editor is not None: + return self.editor + for key in 'VISUAL', 'EDITOR': + rv = os.environ.get(key) + if rv: + return rv + if WIN: + return 'notepad' + for editor in 'vim', 'nano': + if os.system('which %s >/dev/null 2>&1' % editor) == 0: + return editor + return 'vi' + + def edit_file(self, filename): + import subprocess + editor = self.get_editor() + if self.env: + environ = os.environ.copy() + environ.update(self.env) + else: + environ = None + try: + c = subprocess.Popen('%s "%s"' % (editor, filename), + env=environ, shell=True) + exit_code = c.wait() + if exit_code != 0: + raise ClickException('%s: Editing failed!' % editor) + except OSError as e: + raise ClickException('%s: Editing failed: %s' % (editor, e)) + + def edit(self, text): + import tempfile + + text = text or '' + if text and not text.endswith('\n'): + text += '\n' + + fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension) + try: + if WIN: + encoding = 'utf-8-sig' + text = text.replace('\n', '\r\n') + else: + encoding = 'utf-8' + text = text.encode(encoding) + + f = os.fdopen(fd, 'wb') + f.write(text) + f.close() + timestamp = os.path.getmtime(name) + + self.edit_file(name) + + if self.require_save \ + and os.path.getmtime(name) == timestamp: + return None + + f = open(name, 'rb') + try: + rv = f.read() + finally: + f.close() + return rv.decode('utf-8-sig').replace('\r\n', '\n') + finally: + os.unlink(name) + + +def open_url(url, wait=False, locate=False): + import subprocess + + def _unquote_file(url): + try: + import urllib + except ImportError: + import urllib + if url.startswith('file://'): + url = urllib.unquote(url[7:]) + return url + + if sys.platform == 'darwin': + args = ['open'] + if wait: + args.append('-W') + if locate: + args.append('-R') + args.append(_unquote_file(url)) + null = open('/dev/null', 'w') + try: + return subprocess.Popen(args, stderr=null).wait() + finally: + null.close() + elif WIN: + if locate: + url = _unquote_file(url) + args = 'explorer /select,"%s"' % _unquote_file( + url.replace('"', '')) + else: + args = 'start %s "" "%s"' % ( + wait and '/WAIT' or '', url.replace('"', '')) + return os.system(args) + + try: + if locate: + url = os.path.dirname(_unquote_file(url)) or '.' 
+ else: + url = _unquote_file(url) + c = subprocess.Popen(['xdg-open', url]) + if wait: + return c.wait() + return 0 + except OSError: + if url.startswith(('http://', 'https://')) and not locate and not wait: + import webbrowser + webbrowser.open(url) + return 0 + return 1 + + +def _translate_ch_to_exc(ch): + if ch == '\x03': + raise KeyboardInterrupt() + if ch == '\x04': + raise EOFError() + + +if WIN: + import msvcrt + + def getchar(echo): + rv = msvcrt.getch() + if echo: + msvcrt.putchar(rv) + _translate_ch_to_exc(rv) + if PY2: + enc = getattr(sys.stdin, 'encoding', None) + if enc is not None: + rv = rv.decode(enc, 'replace') + else: + rv = rv.decode('cp1252', 'replace') + return rv +else: + import tty + import termios + + def getchar(echo): + if not isatty(sys.stdin): + f = open('/dev/tty') + fd = f.fileno() + else: + fd = sys.stdin.fileno() + f = None + try: + old_settings = termios.tcgetattr(fd) + try: + tty.setraw(fd) + ch = os.read(fd, 32) + if echo and isatty(sys.stdout): + sys.stdout.write(ch) + finally: + termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) + sys.stdout.flush() + if f is not None: + f.close() + except termios.error: + pass + _translate_ch_to_exc(ch) + return ch.decode(get_best_encoding(sys.stdin), 'replace') diff --git a/app/lib/click/_textwrap.py b/app/lib/click/_textwrap.py new file mode 100644 index 0000000..7e77603 --- /dev/null +++ b/app/lib/click/_textwrap.py @@ -0,0 +1,38 @@ +import textwrap +from contextlib import contextmanager + + +class TextWrapper(textwrap.TextWrapper): + + def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): + space_left = max(width - cur_len, 1) + + if self.break_long_words: + last = reversed_chunks[-1] + cut = last[:space_left] + res = last[space_left:] + cur_line.append(cut) + reversed_chunks[-1] = res + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + @contextmanager + def extra_indent(self, indent): + old_initial_indent = self.initial_indent + old_subsequent_indent = self.subsequent_indent + self.initial_indent += indent + self.subsequent_indent += indent + try: + yield + finally: + self.initial_indent = old_initial_indent + self.subsequent_indent = old_subsequent_indent + + def indent_only(self, text): + rv = [] + for idx, line in enumerate(text.splitlines()): + indent = self.initial_indent + if idx > 0: + indent = self.subsequent_indent + rv.append(indent + line) + return '\n'.join(rv) diff --git a/app/lib/click/_unicodefun.py b/app/lib/click/_unicodefun.py new file mode 100644 index 0000000..9e17a38 --- /dev/null +++ b/app/lib/click/_unicodefun.py @@ -0,0 +1,118 @@ +import os +import sys +import codecs + +from ._compat import PY2 + + +# If someone wants to vendor click, we want to ensure the +# correct package is discovered. Ideally we could use a +# relative import here but unfortunately Python does not +# support that. 
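+#
+# Worked illustration (hypothetical vendored location, not part of the
+# module): for a copy living at myapp._vendor.click, __name__ here is
+# 'myapp._vendor.click._unicodefun', so the lookup below resolves the
+# parent package:
+#
+#     'myapp._vendor.click._unicodefun'.rsplit('.', 1)[0]
+#     # -> 'myapp._vendor.click'
+#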
+click = sys.modules[__name__.rsplit('.', 1)[0]]
+
+
+def _find_unicode_literals_frame():
+    import __future__
+    frm = sys._getframe(1)
+    idx = 1
+    while frm is not None:
+        if frm.f_globals.get('__name__', '').startswith('click.'):
+            frm = frm.f_back
+            idx += 1
+        elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag:
+            return idx
+        else:
+            break
+    return 0
+
+
+def _check_for_unicode_literals():
+    if not __debug__:
+        return
+    if not PY2 or click.disable_unicode_literals_warning:
+        return
+    bad_frame = _find_unicode_literals_frame()
+    if bad_frame <= 0:
+        return
+    from warnings import warn
+    warn(Warning('Click detected the use of the unicode_literals '
+                 '__future__ import. This is heavily discouraged '
+                 'because it can introduce subtle bugs in your '
+                 'code. You should instead use explicit u"" literals '
+                 'for your unicode strings. For more information see '
+                 'http://click.pocoo.org/python3/'),
+         stacklevel=bad_frame)
+
+
+def _verify_python3_env():
+    """Ensures that the environment is good for unicode on Python 3."""
+    if PY2:
+        return
+    try:
+        import locale
+        fs_enc = codecs.lookup(locale.getpreferredencoding()).name
+    except Exception:
+        fs_enc = 'ascii'
+    if fs_enc != 'ascii':
+        return
+
+    extra = ''
+    if os.name == 'posix':
+        import subprocess
+        rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE).communicate()[0]
+        good_locales = set()
+        has_c_utf8 = False
+
+        # Make sure we're operating on text here.
+        if isinstance(rv, bytes):
+            rv = rv.decode('ascii', 'replace')
+
+        for line in rv.splitlines():
+            locale = line.strip()
+            if locale.lower().endswith(('.utf-8', '.utf8')):
+                good_locales.add(locale)
+                if locale.lower() in ('c.utf8', 'c.utf-8'):
+                    has_c_utf8 = True
+
+        extra += '\n\n'
+        if not good_locales:
+            extra += (
+                'Additional information: on this system no suitable UTF-8\n'
+                'locales were discovered. This most likely requires resolving\n'
+                'by reconfiguring the locale system.'
+            )
+        elif has_c_utf8:
+            extra += (
+                'This system supports the C.UTF-8 locale which is recommended.\n'
+                'You might be able to resolve your issue by exporting the\n'
+                'following environment variables:\n\n'
+                '    export LC_ALL=C.UTF-8\n'
+                '    export LANG=C.UTF-8'
+            )
+        else:
+            extra += (
+                'This system lists a couple of UTF-8 supporting locales that\n'
+                'you can pick from. The following suitable locales were\n'
+                'discovered: %s'
+            ) % ', '.join(sorted(good_locales))
+
+        bad_locale = None
+        for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'):
+            if locale and locale.lower().endswith(('.utf-8', '.utf8')):
+                bad_locale = locale
+            if locale is not None:
+                break
+        if bad_locale is not None:
+            extra += (
+                '\n\nClick discovered that you exported a UTF-8 locale\n'
+                'but the locale system could not pick it up because\n'
+                'it does not exist. The exported locale is "%s" but it\n'
+                'is not supported.'
+            ) % bad_locale
+
+    raise RuntimeError('Click will abort further execution because Python 3 '
+                       'was configured to use ASCII as encoding for the '
+                       'environment. Consult http://click.pocoo.org/python3/ '
+                       'for mitigation steps.' + extra)
diff --git a/app/lib/click/_winconsole.py b/app/lib/click/_winconsole.py
new file mode 100644
index 0000000..9aed942
--- /dev/null
+++ b/app/lib/click/_winconsole.py
@@ -0,0 +1,273 @@
+# -*- coding: utf-8 -*-
+# This module is based on the excellent work by Adam Bartoš who
+# provided a lot of what went into the implementation here in
+# the discussion to issue1602 in the Python bug tracker.
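+#
+# For orientation (an illustrative aside, not upstream text): the console
+# APIs used in this module work in UTF-16-LE code units, so byte counts are
+# always halved or doubled when crossing the boundary:
+#
+#     u'\N{SNOWMAN}'.encode('utf-16-le')  # -> b'\x03&' (1 code unit, 2 bytes)
+#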
+#
+# There are some general differences in regards to how this works
+# compared to the original patches as we do not need to patch
+# the entire interpreter but just work in our little world of
+# echo and prompt.
+
+import io
+import os
+import sys
+import zlib
+import time
+import ctypes
+import msvcrt
+from click._compat import _NonClosingTextIOWrapper, text_type, PY2
+from ctypes import byref, POINTER, c_int, c_char, c_char_p, \
+     c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE
+try:
+    from ctypes import pythonapi
+    PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
+    PyBuffer_Release = pythonapi.PyBuffer_Release
+except ImportError:
+    pythonapi = None
+from ctypes.wintypes import LPWSTR, LPCWSTR
+
+
+c_ssize_p = POINTER(c_ssize_t)
+
+kernel32 = windll.kernel32
+GetStdHandle = kernel32.GetStdHandle
+ReadConsoleW = kernel32.ReadConsoleW
+WriteConsoleW = kernel32.WriteConsoleW
+GetLastError = kernel32.GetLastError
+GetCommandLineW = WINFUNCTYPE(LPWSTR)(
+    ('GetCommandLineW', windll.kernel32))
+CommandLineToArgvW = WINFUNCTYPE(
+    POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
+        ('CommandLineToArgvW', windll.shell32))
+
+
+STDIN_HANDLE = GetStdHandle(-10)
+STDOUT_HANDLE = GetStdHandle(-11)
+STDERR_HANDLE = GetStdHandle(-12)
+
+
+PyBUF_SIMPLE = 0
+PyBUF_WRITABLE = 1
+
+ERROR_SUCCESS = 0
+ERROR_NOT_ENOUGH_MEMORY = 8
+ERROR_OPERATION_ABORTED = 995
+
+STDIN_FILENO = 0
+STDOUT_FILENO = 1
+STDERR_FILENO = 2
+
+EOF = b'\x1a'
+MAX_BYTES_WRITTEN = 32767
+
+
+class Py_buffer(ctypes.Structure):
+    _fields_ = [
+        ('buf', c_void_p),
+        ('obj', py_object),
+        ('len', c_ssize_t),
+        ('itemsize', c_ssize_t),
+        ('readonly', c_int),
+        ('ndim', c_int),
+        ('format', c_char_p),
+        ('shape', c_ssize_p),
+        ('strides', c_ssize_p),
+        ('suboffsets', c_ssize_p),
+        ('internal', c_void_p)
+    ]
+
+    if PY2:
+        _fields_.insert(-1, ('smalltable', c_ssize_t * 2))
+
+
+# On PyPy we cannot get buffers so our ability to operate here is
+# severely limited.
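+#
+# Illustration (hypothetical values, not part of the module): get_buffer
+# below hands ReadConsoleW/WriteConsoleW a ctypes view over an object's
+# memory.
+#
+#     b = bytearray(10)
+#     view = get_buffer(b, writable=True)  # (c_char * 10) over b's buffer
+#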
+if pythonapi is None:
+    get_buffer = None
+else:
+    def get_buffer(obj, writable=False):
+        buf = Py_buffer()
+        flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
+        PyObject_GetBuffer(py_object(obj), byref(buf), flags)
+        try:
+            buffer_type = c_char * buf.len
+            return buffer_type.from_address(buf.buf)
+        finally:
+            PyBuffer_Release(byref(buf))
+
+
+class _WindowsConsoleRawIOBase(io.RawIOBase):
+
+    def __init__(self, handle):
+        self.handle = handle
+
+    def isatty(self):
+        io.RawIOBase.isatty(self)
+        return True
+
+
+class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
+
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        bytes_to_be_read = len(b)
+        if not bytes_to_be_read:
+            return 0
+        elif bytes_to_be_read % 2:
+            raise ValueError('cannot read odd number of bytes from '
+                             'UTF-16-LE encoded console')
+
+        buffer = get_buffer(b, writable=True)
+        code_units_to_be_read = bytes_to_be_read // 2
+        code_units_read = c_ulong()
+
+        rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read,
+                          byref(code_units_read), None)
+        if GetLastError() == ERROR_OPERATION_ABORTED:
+            # wait for KeyboardInterrupt
+            time.sleep(0.1)
+        if not rv:
+            raise OSError('Windows error: %s' % GetLastError())
+
+        if buffer[0] == EOF:
+            return 0
+        return 2 * code_units_read.value
+
+
+class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
+
+    def writable(self):
+        return True
+
+    @staticmethod
+    def _get_error_message(errno):
+        if errno == ERROR_SUCCESS:
+            return 'ERROR_SUCCESS'
+        elif errno == ERROR_NOT_ENOUGH_MEMORY:
+            return 'ERROR_NOT_ENOUGH_MEMORY'
+        return 'Windows error %s' % errno
+
+    def write(self, b):
+        bytes_to_be_written = len(b)
+        buf = get_buffer(b)
+        code_units_to_be_written = min(bytes_to_be_written,
+                                       MAX_BYTES_WRITTEN) // 2
+        code_units_written = c_ulong()
+
+        WriteConsoleW(self.handle, buf, code_units_to_be_written,
+                      byref(code_units_written), None)
+        bytes_written = 2 * code_units_written.value
+
+        if bytes_written == 0 and bytes_to_be_written > 0:
+            raise OSError(self._get_error_message(GetLastError()))
+        return bytes_written
+
+
+class ConsoleStream(object):
+
+    def __init__(self, text_stream, byte_stream):
+        self._text_stream = text_stream
+        self.buffer = byte_stream
+
+    @property
+    def name(self):
+        return self.buffer.name
+
+    def write(self, x):
+        if isinstance(x, text_type):
+            return self._text_stream.write(x)
+        try:
+            self.flush()
+        except Exception:
+            pass
+        return self.buffer.write(x)
+
+    def writelines(self, lines):
+        for line in lines:
+            self.write(line)
+
+    def __getattr__(self, name):
+        return getattr(self._text_stream, name)
+
+    def isatty(self):
+        return self.buffer.isatty()
+
+    def __repr__(self):
+        return '<ConsoleStream name=%r encoding=%r>' % (
+            self.name,
+            self.encoding,
+        )
+
+
+def _get_text_stdin(buffer_stream):
+    text_stream = _NonClosingTextIOWrapper(
+        io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
+        'utf-16-le', 'strict', line_buffering=True)
+    return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stdout(buffer_stream):
+    text_stream = _NonClosingTextIOWrapper(
+        _WindowsConsoleWriter(STDOUT_HANDLE),
+        'utf-16-le', 'strict', line_buffering=True)
+    return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stderr(buffer_stream):
+    text_stream = _NonClosingTextIOWrapper(
+        _WindowsConsoleWriter(STDERR_HANDLE),
+        'utf-16-le', 'strict', line_buffering=True)
+    return ConsoleStream(text_stream, buffer_stream)
+
+
+if PY2:
+    def _hash_py_argv():
+        return zlib.crc32('\x00'.join(sys.argv[1:]))
+
+    _initial_argv_hash = _hash_py_argv()
+
+    def _get_windows_argv():
+        argc = c_int(0)
+        argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
+        argv = [argv_unicode[i] for i in range(0, argc.value)]
+
+        if not hasattr(sys, 'frozen'):
+            argv = argv[1:]
+            while len(argv) > 0:
+                arg = argv[0]
+                if not arg.startswith('-') or arg == '-':
+                    break
+                argv = argv[1:]
+                if arg.startswith(('-c', '-m')):
+                    break
+
+        return argv[1:]
+
+
+_stream_factories = {
+    0: _get_text_stdin,
+    1: _get_text_stdout,
+    2: _get_text_stderr,
+}
+
+
+def _get_windows_console_stream(f, encoding, errors):
+    if get_buffer is not None and \
+       encoding in ('utf-16-le', None) \
+       and errors in ('strict', None) and \
+       hasattr(f, 'isatty') and f.isatty():
+        func = _stream_factories.get(f.fileno())
+        if func is not None:
+            if not PY2:
+                f = getattr(f, 'buffer')
+                if f is None:
+                    return None
+            else:
+                # If we are on Python 2 we need to set the stream that we
+                # deal with to binary mode as otherwise the exercise is a
+                # bit moot. The same problems apply as for
+                # get_binary_stdin and friends from _compat.
+                msvcrt.setmode(f.fileno(), os.O_BINARY)
+            return func(f)
diff --git a/app/lib/click/core.py b/app/lib/click/core.py
new file mode 100644
index 0000000..7456451
--- /dev/null
+++ b/app/lib/click/core.py
@@ -0,0 +1,1744 @@
+import errno
+import os
+import sys
+from contextlib import contextmanager
+from itertools import repeat
+from functools import update_wrapper
+
+from .types import convert_type, IntRange, BOOL
+from .utils import make_str, make_default_short_help, echo, get_os_args
+from .exceptions import ClickException, UsageError, BadParameter, Abort, \
+    MissingParameter
+from .termui import prompt, confirm
+from .formatting import HelpFormatter, join_options
+from .parser import OptionParser, split_opt
+from .globals import push_context, pop_context
+
+from ._compat import PY2, isidentifier, iteritems
+from ._unicodefun import _check_for_unicode_literals, _verify_python3_env
+
+
+_missing = object()
+
+
+SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...'
+SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...'
+
+
+def _bashcomplete(cmd, prog_name, complete_var=None):
+    """Internal handler for the bash completion support."""
+    if complete_var is None:
+        complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
+    complete_instr = os.environ.get(complete_var)
+    if not complete_instr:
+        return
+
+    from ._bashcomplete import bashcomplete
+    if bashcomplete(cmd, prog_name, complete_var, complete_instr):
+        sys.exit(1)
+
+
+def _check_multicommand(base_command, cmd_name, cmd, register=False):
+    if not base_command.chain or not isinstance(cmd, MultiCommand):
+        return
+    if register:
+        hint = 'It is not possible to add multi commands as children to ' \
+               'another multi command that is in chain mode'
+    else:
+        hint = 'Found a multi command as subcommand to a multi command ' \
+               'that is in chain mode. This is not supported'
+    raise RuntimeError('%s. Command "%s" is set to chain and "%s" was '
+                       'added as subcommand but it in itself is a '
+                       'multi command. ("%s" is a %s within a chained '
+                       '%s named "%s"). This restriction was supposed to '
+                       'be lifted in 6.0 but the fix was flawed. This '
+                       'will be fixed in Click 7.0' % (
+                           hint, base_command.name, cmd_name,
+                           cmd_name, cmd.__class__.__name__,
+                           base_command.__class__.__name__,
+                           base_command.name))
+
+
+def batch(iterable, batch_size):
+    return list(zip(*repeat(iter(iterable), batch_size)))
+
+
+def invoke_param_callback(callback, ctx, param, value):
+    code = getattr(callback, '__code__', None)
+    args = getattr(code, 'co_argcount', 3)
+
+    if args < 3:
+        # This will become a warning in Click 3.0:
+        from warnings import warn
+        warn(Warning('Invoked legacy parameter callback "%s". The new '
+                     'signature for such callbacks starting with '
+                     'click 2.0 is (ctx, param, value).'
+                     % callback), stacklevel=3)
+        return callback(ctx, value)
+    return callback(ctx, param, value)
+
+
+@contextmanager
+def augment_usage_errors(ctx, param=None):
+    """Context manager that attaches extra information to exceptions that
+    fly.
+    """
+    try:
+        yield
+    except BadParameter as e:
+        if e.ctx is None:
+            e.ctx = ctx
+        if param is not None and e.param is None:
+            e.param = param
+        raise
+    except UsageError as e:
+        if e.ctx is None:
+            e.ctx = ctx
+        raise
+
+
+def iter_params_for_processing(invocation_order, declaration_order):
+    """Given a sequence of parameters in the order in which they should be
+    considered for processing and an iterable of parameters that exist, this
+    returns a list in the correct order as they should be processed.
+    """
+    def sort_key(item):
+        try:
+            idx = invocation_order.index(item)
+        except ValueError:
+            idx = float('inf')
+        return (not item.is_eager, idx)
+
+    return sorted(declaration_order, key=sort_key)
+
+
+class Context(object):
+    """The context is a special internal object that holds state relevant
+    for the script execution at every single level. It's normally invisible
+    to commands unless they opt-in to getting access to it.
+
+    The context is useful as it can pass internal objects around and can
+    control special execution features such as reading data from
+    environment variables.
+
+    A context can be used as context manager in which case it will call
+    :meth:`close` on teardown.
+
+    .. versionadded:: 2.0
+       Added the `resilient_parsing`, `help_option_names`,
+       `token_normalize_func` parameters.
+
+    .. versionadded:: 3.0
+       Added the `allow_extra_args` and `allow_interspersed_args`
+       parameters.
+
+    .. versionadded:: 4.0
+       Added the `color`, `ignore_unknown_options`, and
+       `max_content_width` parameters.
+
+    :param command: the command class for this context.
+    :param parent: the parent context.
+    :param info_name: the info name for this invocation. Generally this
+                      is the most descriptive name for the script or
+                      command. For the toplevel script it is usually
+                      the name of the script, for commands below it it's
+                      the name of the command.
+    :param obj: an arbitrary object of user data.
+    :param auto_envvar_prefix: the prefix to use for automatic environment
+                               variables. If this is `None` then reading
+                               from environment variables is disabled. This
+                               does not affect manually set environment
+                               variables which are always read.
+    :param default_map: a dictionary (like object) with default values
+                        for parameters.
+    :param terminal_width: the width of the terminal. The default is to
+                           inherit from the parent context. If no context
+                           defines the terminal width then auto
+                           detection will be applied.
+    :param max_content_width: the maximum width for content rendered by
+                              Click (this currently only affects help
+                              pages). This defaults to 80 characters if
+                              not overridden. In other words: even if the
+                              terminal is larger than that, Click will not
+                              format things wider than 80 characters by
+                              default. In addition to that, formatters might
+                              add some safety mapping on the right.
+    :param resilient_parsing: if this flag is enabled then Click will
+                              parse without any interactivity or callback
+                              invocation. This is useful for implementing
+                              things such as completion support.
+    :param allow_extra_args: if this is set to `True` then extra arguments
+                             at the end will not raise an error and will be
+                             kept on the context. The default is to inherit
+                             from the command.
+    :param allow_interspersed_args: if this is set to `False` then options
+                                    and arguments cannot be mixed. The
+                                    default is to inherit from the command.
+    :param ignore_unknown_options: instructs click to ignore options it does
+                                   not know and keeps them for later
+                                   processing.
+    :param help_option_names: optionally a list of strings that define how
+                              the default help parameter is named. The
+                              default is ``['--help']``.
+    :param token_normalize_func: an optional function that is used to
+                                 normalize tokens (options, choices,
+                                 etc.). This for instance can be used to
+                                 implement case insensitive behavior.
+    :param color: controls if the terminal supports ANSI colors or not. The
+                  default is autodetection. This is only needed if ANSI
+                  codes are used in texts that Click prints which is by
+                  default not the case. This for instance would affect
+                  help output.
+    """
+
+    def __init__(self, command, parent=None, info_name=None, obj=None,
+                 auto_envvar_prefix=None, default_map=None,
+                 terminal_width=None, max_content_width=None,
+                 resilient_parsing=False, allow_extra_args=None,
+                 allow_interspersed_args=None,
+                 ignore_unknown_options=None, help_option_names=None,
+                 token_normalize_func=None, color=None):
+        #: the parent context or `None` if none exists.
+        self.parent = parent
+        #: the :class:`Command` for this context.
+        self.command = command
+        #: the descriptive information name
+        self.info_name = info_name
+        #: the parsed parameters except if the value is hidden in which
+        #: case it's not remembered.
+        self.params = {}
+        #: the leftover arguments.
+        self.args = []
+        #: protected arguments. These are arguments that are prepended
+        #: to `args` when certain parsing scenarios are encountered but
+        #: must never be propagated to other arguments. This is used
+        #: to implement nested parsing.
+        self.protected_args = []
+        if obj is None and parent is not None:
+            obj = parent.obj
+        #: the user object stored.
+        self.obj = obj
+        self._meta = getattr(parent, 'meta', {})
+
+        #: A dictionary (-like object) with defaults for parameters.
+        if default_map is None \
+           and parent is not None \
+           and parent.default_map is not None:
+            default_map = parent.default_map.get(info_name)
+        self.default_map = default_map
+
+        #: This flag indicates if a subcommand is going to be executed. A
+        #: group callback can use this information to figure out if it's
+        #: being executed directly or because the execution flow passes
+        #: onwards to a subcommand. By default it's None, but it can be
+        #: the name of the subcommand to execute.
+        #:
+        #: If chaining is enabled this will be set to ``'*'`` in case
+        #: any commands are executed. It is however not possible to
+        #: figure out which ones. If you require this knowledge you
+        #: should use a :func:`resultcallback`.
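+        #
+        # Illustration (hypothetical group, not part of the module):
+        #
+        #     @click.group(invoke_without_command=True)
+        #     @click.pass_context
+        #     def cli(ctx):
+        #         if ctx.invoked_subcommand is None:
+        #             echo('running bare, no subcommand')
+        #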
+        self.invoked_subcommand = None
+
+        if terminal_width is None and parent is not None:
+            terminal_width = parent.terminal_width
+        #: The width of the terminal (None is autodetection).
+        self.terminal_width = terminal_width
+
+        if max_content_width is None and parent is not None:
+            max_content_width = parent.max_content_width
+        #: The maximum width of formatted content (None implies a sensible
+        #: default which is 80 for most things).
+        self.max_content_width = max_content_width
+
+        if allow_extra_args is None:
+            allow_extra_args = command.allow_extra_args
+        #: Indicates if the context allows extra args or if it should
+        #: fail on parsing.
+        #:
+        #: .. versionadded:: 3.0
+        self.allow_extra_args = allow_extra_args
+
+        if allow_interspersed_args is None:
+            allow_interspersed_args = command.allow_interspersed_args
+        #: Indicates if the context allows mixing of arguments and
+        #: options or not.
+        #:
+        #: .. versionadded:: 3.0
+        self.allow_interspersed_args = allow_interspersed_args
+
+        if ignore_unknown_options is None:
+            ignore_unknown_options = command.ignore_unknown_options
+        #: Instructs click to ignore options that a command does not
+        #: understand and will store it on the context for later
+        #: processing. This is primarily useful for situations where you
+        #: want to call into external programs. Generally this pattern is
+        #: strongly discouraged because it's not possible to losslessly
+        #: forward all arguments.
+        #:
+        #: .. versionadded:: 4.0
+        self.ignore_unknown_options = ignore_unknown_options
+
+        if help_option_names is None:
+            if parent is not None:
+                help_option_names = parent.help_option_names
+            else:
+                help_option_names = ['--help']
+
+        #: The names for the help options.
+        self.help_option_names = help_option_names
+
+        if token_normalize_func is None and parent is not None:
+            token_normalize_func = parent.token_normalize_func
+
+        #: An optional normalization function for tokens. This is
+        #: options, choices, commands etc.
+        self.token_normalize_func = token_normalize_func
+
+        #: Indicates if resilient parsing is enabled. In that case Click
+        #: will do its best to not cause any failures.
+        self.resilient_parsing = resilient_parsing
+
+        # If there is no envvar prefix yet, but the parent has one and
+        # the command on this level has a name, we can expand the envvar
+        # prefix automatically.
+        if auto_envvar_prefix is None:
+            if parent is not None \
+               and parent.auto_envvar_prefix is not None and \
+               self.info_name is not None:
+                auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix,
+                                                self.info_name.upper())
+        else:
+            auto_envvar_prefix = auto_envvar_prefix.upper()
+        self.auto_envvar_prefix = auto_envvar_prefix
+
+        if color is None and parent is not None:
+            color = parent.color
+
+        #: Controls if styling output is wanted or not.
+        self.color = color
+
+        self._close_callbacks = []
+        self._depth = 0
+
+    def __enter__(self):
+        self._depth += 1
+        push_context(self)
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self._depth -= 1
+        if self._depth == 0:
+            self.close()
+        pop_context()
+
+    @contextmanager
+    def scope(self, cleanup=True):
+        """This helper method can be used with the context object to promote
+        it to the current thread local (see :func:`get_current_context`).
+        The default behavior of this is to invoke the cleanup functions which
+        can be disabled by setting `cleanup` to `False`. The cleanup
+        functions are typically used for things such as closing file handles.
+ + If the cleanup is intended the context object can also be directly + used as a context manager. + + Example usage:: + + with ctx.scope(): + assert get_current_context() is ctx + + This is equivalent:: + + with ctx: + assert get_current_context() is ctx + + .. versionadded:: 5.0 + + :param cleanup: controls if the cleanup functions should be run or + not. The default is to run these functions. In + some situations the context only wants to be + temporarily pushed in which case this can be disabled. + Nested pushes automatically defer the cleanup. + """ + if not cleanup: + self._depth += 1 + try: + with self as rv: + yield rv + finally: + if not cleanup: + self._depth -= 1 + + @property + def meta(self): + """This is a dictionary which is shared with all the contexts + that are nested. It exists so that click utiltiies can store some + state here if they need to. It is however the responsibility of + that code to manage this dictionary well. + + The keys are supposed to be unique dotted strings. For instance + module paths are a good choice for it. What is stored in there is + irrelevant for the operation of click. However what is important is + that code that places data here adheres to the general semantics of + the system. + + Example usage:: + + LANG_KEY = __name__ + '.lang' + + def set_language(value): + ctx = get_current_context() + ctx.meta[LANG_KEY] = value + + def get_language(): + return get_current_context().meta.get(LANG_KEY, 'en_US') + + .. versionadded:: 5.0 + """ + return self._meta + + def make_formatter(self): + """Creates the formatter for the help and usage output.""" + return HelpFormatter(width=self.terminal_width, + max_width=self.max_content_width) + + def call_on_close(self, f): + """This decorator remembers a function as callback that should be + executed when the context tears down. This is most useful to bind + resource handling to the script execution. For instance, file objects + opened by the :class:`File` type will register their close callbacks + here. + + :param f: the function to execute on teardown. + """ + self._close_callbacks.append(f) + return f + + def close(self): + """Invokes all close callbacks.""" + for cb in self._close_callbacks: + cb() + self._close_callbacks = [] + + @property + def command_path(self): + """The computed command path. This is used for the ``usage`` + information on the help page. It's automatically created by + combining the info names of the chain of contexts to the root. + """ + rv = '' + if self.info_name is not None: + rv = self.info_name + if self.parent is not None: + rv = self.parent.command_path + ' ' + rv + return rv.lstrip() + + def find_root(self): + """Finds the outermost context.""" + node = self + while node.parent is not None: + node = node.parent + return node + + def find_object(self, object_type): + """Finds the closest object of a given type.""" + node = self + while node is not None: + if isinstance(node.obj, object_type): + return node.obj + node = node.parent + + def ensure_object(self, object_type): + """Like :meth:`find_object` but sets the innermost object to a + new instance of `object_type` if it does not exist. + """ + rv = self.find_object(object_type) + if rv is None: + self.obj = rv = object_type() + return rv + + def lookup_default(self, name): + """Looks up the default for a parameter name. This by default + looks into the :attr:`default_map` if available. 
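The teardown hooks documented above are easiest to see with a resource acquired inside a callback. A small sketch, assuming a throwaway output file (the filename is hypothetical)::

    import click

    @click.command()
    @click.pass_context
    def export(ctx):
        handle = open('export.log', 'w')   # hypothetical output file
        ctx.call_on_close(handle.close)    # runs when the context tears down
        handle.write('export finished\n')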
+ """ + if self.default_map is not None: + rv = self.default_map.get(name) + if callable(rv): + rv = rv() + return rv + + def fail(self, message): + """Aborts the execution of the program with a specific error + message. + + :param message: the error message to fail with. + """ + raise UsageError(message, self) + + def abort(self): + """Aborts the script.""" + raise Abort() + + def exit(self, code=0): + """Exits the application with a given exit code.""" + sys.exit(code) + + def get_usage(self): + """Helper method to get formatted usage string for the current + context and command. + """ + return self.command.get_usage(self) + + def get_help(self): + """Helper method to get formatted help page for the current + context and command. + """ + return self.command.get_help(self) + + def invoke(*args, **kwargs): + """Invokes a command callback in exactly the way it expects. There + are two ways to invoke this method: + + 1. the first argument can be a callback and all other arguments and + keyword arguments are forwarded directly to the function. + 2. the first argument is a click command object. In that case all + arguments are forwarded as well but proper click parameters + (options and click arguments) must be keyword arguments and Click + will fill in defaults. + + Note that before Click 3.2 keyword arguments were not properly filled + in against the intention of this code and no context was created. For + more information about this change and why it was done in a bugfix + release see :ref:`upgrade-to-3.2`. + """ + self, callback = args[:2] + ctx = self + + # It's also possible to invoke another command which might or + # might not have a callback. In that case we also fill + # in defaults and make a new context for this command. + if isinstance(callback, Command): + other_cmd = callback + callback = other_cmd.callback + ctx = Context(other_cmd, info_name=other_cmd.name, parent=self) + if callback is None: + raise TypeError('The given command does not have a ' + 'callback that can be invoked.') + + for param in other_cmd.params: + if param.name not in kwargs and param.expose_value: + kwargs[param.name] = param.get_default(ctx) + + args = args[2:] + with augment_usage_errors(self): + with ctx: + return callback(*args, **kwargs) + + def forward(*args, **kwargs): + """Similar to :meth:`invoke` but fills in default keyword + arguments from the current context if the other command expects + it. This cannot invoke callbacks directly, only other commands. + """ + self, cmd = args[:2] + + # It's also possible to invoke another command which might or + # might not have a callback. + if not isinstance(cmd, Command): + raise TypeError('Callback is not a command.') + + for param in self.params: + if param not in kwargs: + kwargs[param] = self.params[param] + + return self.invoke(cmd, **kwargs) + + +class BaseCommand(object): + """The base command implements the minimal API contract of commands. + Most code will never use this as it does not implement a lot of useful + functionality but it can act as the direct subclass of alternative + parsing methods that do not depend on the Click parser. + + For instance, this can be used to bridge Click and other systems like + argparse or docopt. + + Because base commands do not implement a lot of the API that other + parts of Click take for granted, they are not supported for all + operations. For instance, they cannot be used with the decorators + usually and they have no built-in callback system. + + .. 
versionchanged:: 2.0 + Added the `context_settings` parameter. + + :param name: the name of the command to use unless a group overrides it. + :param context_settings: an optional dictionary with defaults that are + passed to the context object. + """ + #: the default for the :attr:`Context.allow_extra_args` flag. + allow_extra_args = False + #: the default for the :attr:`Context.allow_interspersed_args` flag. + allow_interspersed_args = True + #: the default for the :attr:`Context.ignore_unknown_options` flag. + ignore_unknown_options = False + + def __init__(self, name, context_settings=None): + #: the name the command thinks it has. Upon registering a command + #: on a :class:`Group` the group will default the command name + #: with this information. You should instead use the + #: :class:`Context`\'s :attr:`~Context.info_name` attribute. + self.name = name + if context_settings is None: + context_settings = {} + #: an optional dictionary with defaults passed to the context. + self.context_settings = context_settings + + def get_usage(self, ctx): + raise NotImplementedError('Base commands cannot get usage') + + def get_help(self, ctx): + raise NotImplementedError('Base commands cannot get help') + + def make_context(self, info_name, args, parent=None, **extra): + """This function when given an info name and arguments will kick + off the parsing and create a new :class:`Context`. It does not + invoke the actual command callback though. + + :param info_name: the info name for this invokation. Generally this + is the most descriptive name for the script or + command. For the toplevel script it's usually + the name of the script, for commands below it it's + the name of the script. + :param args: the arguments to parse as list of strings. + :param parent: the parent context if available. + :param extra: extra keyword arguments forwarded to the context + constructor. + """ + for key, value in iteritems(self.context_settings): + if key not in extra: + extra[key] = value + ctx = Context(self, info_name=info_name, parent=parent, **extra) + with ctx.scope(cleanup=False): + self.parse_args(ctx, args) + return ctx + + def parse_args(self, ctx, args): + """Given a context and a list of arguments this creates the parser + and parses the arguments, then modifies the context as necessary. + This is automatically invoked by :meth:`make_context`. + """ + raise NotImplementedError('Base commands do not know how to parse ' + 'arguments.') + + def invoke(self, ctx): + """Given a context, this invokes the command. The default + implementation is raising a not implemented error. + """ + raise NotImplementedError('Base commands are not invokable by default') + + def main(self, args=None, prog_name=None, complete_var=None, + standalone_mode=True, **extra): + """This is the way to invoke a script with all the bells and + whistles as a command line application. This will always terminate + the application after a call. If this is not wanted, ``SystemExit`` + needs to be caught. + + This method is also available by directly calling the instance of + a :class:`Command`. + + .. versionadded:: 3.0 + Added the `standalone_mode` flag to control the standalone mode. + + :param args: the arguments that should be used for parsing. If not + provided, ``sys.argv[1:]`` is used. + :param prog_name: the program name that should be used. By default + the program name is constructed by taking the file + name from ``sys.argv[0]``. + :param complete_var: the environment variable that controls the + bash completion support. 
The default is + ``"__COMPLETE"`` with prog name in + uppercase. + :param standalone_mode: the default behavior is to invoke the script + in standalone mode. Click will then + handle exceptions and convert them into + error messages and the function will never + return but shut down the interpreter. If + this is set to `False` they will be + propagated to the caller and the return + value of this function is the return value + of :meth:`invoke`. + :param extra: extra keyword arguments are forwarded to the context + constructor. See :class:`Context` for more information. + """ + # If we are in Python 3, we will verify that the environment is + # sane at this point of reject further execution to avoid a + # broken script. + if not PY2: + _verify_python3_env() + else: + _check_for_unicode_literals() + + if args is None: + args = get_os_args() + else: + args = list(args) + + if prog_name is None: + prog_name = make_str(os.path.basename( + sys.argv and sys.argv[0] or __file__)) + + # Hook for the Bash completion. This only activates if the Bash + # completion is actually enabled, otherwise this is quite a fast + # noop. + _bashcomplete(self, prog_name, complete_var) + + try: + try: + with self.make_context(prog_name, args, **extra) as ctx: + rv = self.invoke(ctx) + if not standalone_mode: + return rv + ctx.exit() + except (EOFError, KeyboardInterrupt): + echo(file=sys.stderr) + raise Abort() + except ClickException as e: + if not standalone_mode: + raise + e.show() + sys.exit(e.exit_code) + except IOError as e: + if e.errno == errno.EPIPE: + sys.exit(1) + else: + raise + except Abort: + if not standalone_mode: + raise + echo('Aborted!', file=sys.stderr) + sys.exit(1) + + def __call__(self, *args, **kwargs): + """Alias for :meth:`main`.""" + return self.main(*args, **kwargs) + + +class Command(BaseCommand): + """Commands are the basic building block of command line interfaces in + Click. A basic command handles command line parsing and might dispatch + more parsing to commands nested below it. + + .. versionchanged:: 2.0 + Added the `context_settings` parameter. + + :param name: the name of the command to use unless a group overrides it. + :param context_settings: an optional dictionary with defaults that are + passed to the context object. + :param callback: the callback to invoke. This is optional. + :param params: the parameters to register with this command. This can + be either :class:`Option` or :class:`Argument` objects. + :param help: the help string to use for this command. + :param epilog: like the help string but it's printed at the end of the + help page after everything else. + :param short_help: the short help to use for this command. This is + shown on the command listing of the parent command. + :param add_help_option: by default each command registers a ``--help`` + option. This can be disabled by this parameter. + """ + + def __init__(self, name, context_settings=None, callback=None, + params=None, help=None, epilog=None, short_help=None, + options_metavar='[OPTIONS]', add_help_option=True): + BaseCommand.__init__(self, name, context_settings) + #: the callback to execute when the command fires. This might be + #: `None` in which case nothing happens. + self.callback = callback + #: the list of parameters for this command in the order they + #: should show up in the help page and execute. Eager parameters + #: will automatically be handled before non eager ones. 
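The ``standalone_mode`` flag documented for :meth:`main` above is what makes commands embeddable and testable: with it disabled, exceptions propagate and the callback's return value is handed back to the caller. A quick sketch::

    import click

    @click.command()
    @click.option('--count', default=1, type=int)
    def repeat(count):
        return count * 2

    # No SystemExit; the callback's return value comes back directly.
    assert repeat.main(['--count', '3'], standalone_mode=False) == 6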
+ self.params = params or [] + self.help = help + self.epilog = epilog + self.options_metavar = options_metavar + if short_help is None and help: + short_help = make_default_short_help(help) + self.short_help = short_help + self.add_help_option = add_help_option + + def get_usage(self, ctx): + formatter = ctx.make_formatter() + self.format_usage(ctx, formatter) + return formatter.getvalue().rstrip('\n') + + def get_params(self, ctx): + rv = self.params + help_option = self.get_help_option(ctx) + if help_option is not None: + rv = rv + [help_option] + return rv + + def format_usage(self, ctx, formatter): + """Writes the usage line into the formatter.""" + pieces = self.collect_usage_pieces(ctx) + formatter.write_usage(ctx.command_path, ' '.join(pieces)) + + def collect_usage_pieces(self, ctx): + """Returns all the pieces that go into the usage line and returns + it as a list of strings. + """ + rv = [self.options_metavar] + for param in self.get_params(ctx): + rv.extend(param.get_usage_pieces(ctx)) + return rv + + def get_help_option_names(self, ctx): + """Returns the names for the help option.""" + all_names = set(ctx.help_option_names) + for param in self.params: + all_names.difference_update(param.opts) + all_names.difference_update(param.secondary_opts) + return all_names + + def get_help_option(self, ctx): + """Returns the help option object.""" + help_options = self.get_help_option_names(ctx) + if not help_options or not self.add_help_option: + return + + def show_help(ctx, param, value): + if value and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + return Option(help_options, is_flag=True, + is_eager=True, expose_value=False, + callback=show_help, + help='Show this message and exit.') + + def make_parser(self, ctx): + """Creates the underlying option parser for this command.""" + parser = OptionParser(ctx) + parser.allow_interspersed_args = ctx.allow_interspersed_args + parser.ignore_unknown_options = ctx.ignore_unknown_options + for param in self.get_params(ctx): + param.add_to_parser(parser, ctx) + return parser + + def get_help(self, ctx): + """Formats the help into a string and returns it. This creates a + formatter and will call into the following formatting methods: + """ + formatter = ctx.make_formatter() + self.format_help(ctx, formatter) + return formatter.getvalue().rstrip('\n') + + def format_help(self, ctx, formatter): + """Writes the help into the formatter if it exists. 
+ + This calls into the following methods: + + - :meth:`format_usage` + - :meth:`format_help_text` + - :meth:`format_options` + - :meth:`format_epilog` + """ + self.format_usage(ctx, formatter) + self.format_help_text(ctx, formatter) + self.format_options(ctx, formatter) + self.format_epilog(ctx, formatter) + + def format_help_text(self, ctx, formatter): + """Writes the help text to the formatter if it exists.""" + if self.help: + formatter.write_paragraph() + with formatter.indentation(): + formatter.write_text(self.help) + + def format_options(self, ctx, formatter): + """Writes all the options into the formatter if they exist.""" + opts = [] + for param in self.get_params(ctx): + rv = param.get_help_record(ctx) + if rv is not None: + opts.append(rv) + + if opts: + with formatter.section('Options'): + formatter.write_dl(opts) + + def format_epilog(self, ctx, formatter): + """Writes the epilog into the formatter if it exists.""" + if self.epilog: + formatter.write_paragraph() + with formatter.indentation(): + formatter.write_text(self.epilog) + + def parse_args(self, ctx, args): + parser = self.make_parser(ctx) + opts, args, param_order = parser.parse_args(args=args) + + for param in iter_params_for_processing( + param_order, self.get_params(ctx)): + value, args = param.handle_parse_result(ctx, opts, args) + + if args and not ctx.allow_extra_args and not ctx.resilient_parsing: + ctx.fail('Got unexpected extra argument%s (%s)' + % (len(args) != 1 and 's' or '', + ' '.join(map(make_str, args)))) + + ctx.args = args + return args + + def invoke(self, ctx): + """Given a context, this invokes the attached callback (if it exists) + in the right way. + """ + if self.callback is not None: + return ctx.invoke(self.callback, **ctx.params) + + +class MultiCommand(Command): + """A multi command is the basic implementation of a command that + dispatches to subcommands. The most common version is the + :class:`Group`. + + :param invoke_without_command: this controls how the multi command itself + is invoked. By default it's only invoked + if a subcommand is provided. + :param no_args_is_help: this controls what happens if no arguments are + provided. This option is enabled by default if + `invoke_without_command` is disabled or disabled + if it's enabled. If enabled this will add + ``--help`` as argument if no arguments are + passed. + :param subcommand_metavar: the string that is used in the documentation + to indicate the subcommand place. + :param chain: if this is set to `True` chaining of multiple subcommands + is enabled. This restricts the form of commands in that + they cannot have optional arguments but it allows + multiple commands to be chained together. + :param result_callback: the result callback to attach to this multi + command. + """ + allow_extra_args = True + allow_interspersed_args = False + + def __init__(self, name=None, invoke_without_command=False, + no_args_is_help=None, subcommand_metavar=None, + chain=False, result_callback=None, **attrs): + Command.__init__(self, name, **attrs) + if no_args_is_help is None: + no_args_is_help = not invoke_without_command + self.no_args_is_help = no_args_is_help + self.invoke_without_command = invoke_without_command + if subcommand_metavar is None: + if chain: + subcommand_metavar = SUBCOMMANDS_METAVAR + else: + subcommand_metavar = SUBCOMMAND_METAVAR + self.subcommand_metavar = subcommand_metavar + self.chain = chain + #: The result callback that is stored. This can be set or + #: overridden with the :func:`resultcallback` decorator. 
+ self.result_callback = result_callback + + if self.chain: + for param in self.params: + if isinstance(param, Argument) and not param.required: + raise RuntimeError('Multi commands in chain mode cannot ' + 'have optional arguments.') + + def collect_usage_pieces(self, ctx): + rv = Command.collect_usage_pieces(self, ctx) + rv.append(self.subcommand_metavar) + return rv + + def format_options(self, ctx, formatter): + Command.format_options(self, ctx, formatter) + self.format_commands(ctx, formatter) + + def resultcallback(self, replace=False): + """Adds a result callback to the chain command. By default if a + result callback is already registered this will chain them but + this can be disabled with the `replace` parameter. The result + callback is invoked with the return value of the subcommand + (or the list of return values from all subcommands if chaining + is enabled) as well as the parameters as they would be passed + to the main callback. + + Example:: + + @click.group() + @click.option('-i', '--input', default=23) + def cli(input): + return 42 + + @cli.resultcallback() + def process_result(result, input): + return result + input + + .. versionadded:: 3.0 + + :param replace: if set to `True` an already existing result + callback will be removed. + """ + def decorator(f): + old_callback = self.result_callback + if old_callback is None or replace: + self.result_callback = f + return f + def function(__value, *args, **kwargs): + return f(old_callback(__value, *args, **kwargs), + *args, **kwargs) + self.result_callback = rv = update_wrapper(function, f) + return rv + return decorator + + def format_commands(self, ctx, formatter): + """Extra format methods for multi methods that adds all the commands + after the options. + """ + rows = [] + for subcommand in self.list_commands(ctx): + cmd = self.get_command(ctx, subcommand) + # What is this, the tool lied about a command. Ignore it + if cmd is None: + continue + + help = cmd.short_help or '' + rows.append((subcommand, help)) + + if rows: + with formatter.section('Commands'): + formatter.write_dl(rows) + + def parse_args(self, ctx, args): + if not args and self.no_args_is_help and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + + rest = Command.parse_args(self, ctx, args) + if self.chain: + ctx.protected_args = rest + ctx.args = [] + elif rest: + ctx.protected_args, ctx.args = rest[:1], rest[1:] + + return ctx.args + + def invoke(self, ctx): + def _process_result(value): + if self.result_callback is not None: + value = ctx.invoke(self.result_callback, value, + **ctx.params) + return value + + if not ctx.protected_args: + # If we are invoked without command the chain flag controls + # how this happens. If we are not in chain mode, the return + # value here is the return value of the command. + # If however we are in chain mode, the return value is the + # return value of the result processor invoked with an empty + # list (which means that no subcommand actually was executed). + if self.invoke_without_command: + if not self.chain: + return Command.invoke(self, ctx) + with ctx: + Command.invoke(self, ctx) + return _process_result([]) + ctx.fail('Missing command.') + + # Fetch args back out + args = ctx.protected_args + ctx.args + ctx.args = [] + ctx.protected_args = [] + + # If we're not in chain mode, we only allow the invocation of a + # single command but we also inform the current context about the + # name of the command to invoke. 
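Chain mode changes the result callback contract described above: every subcommand's return value is collected into a list and handed to the result processor. A sketch with illustrative command names::

    import click

    @click.group(chain=True)
    def cli():
        pass

    @cli.command()
    def strip():
        return 'strip'

    @cli.command()
    def lower():
        return 'lower'

    @cli.resultcallback()
    def report(results):
        click.echo(' -> '.join(results))

    # `cli strip lower` prints "strip -> lower"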
+ if not self.chain: + # Make sure the context is entered so we do not clean up + # resources until the result processor has worked. + with ctx: + cmd_name, cmd, args = self.resolve_command(ctx, args) + ctx.invoked_subcommand = cmd_name + Command.invoke(self, ctx) + sub_ctx = cmd.make_context(cmd_name, args, parent=ctx) + with sub_ctx: + return _process_result(sub_ctx.command.invoke(sub_ctx)) + + # In chain mode we create the contexts step by step, but after the + # base command has been invoked. Because at that point we do not + # know the subcommands yet, the invoked subcommand attribute is + # set to ``*`` to inform the command that subcommands are executed + # but nothing else. + with ctx: + ctx.invoked_subcommand = args and '*' or None + Command.invoke(self, ctx) + + # Otherwise we make every single context and invoke them in a + # chain. In that case the return value to the result processor + # is the list of all invoked subcommand's results. + contexts = [] + while args: + cmd_name, cmd, args = self.resolve_command(ctx, args) + sub_ctx = cmd.make_context(cmd_name, args, parent=ctx, + allow_extra_args=True, + allow_interspersed_args=False) + contexts.append(sub_ctx) + args, sub_ctx.args = sub_ctx.args, [] + + rv = [] + for sub_ctx in contexts: + with sub_ctx: + rv.append(sub_ctx.command.invoke(sub_ctx)) + return _process_result(rv) + + def resolve_command(self, ctx, args): + cmd_name = make_str(args[0]) + original_cmd_name = cmd_name + + # Get the command + cmd = self.get_command(ctx, cmd_name) + + # If we can't find the command but there is a normalization + # function available, we try with that one. + if cmd is None and ctx.token_normalize_func is not None: + cmd_name = ctx.token_normalize_func(cmd_name) + cmd = self.get_command(ctx, cmd_name) + + # If we don't find the command we want to show an error message + # to the user that it was not provided. However, there is + # something else we should do: if the first argument looks like + # an option we want to kick off parsing again for arguments to + # resolve things like --help which now should go to the main + # place. + if cmd is None: + if split_opt(cmd_name)[0]: + self.parse_args(ctx, ctx.args) + ctx.fail('No such command "%s".' % original_cmd_name) + + return cmd_name, cmd, args[1:] + + def get_command(self, ctx, cmd_name): + """Given a context and a command name, this returns a + :class:`Command` object if it exists or returns `None`. + """ + raise NotImplementedError() + + def list_commands(self, ctx): + """Returns a list of subcommand names in the order they should + appear. + """ + return [] + + +class Group(MultiCommand): + """A group allows a command to have subcommands attached. This is the + most common way to implement nesting in Click. + + :param commands: a dictionary of commands. + """ + + def __init__(self, name=None, commands=None, **attrs): + MultiCommand.__init__(self, name, **attrs) + #: the registered subcommands by their exported names. + self.commands = commands or {} + + def add_command(self, cmd, name=None): + """Registers another :class:`Command` with this group. If the name + is not provided, the name of the command is used. + """ + name = name or cmd.name + if name is None: + raise TypeError('Command has no name.') + _check_multicommand(self, name, cmd, register=True) + self.commands[name] = cmd + + def command(self, *args, **kwargs): + """A shortcut decorator for declaring and attaching a command to + the group. 
This takes the same arguments as :func:`command` but + immediately registers the created command with this instance by + calling into :meth:`add_command`. + """ + def decorator(f): + cmd = command(*args, **kwargs)(f) + self.add_command(cmd) + return cmd + return decorator + + def group(self, *args, **kwargs): + """A shortcut decorator for declaring and attaching a group to + the group. This takes the same arguments as :func:`group` but + immediately registers the created command with this instance by + calling into :meth:`add_command`. + """ + def decorator(f): + cmd = group(*args, **kwargs)(f) + self.add_command(cmd) + return cmd + return decorator + + def get_command(self, ctx, cmd_name): + return self.commands.get(cmd_name) + + def list_commands(self, ctx): + return sorted(self.commands) + + +class CommandCollection(MultiCommand): + """A command collection is a multi command that merges multiple multi + commands together into one. This is a straightforward implementation + that accepts a list of different multi commands as sources and + provides all the commands for each of them. + """ + + def __init__(self, name=None, sources=None, **attrs): + MultiCommand.__init__(self, name, **attrs) + #: The list of registered multi commands. + self.sources = sources or [] + + def add_source(self, multi_cmd): + """Adds a new multi command to the chain dispatcher.""" + self.sources.append(multi_cmd) + + def get_command(self, ctx, cmd_name): + for source in self.sources: + rv = source.get_command(ctx, cmd_name) + if rv is not None: + if self.chain: + _check_multicommand(self, cmd_name, rv) + return rv + + def list_commands(self, ctx): + rv = set() + for source in self.sources: + rv.update(source.list_commands(ctx)) + return sorted(rv) + + +class Parameter(object): + """A parameter to a command comes in two versions: they are either + :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently + not supported by design as some of the internals for parsing are + intentionally not finalized. + + Some settings are supported by both options and arguments. + + .. versionchanged:: 2.0 + Changed signature for parameter callback to also be passed the + parameter. In Click 2.0, the old callback format will still work, + but it will raise a warning to give you change to migrate the + code easier. + + :param param_decls: the parameter declarations for this option or + argument. This is a list of flags or argument + names. + :param type: the type that should be used. Either a :class:`ParamType` + or a Python type. The later is converted into the former + automatically if supported. + :param required: controls if this is optional or not. + :param default: the default value if omitted. This can also be a callable, + in which case it's invoked when the default is needed + without any arguments. + :param callback: a callback that should be executed after the parameter + was matched. This is called as ``fn(ctx, param, + value)`` and needs to return the value. Before Click + 2.0, the signature was ``(ctx, value)``. + :param nargs: the number of arguments to match. If not ``1`` the return + value is a tuple instead of single value. The default for + nargs is ``1`` (except if the type is a tuple, then it's + the arity of the tuple). + :param metavar: how the value is represented in the help page. + :param expose_value: if this is `True` then the value is passed onwards + to the command callback and stored on the context, + otherwise it's skipped. 
+ :param is_eager: eager values are processed before non eager ones. This + should not be set for arguments or it will inverse the + order of processing. + :param envvar: a string or list of strings that are environment variables + that should be checked. + """ + param_type_name = 'parameter' + + def __init__(self, param_decls=None, type=None, required=False, + default=None, callback=None, nargs=None, metavar=None, + expose_value=True, is_eager=False, envvar=None): + self.name, self.opts, self.secondary_opts = \ + self._parse_decls(param_decls or (), expose_value) + + self.type = convert_type(type, default) + + # Default nargs to what the type tells us if we have that + # information available. + if nargs is None: + if self.type.is_composite: + nargs = self.type.arity + else: + nargs = 1 + + self.required = required + self.callback = callback + self.nargs = nargs + self.multiple = False + self.expose_value = expose_value + self.default = default + self.is_eager = is_eager + self.metavar = metavar + self.envvar = envvar + + @property + def human_readable_name(self): + """Returns the human readable name of this parameter. This is the + same as the name for options, but the metavar for arguments. + """ + return self.name + + def make_metavar(self): + if self.metavar is not None: + return self.metavar + metavar = self.type.get_metavar(self) + if metavar is None: + metavar = self.type.name.upper() + if self.nargs != 1: + metavar += '...' + return metavar + + def get_default(self, ctx): + """Given a context variable this calculates the default value.""" + # Otherwise go with the regular default. + if callable(self.default): + rv = self.default() + else: + rv = self.default + return self.type_cast_value(ctx, rv) + + def add_to_parser(self, parser, ctx): + pass + + def consume_value(self, ctx, opts): + value = opts.get(self.name) + if value is None: + value = ctx.lookup_default(self.name) + if value is None: + value = self.value_from_envvar(ctx) + return value + + def type_cast_value(self, ctx, value): + """Given a value this runs it properly through the type system. + This automatically handles things like `nargs` and `multiple` as + well as composite types. + """ + if self.type.is_composite: + if self.nargs <= 1: + raise TypeError('Attempted to invoke composite type ' + 'but nargs has been set to %s. This is ' + 'not supported; nargs needs to be set to ' + 'a fixed value > 1.' % self.nargs) + if self.multiple: + return tuple(self.type(x or (), self, ctx) for x in value or ()) + return self.type(value or (), self, ctx) + + def _convert(value, level): + if level == 0: + return self.type(value, self, ctx) + return tuple(_convert(x, level - 1) for x in value or ()) + return _convert(value, (self.nargs != 1) + bool(self.multiple)) + + def process_value(self, ctx, value): + """Given a value and context this runs the logic to convert the + value as necessary. + """ + # If the value we were given is None we do nothing. This way + # code that calls this can easily figure out if something was + # not provided. Otherwise it would be converted into an empty + # tuple for multiple invocations which is inconvenient. 
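The environment variable fallback implemented by :meth:`value_from_envvar` means any parameter can be fed from the environment. A minimal sketch (the variable name is illustrative)::

    import click

    @click.command()
    @click.argument('src', envvar='SRC_PATH')
    def show(src):
        click.echo(src)

    # With no SRC given on the command line, SRC_PATH supplies the value.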
+ if value is not None: + return self.type_cast_value(ctx, value) + + def value_is_missing(self, value): + if value is None: + return True + if (self.nargs != 1 or self.multiple) and value == (): + return True + return False + + def full_process_value(self, ctx, value): + value = self.process_value(ctx, value) + + if value is None: + value = self.get_default(ctx) + + if self.required and self.value_is_missing(value): + raise MissingParameter(ctx=ctx, param=self) + + return value + + def resolve_envvar_value(self, ctx): + if self.envvar is None: + return + if isinstance(self.envvar, (tuple, list)): + for envvar in self.envvar: + rv = os.environ.get(envvar) + if rv is not None: + return rv + else: + return os.environ.get(self.envvar) + + def value_from_envvar(self, ctx): + rv = self.resolve_envvar_value(ctx) + if rv is not None and self.nargs != 1: + rv = self.type.split_envvar_value(rv) + return rv + + def handle_parse_result(self, ctx, opts, args): + with augment_usage_errors(ctx, param=self): + value = self.consume_value(ctx, opts) + try: + value = self.full_process_value(ctx, value) + except Exception: + if not ctx.resilient_parsing: + raise + value = None + if self.callback is not None: + try: + value = invoke_param_callback( + self.callback, ctx, self, value) + except Exception: + if not ctx.resilient_parsing: + raise + + if self.expose_value: + ctx.params[self.name] = value + return value, args + + def get_help_record(self, ctx): + pass + + def get_usage_pieces(self, ctx): + return [] + + +class Option(Parameter): + """Options are usually optional values on the command line and + have some extra features that arguments don't have. + + All other parameters are passed onwards to the parameter constructor. + + :param show_default: controls if the default value should be shown on the + help page. Normally, defaults are not shown. + :param prompt: if set to `True` or a non empty string then the user will + be prompted for input if not set. If set to `True` the + prompt will be the option name capitalized. + :param confirmation_prompt: if set then the value will need to be confirmed + if it was prompted for. + :param hide_input: if this is `True` then the input on the prompt will be + hidden from the user. This is useful for password + input. + :param is_flag: forces this option to act as a flag. The default is + auto detection. + :param flag_value: which value should be used for this flag if it's + enabled. This is set to a boolean automatically if + the option string contains a slash to mark two options. + :param multiple: if this is set to `True` then the argument is accepted + multiple times and recorded. This is similar to ``nargs`` + in how it works but supports arbitrary number of + arguments. + :param count: this flag makes an option increment an integer. + :param allow_from_autoenv: if this is enabled then the value of this + parameter will be pulled from an environment + variable in case a prefix is defined on the + context. + :param help: the help string. 
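Most of the option features listed above compose freely on a single command. A short sketch (names are illustrative)::

    import click

    @click.command()
    @click.option('--name', prompt=True, help='Who to greet.')
    @click.option('-v', '--verbose', count=True)
    @click.option('--shout/--no-shout', default=False, show_default=True)
    def greet(name, verbose, shout):
        message = 'Hello %s!' % name
        if shout:
            message = message.upper()
        click.echo('%s (verbosity: %d)' % (message, verbose))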
+ """ + param_type_name = 'option' + + def __init__(self, param_decls=None, show_default=False, + prompt=False, confirmation_prompt=False, + hide_input=False, is_flag=None, flag_value=None, + multiple=False, count=False, allow_from_autoenv=True, + type=None, help=None, **attrs): + default_is_missing = attrs.get('default', _missing) is _missing + Parameter.__init__(self, param_decls, type=type, **attrs) + + if prompt is True: + prompt_text = self.name.replace('_', ' ').capitalize() + elif prompt is False: + prompt_text = None + else: + prompt_text = prompt + self.prompt = prompt_text + self.confirmation_prompt = confirmation_prompt + self.hide_input = hide_input + + # Flags + if is_flag is None: + if flag_value is not None: + is_flag = True + else: + is_flag = bool(self.secondary_opts) + if is_flag and default_is_missing: + self.default = False + if flag_value is None: + flag_value = not self.default + self.is_flag = is_flag + self.flag_value = flag_value + if self.is_flag and isinstance(self.flag_value, bool) \ + and type is None: + self.type = BOOL + self.is_bool_flag = True + else: + self.is_bool_flag = False + + # Counting + self.count = count + if count: + if type is None: + self.type = IntRange(min=0) + if default_is_missing: + self.default = 0 + + self.multiple = multiple + self.allow_from_autoenv = allow_from_autoenv + self.help = help + self.show_default = show_default + + # Sanity check for stuff we don't support + if __debug__: + if self.nargs < 0: + raise TypeError('Options cannot have nargs < 0') + if self.prompt and self.is_flag and not self.is_bool_flag: + raise TypeError('Cannot prompt for flags that are not bools.') + if not self.is_bool_flag and self.secondary_opts: + raise TypeError('Got secondary option for non boolean flag.') + if self.is_bool_flag and self.hide_input \ + and self.prompt is not None: + raise TypeError('Hidden input does not work with boolean ' + 'flag prompts.') + if self.count: + if self.multiple: + raise TypeError('Options cannot be multiple and count ' + 'at the same time.') + elif self.is_flag: + raise TypeError('Options cannot be count and flags at ' + 'the same time.') + + def _parse_decls(self, decls, expose_value): + opts = [] + secondary_opts = [] + name = None + possible_names = [] + + for decl in decls: + if isidentifier(decl): + if name is not None: + raise TypeError('Name defined twice') + name = decl + else: + split_char = decl[:1] == '/' and ';' or '/' + if split_char in decl: + first, second = decl.split(split_char, 1) + first = first.rstrip() + if first: + possible_names.append(split_opt(first)) + opts.append(first) + second = second.lstrip() + if second: + secondary_opts.append(second.lstrip()) + else: + possible_names.append(split_opt(decl)) + opts.append(decl) + + if name is None and possible_names: + possible_names.sort(key=lambda x: len(x[0])) + name = possible_names[-1][1].replace('-', '_').lower() + if not isidentifier(name): + name = None + + if name is None: + if not expose_value: + return None, opts, secondary_opts + raise TypeError('Could not determine name for option') + + if not opts and not secondary_opts: + raise TypeError('No options defined but a name was passed (%s). ' + 'Did you mean to declare an argument instead ' + 'of an option?' 
% name) + + return name, opts, secondary_opts + + def add_to_parser(self, parser, ctx): + kwargs = { + 'dest': self.name, + 'nargs': self.nargs, + 'obj': self, + } + + if self.multiple: + action = 'append' + elif self.count: + action = 'count' + else: + action = 'store' + + if self.is_flag: + kwargs.pop('nargs', None) + if self.is_bool_flag and self.secondary_opts: + parser.add_option(self.opts, action=action + '_const', + const=True, **kwargs) + parser.add_option(self.secondary_opts, action=action + + '_const', const=False, **kwargs) + else: + parser.add_option(self.opts, action=action + '_const', + const=self.flag_value, + **kwargs) + else: + kwargs['action'] = action + parser.add_option(self.opts, **kwargs) + + def get_help_record(self, ctx): + any_prefix_is_slash = [] + + def _write_opts(opts): + rv, any_slashes = join_options(opts) + if any_slashes: + any_prefix_is_slash[:] = [True] + if not self.is_flag and not self.count: + rv += ' ' + self.make_metavar() + return rv + + rv = [_write_opts(self.opts)] + if self.secondary_opts: + rv.append(_write_opts(self.secondary_opts)) + + help = self.help or '' + extra = [] + if self.default is not None and self.show_default: + extra.append('default: %s' % ( + ', '.join('%s' % d for d in self.default) + if isinstance(self.default, (list, tuple)) + else self.default, )) + if self.required: + extra.append('required') + if extra: + help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra)) + + return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help) + + def get_default(self, ctx): + # If we're a non boolean flag out default is more complex because + # we need to look at all flags in the same group to figure out + # if we're the the default one in which case we return the flag + # value as default. + if self.is_flag and not self.is_bool_flag: + for param in ctx.command.params: + if param.name == self.name and param.default: + return param.flag_value + return None + return Parameter.get_default(self, ctx) + + def prompt_for_value(self, ctx): + """This is an alternative flow that can be activated in the full + value processing if a value does not exist. It will prompt the + user until a valid value exists and then returns the processed + value as result. + """ + # Calculate the default before prompting anything to be stable. + default = self.get_default(ctx) + + # If this is a prompt for a flag we need to handle this + # differently. 
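This override is what makes groups of non-boolean flags behave: the flag whose ``default`` is truthy donates its ``flag_value`` when nothing is passed. For example::

    import click

    @click.command()
    @click.option('--upper', 'transform', flag_value='upper', default=True)
    @click.option('--lower', 'transform', flag_value='lower')
    def fmt(transform):
        click.echo(transform)

    # Running `fmt` with no flags prints "upper".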
+ if self.is_bool_flag: + return confirm(self.prompt, default) + + return prompt(self.prompt, default=default, + hide_input=self.hide_input, + confirmation_prompt=self.confirmation_prompt, + value_proc=lambda x: self.process_value(ctx, x)) + + def resolve_envvar_value(self, ctx): + rv = Parameter.resolve_envvar_value(self, ctx) + if rv is not None: + return rv + if self.allow_from_autoenv and \ + ctx.auto_envvar_prefix is not None: + envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper()) + return os.environ.get(envvar) + + def value_from_envvar(self, ctx): + rv = self.resolve_envvar_value(ctx) + if rv is None: + return None + value_depth = (self.nargs != 1) + bool(self.multiple) + if value_depth > 0 and rv is not None: + rv = self.type.split_envvar_value(rv) + if self.multiple and self.nargs != 1: + rv = batch(rv, self.nargs) + return rv + + def full_process_value(self, ctx, value): + if value is None and self.prompt is not None \ + and not ctx.resilient_parsing: + return self.prompt_for_value(ctx) + return Parameter.full_process_value(self, ctx, value) + + +class Argument(Parameter): + """Arguments are positional parameters to a command. They generally + provide fewer features than options but can have infinite ``nargs`` + and are required by default. + + All parameters are passed onwards to the parameter constructor. + """ + param_type_name = 'argument' + + def __init__(self, param_decls, required=None, **attrs): + if required is None: + if attrs.get('default') is not None: + required = False + else: + required = attrs.get('nargs', 1) > 0 + Parameter.__init__(self, param_decls, required=required, **attrs) + if self.default is not None and self.nargs < 0: + raise TypeError('nargs=-1 in combination with a default value ' + 'is not supported.') + + @property + def human_readable_name(self): + if self.metavar is not None: + return self.metavar + return self.name.upper() + + def make_metavar(self): + if self.metavar is not None: + return self.metavar + var = self.name.upper() + if not self.required: + var = '[%s]' % var + if self.nargs != 1: + var += '...' + return var + + def _parse_decls(self, decls, expose_value): + if not decls: + if not expose_value: + return None, [], [] + raise TypeError('Could not determine name for argument') + if len(decls) == 1: + name = arg = decls[0] + name = name.replace('-', '_').lower() + elif len(decls) == 2: + name, arg = decls + else: + raise TypeError('Arguments take exactly one or two ' + 'parameter declarations, got %d' % len(decls)) + return name, [arg], [] + + def get_usage_pieces(self, ctx): + return [self.make_metavar()] + + def add_to_parser(self, parser, ctx): + parser.add_argument(dest=self.name, nargs=self.nargs, + obj=self) + + +# Circular dependency between decorators and core +from .decorators import command, group diff --git a/app/lib/click/decorators.py b/app/lib/click/decorators.py new file mode 100644 index 0000000..9893452 --- /dev/null +++ b/app/lib/click/decorators.py @@ -0,0 +1,304 @@ +import sys +import inspect + +from functools import update_wrapper + +from ._compat import iteritems +from ._unicodefun import _check_for_unicode_literals +from .utils import echo +from .globals import get_current_context + + +def pass_context(f): + """Marks a callback as wanting to receive the current context + object as first argument. 
+ """ + def new_func(*args, **kwargs): + return f(get_current_context(), *args, **kwargs) + return update_wrapper(new_func, f) + + +def pass_obj(f): + """Similar to :func:`pass_context`, but only pass the object on the + context onwards (:attr:`Context.obj`). This is useful if that object + represents the state of a nested system. + """ + def new_func(*args, **kwargs): + return f(get_current_context().obj, *args, **kwargs) + return update_wrapper(new_func, f) + + +def make_pass_decorator(object_type, ensure=False): + """Given an object type this creates a decorator that will work + similar to :func:`pass_obj` but instead of passing the object of the + current context, it will find the innermost context of type + :func:`object_type`. + + This generates a decorator that works roughly like this:: + + from functools import update_wrapper + + def decorator(f): + @pass_context + def new_func(ctx, *args, **kwargs): + obj = ctx.find_object(object_type) + return ctx.invoke(f, obj, *args, **kwargs) + return update_wrapper(new_func, f) + return decorator + + :param object_type: the type of the object to pass. + :param ensure: if set to `True`, a new object will be created and + remembered on the context if it's not there yet. + """ + def decorator(f): + def new_func(*args, **kwargs): + ctx = get_current_context() + if ensure: + obj = ctx.ensure_object(object_type) + else: + obj = ctx.find_object(object_type) + if obj is None: + raise RuntimeError('Managed to invoke callback without a ' + 'context object of type %r existing' + % object_type.__name__) + return ctx.invoke(f, obj, *args[1:], **kwargs) + return update_wrapper(new_func, f) + return decorator + + +def _make_command(f, name, attrs, cls): + if isinstance(f, Command): + raise TypeError('Attempted to convert a callback into a ' + 'command twice.') + try: + params = f.__click_params__ + params.reverse() + del f.__click_params__ + except AttributeError: + params = [] + help = attrs.get('help') + if help is None: + help = inspect.getdoc(f) + if isinstance(help, bytes): + help = help.decode('utf-8') + else: + help = inspect.cleandoc(help) + attrs['help'] = help + _check_for_unicode_literals() + return cls(name=name or f.__name__.lower(), + callback=f, params=params, **attrs) + + +def command(name=None, cls=None, **attrs): + """Creates a new :class:`Command` and uses the decorated function as + callback. This will also automatically attach all decorated + :func:`option`\s and :func:`argument`\s as parameters to the command. + + The name of the command defaults to the name of the function. If you + want to change that, you can pass the intended name as the first + argument. + + All keyword arguments are forwarded to the underlying command class. + + Once decorated the function turns into a :class:`Command` instance + that can be invoked as a command line utility or be attached to a + command :class:`Group`. + + :param name: the name of the command. This defaults to the function + name. + :param cls: the command class to instantiate. This defaults to + :class:`Command`. + """ + if cls is None: + cls = Command + def decorator(f): + cmd = _make_command(f, name, attrs, cls) + cmd.__doc__ = f.__doc__ + return cmd + return decorator + + +def group(name=None, **attrs): + """Creates a new :class:`Group` with a function as callback. This + works otherwise the same as :func:`command` just that the `cls` + parameter is set to :class:`Group`. 
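Of the decorators above, :func:`make_pass_decorator` with ``ensure=True`` is the most hands-off way to thread state through nested commands. A sketch around a hypothetical ``Repo`` object::

    import click

    class Repo(object):
        def __init__(self, home='.'):
            self.home = home

    pass_repo = click.make_pass_decorator(Repo, ensure=True)

    @click.group()
    @click.option('--home', default='.')
    @click.pass_context
    def cli(ctx, home):
        ctx.obj = Repo(home)

    @cli.command()
    @pass_repo
    def status(repo):
        click.echo('repo home: %s' % repo.home)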
+ """ + attrs.setdefault('cls', Group) + return command(name, **attrs) + + +def _param_memo(f, param): + if isinstance(f, Command): + f.params.append(param) + else: + if not hasattr(f, '__click_params__'): + f.__click_params__ = [] + f.__click_params__.append(param) + + +def argument(*param_decls, **attrs): + """Attaches an argument to the command. All positional arguments are + passed as parameter declarations to :class:`Argument`; all keyword + arguments are forwarded unchanged (except ``cls``). + This is equivalent to creating an :class:`Argument` instance manually + and attaching it to the :attr:`Command.params` list. + + :param cls: the argument class to instantiate. This defaults to + :class:`Argument`. + """ + def decorator(f): + ArgumentClass = attrs.pop('cls', Argument) + _param_memo(f, ArgumentClass(param_decls, **attrs)) + return f + return decorator + + +def option(*param_decls, **attrs): + """Attaches an option to the command. All positional arguments are + passed as parameter declarations to :class:`Option`; all keyword + arguments are forwarded unchanged (except ``cls``). + This is equivalent to creating an :class:`Option` instance manually + and attaching it to the :attr:`Command.params` list. + + :param cls: the option class to instantiate. This defaults to + :class:`Option`. + """ + def decorator(f): + if 'help' in attrs: + attrs['help'] = inspect.cleandoc(attrs['help']) + OptionClass = attrs.pop('cls', Option) + _param_memo(f, OptionClass(param_decls, **attrs)) + return f + return decorator + + +def confirmation_option(*param_decls, **attrs): + """Shortcut for confirmation prompts that can be ignored by passing + ``--yes`` as parameter. + + This is equivalent to decorating a function with :func:`option` with + the following parameters:: + + def callback(ctx, param, value): + if not value: + ctx.abort() + + @click.command() + @click.option('--yes', is_flag=True, callback=callback, + expose_value=False, prompt='Do you want to continue?') + def dropdb(): + pass + """ + def decorator(f): + def callback(ctx, param, value): + if not value: + ctx.abort() + attrs.setdefault('is_flag', True) + attrs.setdefault('callback', callback) + attrs.setdefault('expose_value', False) + attrs.setdefault('prompt', 'Do you want to continue?') + attrs.setdefault('help', 'Confirm the action without prompting.') + return option(*(param_decls or ('--yes',)), **attrs)(f) + return decorator + + +def password_option(*param_decls, **attrs): + """Shortcut for password prompts. + + This is equivalent to decorating a function with :func:`option` with + the following parameters:: + + @click.command() + @click.option('--password', prompt=True, confirmation_prompt=True, + hide_input=True) + def changeadmin(password): + pass + """ + def decorator(f): + attrs.setdefault('prompt', True) + attrs.setdefault('confirmation_prompt', True) + attrs.setdefault('hide_input', True) + return option(*(param_decls or ('--password',)), **attrs)(f) + return decorator + + +def version_option(version=None, *param_decls, **attrs): + """Adds a ``--version`` option which immediately ends the program + printing out the version number. This is implemented as an eager + option that prints the version and exits the program in the callback. + + :param version: the version number to show. If not provided Click + attempts an auto discovery via setuptools. 
+ :param prog_name: the name of the program (defaults to autodetection) + :param message: custom message to show instead of the default + (``'%(prog)s, version %(version)s'``) + :param others: everything else is forwarded to :func:`option`. + """ + if version is None: + module = sys._getframe(1).f_globals.get('__name__') + def decorator(f): + prog_name = attrs.pop('prog_name', None) + message = attrs.pop('message', '%(prog)s, version %(version)s') + + def callback(ctx, param, value): + if not value or ctx.resilient_parsing: + return + prog = prog_name + if prog is None: + prog = ctx.find_root().info_name + ver = version + if ver is None: + try: + import pkg_resources + except ImportError: + pass + else: + for dist in pkg_resources.working_set: + scripts = dist.get_entry_map().get('console_scripts') or {} + for script_name, entry_point in iteritems(scripts): + if entry_point.module_name == module: + ver = dist.version + break + if ver is None: + raise RuntimeError('Could not determine version') + echo(message % { + 'prog': prog, + 'version': ver, + }, color=ctx.color) + ctx.exit() + + attrs.setdefault('is_flag', True) + attrs.setdefault('expose_value', False) + attrs.setdefault('is_eager', True) + attrs.setdefault('help', 'Show the version and exit.') + attrs['callback'] = callback + return option(*(param_decls or ('--version',)), **attrs)(f) + return decorator + + +def help_option(*param_decls, **attrs): + """Adds a ``--help`` option which immediately ends the program + printing out the help page. This is usually unnecessary to add as + this is added by default to all commands unless suppressed. + + Like :func:`version_option`, this is implemented as eager option that + prints in the callback and exits. + + All arguments are forwarded to :func:`option`. + """ + def decorator(f): + def callback(ctx, param, value): + if value and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + attrs.setdefault('is_flag', True) + attrs.setdefault('expose_value', False) + attrs.setdefault('help', 'Show this message and exit.') + attrs.setdefault('is_eager', True) + attrs['callback'] = callback + return option(*(param_decls or ('--help',)), **attrs)(f) + return decorator + + +# Circular dependencies between core and decorators +from .core import Command, Group, Argument, Option diff --git a/app/lib/click/exceptions.py b/app/lib/click/exceptions.py new file mode 100644 index 0000000..74a4542 --- /dev/null +++ b/app/lib/click/exceptions.py @@ -0,0 +1,201 @@ +from ._compat import PY2, filename_to_ui, get_text_stderr +from .utils import echo + + +class ClickException(Exception): + """An exception that Click can handle and show to the user.""" + + #: The exit code for this exception + exit_code = 1 + + def __init__(self, message): + if PY2: + if message is not None: + message = message.encode('utf-8') + Exception.__init__(self, message) + self.message = message + + def format_message(self): + return self.message + + def show(self, file=None): + if file is None: + file = get_text_stderr() + echo('Error: %s' % self.format_message(), file=file) + + +class UsageError(ClickException): + """An internal exception that signals a usage error. This typically + aborts any further handling. + + :param message: the error message to display. + :param ctx: optionally the context that caused this error. Click will + fill in the context automatically in some situations. 
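Raising :exc:`UsageError` from a parameter callback is the supported way to reject bad input; Click attaches the context and renders the message uniformly. A minimal sketch (the option name and limits are illustrative)::

    import click

    def check_port(ctx, param, value):
        if not 0 < value < 65536:
            raise click.UsageError('port must be between 1 and 65535', ctx)
        return value

    @click.command()
    @click.option('--port', type=int, default=8080, callback=check_port)
    def serve(port):
        click.echo('serving on port %d' % port)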
+ """ + exit_code = 2 + + def __init__(self, message, ctx=None): + ClickException.__init__(self, message) + self.ctx = ctx + + def show(self, file=None): + if file is None: + file = get_text_stderr() + color = None + if self.ctx is not None: + color = self.ctx.color + echo(self.ctx.get_usage() + '\n', file=file, color=color) + echo('Error: %s' % self.format_message(), file=file, color=color) + + +class BadParameter(UsageError): + """An exception that formats out a standardized error message for a + bad parameter. This is useful when thrown from a callback or type as + Click will attach contextual information to it (for instance, which + parameter it is). + + .. versionadded:: 2.0 + + :param param: the parameter object that caused this error. This can + be left out, and Click will attach this info itself + if possible. + :param param_hint: a string that shows up as parameter name. This + can be used as alternative to `param` in cases + where custom validation should happen. If it is + a string it's used as such, if it's a list then + each item is quoted and separated. + """ + + def __init__(self, message, ctx=None, param=None, + param_hint=None): + UsageError.__init__(self, message, ctx) + self.param = param + self.param_hint = param_hint + + def format_message(self): + if self.param_hint is not None: + param_hint = self.param_hint + elif self.param is not None: + param_hint = self.param.opts or [self.param.human_readable_name] + else: + return 'Invalid value: %s' % self.message + if isinstance(param_hint, (tuple, list)): + param_hint = ' / '.join('"%s"' % x for x in param_hint) + return 'Invalid value for %s: %s' % (param_hint, self.message) + + +class MissingParameter(BadParameter): + """Raised if click required an option or argument but it was not + provided when invoking the script. + + .. versionadded:: 4.0 + + :param param_type: a string that indicates the type of the parameter. + The default is to inherit the parameter type from + the given `param`. Valid values are ``'parameter'``, + ``'option'`` or ``'argument'``. + """ + + def __init__(self, message=None, ctx=None, param=None, + param_hint=None, param_type=None): + BadParameter.__init__(self, message, ctx, param, param_hint) + self.param_type = param_type + + def format_message(self): + if self.param_hint is not None: + param_hint = self.param_hint + elif self.param is not None: + param_hint = self.param.opts or [self.param.human_readable_name] + else: + param_hint = None + if isinstance(param_hint, (tuple, list)): + param_hint = ' / '.join('"%s"' % x for x in param_hint) + + param_type = self.param_type + if param_type is None and self.param is not None: + param_type = self.param.param_type_name + + msg = self.message + if self.param is not None: + msg_extra = self.param.type.get_missing_message(self.param) + if msg_extra: + if msg: + msg += '. ' + msg_extra + else: + msg = msg_extra + + return 'Missing %s%s%s%s' % ( + param_type, + param_hint and ' %s' % param_hint or '', + msg and '. ' or '.', + msg or '', + ) + + +class NoSuchOption(UsageError): + """Raised if click attempted to handle an option that does not + exist. + + .. 
versionadded:: 4.0 + """ + + def __init__(self, option_name, message=None, possibilities=None, + ctx=None): + if message is None: + message = 'no such option: %s' % option_name + UsageError.__init__(self, message, ctx) + self.option_name = option_name + self.possibilities = possibilities + + def format_message(self): + bits = [self.message] + if self.possibilities: + if len(self.possibilities) == 1: + bits.append('Did you mean %s?' % self.possibilities[0]) + else: + possibilities = sorted(self.possibilities) + bits.append('(Possible options: %s)' % ', '.join(possibilities)) + return ' '.join(bits) + + +class BadOptionUsage(UsageError): + """Raised if an option is generally supplied but the use of the option + was incorrect. This is for instance raised if the number of arguments + for an option is not correct. + + .. versionadded:: 4.0 + """ + + def __init__(self, message, ctx=None): + UsageError.__init__(self, message, ctx) + + +class BadArgumentUsage(UsageError): + """Raised if an argument is generally supplied but the use of the argument + was incorrect. This is for instance raised if the number of values + for an argument is not correct. + + .. versionadded:: 6.0 + """ + + def __init__(self, message, ctx=None): + UsageError.__init__(self, message, ctx) + + +class FileError(ClickException): + """Raised if a file cannot be opened.""" + + def __init__(self, filename, hint=None): + ui_filename = filename_to_ui(filename) + if hint is None: + hint = 'unknown error' + ClickException.__init__(self, hint) + self.ui_filename = ui_filename + self.filename = filename + + def format_message(self): + return 'Could not open file %s: %s' % (self.ui_filename, self.message) + + +class Abort(RuntimeError): + """An internal signalling exception that signals Click to abort.""" diff --git a/app/lib/click/formatting.py b/app/lib/click/formatting.py new file mode 100644 index 0000000..a3d6a4d --- /dev/null +++ b/app/lib/click/formatting.py @@ -0,0 +1,256 @@ +from contextlib import contextmanager +from .termui import get_terminal_size +from .parser import split_opt +from ._compat import term_len + + +# Can force a width. This is used by the test system +FORCED_WIDTH = None + + +def measure_table(rows): + widths = {} + for row in rows: + for idx, col in enumerate(row): + widths[idx] = max(widths.get(idx, 0), term_len(col)) + return tuple(y for x, y in sorted(widths.items())) + + +def iter_rows(rows, col_count): + for row in rows: + row = tuple(row) + yield row + ('',) * (col_count - len(row)) + + +def wrap_text(text, width=78, initial_indent='', subsequent_indent='', + preserve_paragraphs=False): + """A helper function that intelligently wraps text. By default, it + assumes that it operates on a single paragraph of text but if the + `preserve_paragraphs` parameter is provided it will intelligently + handle paragraphs (defined by two empty lines). + + If paragraphs are handled, a paragraph can be prefixed with an empty + line containing the ``\\b`` character (``\\x08``) to indicate that + no rewrapping should happen in that block. + + :param text: the text that should be rewrapped. + :param width: the maximum width for the text. + :param initial_indent: the initial indent that should be placed on the + first line as a string. + :param subsequent_indent: the indent string that should be placed on + each consecutive line. + :param preserve_paragraphs: if this flag is set then the wrapping will + intelligently handle paragraphs. 
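+
+    A small sketch of typical use (the text and widths are illustrative)::
+
+        from click.formatting import wrap_text
+
+        body = 'This helper rewraps long help texts to a given width. ' * 3
+        print(wrap_text(body, width=40,
+                        initial_indent='  ', subsequent_indent='  '))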
+ """ + from ._textwrap import TextWrapper + text = text.expandtabs() + wrapper = TextWrapper(width, initial_indent=initial_indent, + subsequent_indent=subsequent_indent, + replace_whitespace=False) + if not preserve_paragraphs: + return wrapper.fill(text) + + p = [] + buf = [] + indent = None + + def _flush_par(): + if not buf: + return + if buf[0].strip() == '\b': + p.append((indent or 0, True, '\n'.join(buf[1:]))) + else: + p.append((indent or 0, False, ' '.join(buf))) + del buf[:] + + for line in text.splitlines(): + if not line: + _flush_par() + indent = None + else: + if indent is None: + orig_len = term_len(line) + line = line.lstrip() + indent = orig_len - term_len(line) + buf.append(line) + _flush_par() + + rv = [] + for indent, raw, text in p: + with wrapper.extra_indent(' ' * indent): + if raw: + rv.append(wrapper.indent_only(text)) + else: + rv.append(wrapper.fill(text)) + + return '\n\n'.join(rv) + + +class HelpFormatter(object): + """This class helps with formatting text-based help pages. It's + usually just needed for very special internal cases, but it's also + exposed so that developers can write their own fancy outputs. + + At present, it always writes into memory. + + :param indent_increment: the additional increment for each level. + :param width: the width for the text. This defaults to the terminal + width clamped to a maximum of 78. + """ + + def __init__(self, indent_increment=2, width=None, max_width=None): + self.indent_increment = indent_increment + if max_width is None: + max_width = 80 + if width is None: + width = FORCED_WIDTH + if width is None: + width = max(min(get_terminal_size()[0], max_width) - 2, 50) + self.width = width + self.current_indent = 0 + self.buffer = [] + + def write(self, string): + """Writes a unicode string into the internal buffer.""" + self.buffer.append(string) + + def indent(self): + """Increases the indentation.""" + self.current_indent += self.indent_increment + + def dedent(self): + """Decreases the indentation.""" + self.current_indent -= self.indent_increment + + def write_usage(self, prog, args='', prefix='Usage: '): + """Writes a usage line into the buffer. + + :param prog: the program name. + :param args: whitespace separated list of arguments. + :param prefix: the prefix for the first line. + """ + usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog) + text_width = self.width - self.current_indent + + if text_width >= (term_len(usage_prefix) + 20): + # The arguments will fit to the right of the prefix. + indent = ' ' * term_len(usage_prefix) + self.write(wrap_text(args, text_width, + initial_indent=usage_prefix, + subsequent_indent=indent)) + else: + # The prefix is too long, put the arguments on the next line. + self.write(usage_prefix) + self.write('\n') + indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4) + self.write(wrap_text(args, text_width, + initial_indent=indent, + subsequent_indent=indent)) + + self.write('\n') + + def write_heading(self, heading): + """Writes a heading into the buffer.""" + self.write('%*s%s:\n' % (self.current_indent, '', heading)) + + def write_paragraph(self): + """Writes a paragraph into the buffer.""" + if self.buffer: + self.write('\n') + + def write_text(self, text): + """Writes re-indented text into the buffer. This rewraps and + preserves paragraphs. 
+ """ + text_width = max(self.width - self.current_indent, 11) + indent = ' ' * self.current_indent + self.write(wrap_text(text, text_width, + initial_indent=indent, + subsequent_indent=indent, + preserve_paragraphs=True)) + self.write('\n') + + def write_dl(self, rows, col_max=30, col_spacing=2): + """Writes a definition list into the buffer. This is how options + and commands are usually formatted. + + :param rows: a list of two item tuples for the terms and values. + :param col_max: the maximum width of the first column. + :param col_spacing: the number of spaces between the first and + second column. + """ + rows = list(rows) + widths = measure_table(rows) + if len(widths) != 2: + raise TypeError('Expected two columns for definition list') + + first_col = min(widths[0], col_max) + col_spacing + + for first, second in iter_rows(rows, len(widths)): + self.write('%*s%s' % (self.current_indent, '', first)) + if not second: + self.write('\n') + continue + if term_len(first) <= first_col - col_spacing: + self.write(' ' * (first_col - term_len(first))) + else: + self.write('\n') + self.write(' ' * (first_col + self.current_indent)) + + text_width = max(self.width - first_col - 2, 10) + lines = iter(wrap_text(second, text_width).splitlines()) + if lines: + self.write(next(lines) + '\n') + for line in lines: + self.write('%*s%s\n' % ( + first_col + self.current_indent, '', line)) + else: + self.write('\n') + + @contextmanager + def section(self, name): + """Helpful context manager that writes a paragraph, a heading, + and the indents. + + :param name: the section name that is written as heading. + """ + self.write_paragraph() + self.write_heading(name) + self.indent() + try: + yield + finally: + self.dedent() + + @contextmanager + def indentation(self): + """A context manager that increases the indentation.""" + self.indent() + try: + yield + finally: + self.dedent() + + def getvalue(self): + """Returns the buffer contents.""" + return ''.join(self.buffer) + + +def join_options(options): + """Given a list of option strings this joins them in the most appropriate + way and returns them in the form ``(formatted_string, + any_prefix_is_slash)`` where the second item in the tuple is a flag that + indicates if any of the option prefixes was a slash. + """ + rv = [] + any_prefix_is_slash = False + for opt in options: + prefix = split_opt(opt)[0] + if prefix == '/': + any_prefix_is_slash = True + rv.append((len(prefix), opt)) + + rv.sort(key=lambda x: x[0]) + + rv = ', '.join(x[1] for x in rv) + return rv, any_prefix_is_slash diff --git a/app/lib/click/globals.py b/app/lib/click/globals.py new file mode 100644 index 0000000..14338e6 --- /dev/null +++ b/app/lib/click/globals.py @@ -0,0 +1,48 @@ +from threading import local + + +_local = local() + + +def get_current_context(silent=False): + """Returns the current click context. This can be used as a way to + access the current context object from anywhere. This is a more implicit + alternative to the :func:`pass_context` decorator. This function is + primarily useful for helpers such as :func:`echo` which might be + interested in changing it's behavior based on the current context. + + To push the current context, :meth:`Context.scope` can be used. + + .. versionadded:: 5.0 + + :param silent: is set to `True` the return value is `None` if no context + is available. The default behavior is to raise a + :exc:`RuntimeError`. 
+ """ + try: + return getattr(_local, 'stack')[-1] + except (AttributeError, IndexError): + if not silent: + raise RuntimeError('There is no active click context.') + + +def push_context(ctx): + """Pushes a new context to the current stack.""" + _local.__dict__.setdefault('stack', []).append(ctx) + + +def pop_context(): + """Removes the top level from the stack.""" + _local.stack.pop() + + +def resolve_color_default(color=None): + """"Internal helper to get the default value of the color flag. If a + value is passed it's returned unchanged, otherwise it's looked up from + the current context. + """ + if color is not None: + return color + ctx = get_current_context(silent=True) + if ctx is not None: + return ctx.color diff --git a/app/lib/click/parser.py b/app/lib/click/parser.py new file mode 100644 index 0000000..9775c9f --- /dev/null +++ b/app/lib/click/parser.py @@ -0,0 +1,426 @@ +# -*- coding: utf-8 -*- +""" + click.parser + ~~~~~~~~~~~~ + + This module started out as largely a copy paste from the stdlib's + optparse module with the features removed that we do not need from + optparse because we implement them in Click on a higher level (for + instance type handling, help formatting and a lot more). + + The plan is to remove more and more from here over time. + + The reason this is a different module and not optparse from the stdlib + is that there are differences in 2.x and 3.x about the error messages + generated and optparse in the stdlib uses gettext for no good reason + and might cause us issues. +""" +import re +from collections import deque +from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \ + BadArgumentUsage + + +def _unpack_args(args, nargs_spec): + """Given an iterable of arguments and an iterable of nargs specifications, + it returns a tuple with all the unpacked arguments at the first index + and all remaining arguments as the second. + + The nargs specification is the number of arguments that should be consumed + or `-1` to indicate that this position should eat up all the remainders. + + Missing items are filled with `None`. + """ + args = deque(args) + nargs_spec = deque(nargs_spec) + rv = [] + spos = None + + def _fetch(c): + try: + if spos is None: + return c.popleft() + else: + return c.pop() + except IndexError: + return None + + while nargs_spec: + nargs = _fetch(nargs_spec) + if nargs == 1: + rv.append(_fetch(args)) + elif nargs > 1: + x = [_fetch(args) for _ in range(nargs)] + # If we're reversed, we're pulling in the arguments in reverse, + # so we need to turn them around. + if spos is not None: + x.reverse() + rv.append(tuple(x)) + elif nargs < 0: + if spos is not None: + raise TypeError('Cannot have two nargs < 0') + spos = len(rv) + rv.append(None) + + # spos is the position of the wildcard (star). If it's not `None`, + # we fill it with the remainder. 
+ if spos is not None: + rv[spos] = tuple(args) + args = [] + rv[spos + 1:] = reversed(rv[spos + 1:]) + + return tuple(rv), list(args) + + +def _error_opt_args(nargs, opt): + if nargs == 1: + raise BadOptionUsage('%s option requires an argument' % opt) + raise BadOptionUsage('%s option requires %d arguments' % (opt, nargs)) + + +def split_opt(opt): + first = opt[:1] + if first.isalnum(): + return '', opt + if opt[1:2] == first: + return opt[:2], opt[2:] + return first, opt[1:] + + +def normalize_opt(opt, ctx): + if ctx is None or ctx.token_normalize_func is None: + return opt + prefix, opt = split_opt(opt) + return prefix + ctx.token_normalize_func(opt) + + +def split_arg_string(string): + """Given an argument string this attempts to split it into small parts.""" + rv = [] + for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'" + r'|"([^"\\]*(?:\\.[^"\\]*)*)"' + r'|\S+)\s*', string, re.S): + arg = match.group().strip() + if arg[:1] == arg[-1:] and arg[:1] in '"\'': + arg = arg[1:-1].encode('ascii', 'backslashreplace') \ + .decode('unicode-escape') + try: + arg = type(string)(arg) + except UnicodeError: + pass + rv.append(arg) + return rv + + +class Option(object): + + def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None): + self._short_opts = [] + self._long_opts = [] + self.prefixes = set() + + for opt in opts: + prefix, value = split_opt(opt) + if not prefix: + raise ValueError('Invalid start character for option (%s)' + % opt) + self.prefixes.add(prefix[0]) + if len(prefix) == 1 and len(value) == 1: + self._short_opts.append(opt) + else: + self._long_opts.append(opt) + self.prefixes.add(prefix) + + if action is None: + action = 'store' + + self.dest = dest + self.action = action + self.nargs = nargs + self.const = const + self.obj = obj + + @property + def takes_value(self): + return self.action in ('store', 'append') + + def process(self, value, state): + if self.action == 'store': + state.opts[self.dest] = value + elif self.action == 'store_const': + state.opts[self.dest] = self.const + elif self.action == 'append': + state.opts.setdefault(self.dest, []).append(value) + elif self.action == 'append_const': + state.opts.setdefault(self.dest, []).append(self.const) + elif self.action == 'count': + state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 + else: + raise ValueError('unknown action %r' % self.action) + state.order.append(self.obj) + + +class Argument(object): + + def __init__(self, dest, nargs=1, obj=None): + self.dest = dest + self.nargs = nargs + self.obj = obj + + def process(self, value, state): + if self.nargs > 1: + holes = sum(1 for x in value if x is None) + if holes == len(value): + value = None + elif holes != 0: + raise BadArgumentUsage('argument %s takes %d values' + % (self.dest, self.nargs)) + state.opts[self.dest] = value + state.order.append(self.obj) + + +class ParsingState(object): + + def __init__(self, rargs): + self.opts = {} + self.largs = [] + self.rargs = rargs + self.order = [] + + +class OptionParser(object): + """The option parser is an internal class that is ultimately used to + parse options and arguments. It's modelled after optparse and brings + a similar but vastly simplified API. It should generally not be used + directly as the high level Click classes wrap it for you. + + It's not nearly as extensible as optparse or argparse as it does not + implement features that are implemented on a higher level (such as + types or defaults). 
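+
+    A minimal stand-alone sketch (option and argument names are
+    illustrative)::
+
+        from click.parser import OptionParser
+
+        parser = OptionParser()
+        parser.add_option(['-n', '--name'], dest='name')
+        parser.add_argument('src', nargs=1)
+        opts, leftover, order = parser.parse_args(['-n', 'x', 'src.txt'])
+        # opts == {'name': 'x', 'src': 'src.txt'}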
+ + :param ctx: optionally the :class:`~click.Context` where this parser + should go with. + """ + + def __init__(self, ctx=None): + #: The :class:`~click.Context` for this parser. This might be + #: `None` for some advanced use cases. + self.ctx = ctx + #: This controls how the parser deals with interspersed arguments. + #: If this is set to `False`, the parser will stop on the first + #: non-option. Click uses this to implement nested subcommands + #: safely. + self.allow_interspersed_args = True + #: This tells the parser how to deal with unknown options. By + #: default it will error out (which is sensible), but there is a + #: second mode where it will ignore it and continue processing + #: after shifting all the unknown options into the resulting args. + self.ignore_unknown_options = False + if ctx is not None: + self.allow_interspersed_args = ctx.allow_interspersed_args + self.ignore_unknown_options = ctx.ignore_unknown_options + self._short_opt = {} + self._long_opt = {} + self._opt_prefixes = set(['-', '--']) + self._args = [] + + def add_option(self, opts, dest, action=None, nargs=1, const=None, + obj=None): + """Adds a new option named `dest` to the parser. The destination + is not inferred (unlike with optparse) and needs to be explicitly + provided. Action can be any of ``store``, ``store_const``, + ``append``, ``appnd_const`` or ``count``. + + The `obj` can be used to identify the option in the order list + that is returned from the parser. + """ + if obj is None: + obj = dest + opts = [normalize_opt(opt, self.ctx) for opt in opts] + option = Option(opts, dest, action=action, nargs=nargs, + const=const, obj=obj) + self._opt_prefixes.update(option.prefixes) + for opt in option._short_opts: + self._short_opt[opt] = option + for opt in option._long_opts: + self._long_opt[opt] = option + + def add_argument(self, dest, nargs=1, obj=None): + """Adds a positional argument named `dest` to the parser. + + The `obj` can be used to identify the option in the order list + that is returned from the parser. + """ + if obj is None: + obj = dest + self._args.append(Argument(dest=dest, nargs=nargs, obj=obj)) + + def parse_args(self, args): + """Parses positional arguments and returns ``(values, args, order)`` + for the parsed options and arguments as well as the leftover + arguments if there are any. The order is a list of objects as they + appear on the command line. If arguments appear multiple times they + will be memorized multiple times as well. + """ + state = ParsingState(args) + try: + self._process_args_for_options(state) + self._process_args_for_args(state) + except UsageError: + if self.ctx is None or not self.ctx.resilient_parsing: + raise + return state.opts, state.largs, state.order + + def _process_args_for_args(self, state): + pargs, args = _unpack_args(state.largs + state.rargs, + [x.nargs for x in self._args]) + + for idx, arg in enumerate(self._args): + arg.process(pargs[idx], state) + + state.largs = args + state.rargs = [] + + def _process_args_for_options(self, state): + while state.rargs: + arg = state.rargs.pop(0) + arglen = len(arg) + # Double dashes always handled explicitly regardless of what + # prefixes are valid. 
+ if arg == '--': + return + elif arg[:1] in self._opt_prefixes and arglen > 1: + self._process_opts(arg, state) + elif self.allow_interspersed_args: + state.largs.append(arg) + else: + state.rargs.insert(0, arg) + return + + # Say this is the original argument list: + # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] + # ^ + # (we are about to process arg(i)). + # + # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of + # [arg0, ..., arg(i-1)] (any options and their arguments will have + # been removed from largs). + # + # The while loop will usually consume 1 or more arguments per pass. + # If it consumes 1 (eg. arg is an option that takes no arguments), + # then after _process_arg() is done the situation is: + # + # largs = subset of [arg0, ..., arg(i)] + # rargs = [arg(i+1), ..., arg(N-1)] + # + # If allow_interspersed_args is false, largs will always be + # *empty* -- still a subset of [arg0, ..., arg(i-1)], but + # not a very interesting subset! + + def _match_long_opt(self, opt, explicit_value, state): + if opt not in self._long_opt: + possibilities = [word for word in self._long_opt + if word.startswith(opt)] + raise NoSuchOption(opt, possibilities=possibilities) + + option = self._long_opt[opt] + if option.takes_value: + # At this point it's safe to modify rargs by injecting the + # explicit value, because no exception is raised in this + # branch. This means that the inserted value will be fully + # consumed. + if explicit_value is not None: + state.rargs.insert(0, explicit_value) + + nargs = option.nargs + if len(state.rargs) < nargs: + _error_opt_args(nargs, opt) + elif nargs == 1: + value = state.rargs.pop(0) + else: + value = tuple(state.rargs[:nargs]) + del state.rargs[:nargs] + + elif explicit_value is not None: + raise BadOptionUsage('%s option does not take a value' % opt) + + else: + value = None + + option.process(value, state) + + def _match_short_opt(self, arg, state): + stop = False + i = 1 + prefix = arg[0] + unknown_options = [] + + for ch in arg[1:]: + opt = normalize_opt(prefix + ch, self.ctx) + option = self._short_opt.get(opt) + i += 1 + + if not option: + if self.ignore_unknown_options: + unknown_options.append(ch) + continue + raise NoSuchOption(opt) + if option.takes_value: + # Any characters left in arg? Pretend they're the + # next arg, and stop consuming characters of arg. + if i < len(arg): + state.rargs.insert(0, arg[i:]) + stop = True + + nargs = option.nargs + if len(state.rargs) < nargs: + _error_opt_args(nargs, opt) + elif nargs == 1: + value = state.rargs.pop(0) + else: + value = tuple(state.rargs[:nargs]) + del state.rargs[:nargs] + + else: + value = None + + option.process(value, state) + + if stop: + break + + # If we got any unknown options we re-combinate the string of the + # remaining options and re-attach the prefix, then report that + # to the state as new larg. This way there is basic combinatorics + # that can be achieved while still ignoring unknown arguments. + if self.ignore_unknown_options and unknown_options: + state.largs.append(prefix + ''.join(unknown_options)) + + def _process_opts(self, arg, state): + explicit_value = None + # Long option handling happens in two parts. The first part is + # supporting explicitly attached values. In any case, we will try + # to long match the option first. 
+ if '=' in arg: + long_opt, explicit_value = arg.split('=', 1) + else: + long_opt = arg + norm_long_opt = normalize_opt(long_opt, self.ctx) + + # At this point we will match the (assumed) long option through + # the long option matching code. Note that this allows options + # like "-foo" to be matched as long options. + try: + self._match_long_opt(norm_long_opt, explicit_value, state) + except NoSuchOption: + # At this point the long option matching failed, and we need + # to try with short options. However there is a special rule + # which says, that if we have a two character options prefix + # (applies to "--foo" for instance), we do not dispatch to the + # short option code and will instead raise the no option + # error. + if arg[:2] not in self._opt_prefixes: + return self._match_short_opt(arg, state) + if not self.ignore_unknown_options: + raise + state.largs.append(arg) diff --git a/app/lib/click/termui.py b/app/lib/click/termui.py new file mode 100644 index 0000000..d9fba52 --- /dev/null +++ b/app/lib/click/termui.py @@ -0,0 +1,539 @@ +import os +import sys +import struct + +from ._compat import raw_input, text_type, string_types, \ + isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN +from .utils import echo +from .exceptions import Abort, UsageError +from .types import convert_type +from .globals import resolve_color_default + + +# The prompt functions to use. The doc tools currently override these +# functions to customize how they work. +visible_prompt_func = raw_input + +_ansi_colors = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', + 'cyan', 'white', 'reset') +_ansi_reset_all = '\033[0m' + + +def hidden_prompt_func(prompt): + import getpass + return getpass.getpass(prompt) + + +def _build_prompt(text, suffix, show_default=False, default=None): + prompt = text + if default is not None and show_default: + prompt = '%s [%s]' % (prompt, default) + return prompt + suffix + + +def prompt(text, default=None, hide_input=False, + confirmation_prompt=False, type=None, + value_proc=None, prompt_suffix=': ', + show_default=True, err=False): + """Prompts a user for input. This is a convenience function that can + be used to prompt a user for input later. + + If the user aborts the input by sending a interrupt signal, this + function will catch it and raise a :exc:`Abort` exception. + + .. versionadded:: 6.0 + Added unicode support for cmd.exe on Windows. + + .. versionadded:: 4.0 + Added the `err` parameter. + + :param text: the text to show for the prompt. + :param default: the default value to use if no input happens. If this + is not given it will prompt until it's aborted. + :param hide_input: if this is set to true then the input value will + be hidden. + :param confirmation_prompt: asks for confirmation for the value. + :param type: the type to use to check the value against. + :param value_proc: if this parameter is provided it's a function that + is invoked instead of the type conversion to + convert a value. + :param prompt_suffix: a suffix that should be added to the prompt. + :param show_default: shows or hides the default value in the prompt. + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``, the same as with echo. 
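+
+    Typical invocations, for illustration (the prompt texts and defaults
+    are arbitrary)::
+
+        import click
+
+        name = click.prompt('Your name', default='world')
+        age = click.prompt('Your age', type=int)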
+ """ + result = None + + def prompt_func(text): + f = hide_input and hidden_prompt_func or visible_prompt_func + try: + # Write the prompt separately so that we get nice + # coloring through colorama on Windows + echo(text, nl=False, err=err) + return f('') + except (KeyboardInterrupt, EOFError): + # getpass doesn't print a newline if the user aborts input with ^C. + # Allegedly this behavior is inherited from getpass(3). + # A doc bug has been filed at https://bugs.python.org/issue24711 + if hide_input: + echo(None, err=err) + raise Abort() + + if value_proc is None: + value_proc = convert_type(type, default) + + prompt = _build_prompt(text, prompt_suffix, show_default, default) + + while 1: + while 1: + value = prompt_func(prompt) + if value: + break + # If a default is set and used, then the confirmation + # prompt is always skipped because that's the only thing + # that really makes sense. + elif default is not None: + return default + try: + result = value_proc(value) + except UsageError as e: + echo('Error: %s' % e.message, err=err) + continue + if not confirmation_prompt: + return result + while 1: + value2 = prompt_func('Repeat for confirmation: ') + if value2: + break + if value == value2: + return result + echo('Error: the two entered values do not match', err=err) + + +def confirm(text, default=False, abort=False, prompt_suffix=': ', + show_default=True, err=False): + """Prompts for confirmation (yes/no question). + + If the user aborts the input by sending a interrupt signal this + function will catch it and raise a :exc:`Abort` exception. + + .. versionadded:: 4.0 + Added the `err` parameter. + + :param text: the question to ask. + :param default: the default for the prompt. + :param abort: if this is set to `True` a negative answer aborts the + exception by raising :exc:`Abort`. + :param prompt_suffix: a suffix that should be added to the prompt. + :param show_default: shows or hides the default value in the prompt. + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``, the same as with echo. + """ + prompt = _build_prompt(text, prompt_suffix, show_default, + default and 'Y/n' or 'y/N') + while 1: + try: + # Write the prompt separately so that we get nice + # coloring through colorama on Windows + echo(prompt, nl=False, err=err) + value = visible_prompt_func('').lower().strip() + except (KeyboardInterrupt, EOFError): + raise Abort() + if value in ('y', 'yes'): + rv = True + elif value in ('n', 'no'): + rv = False + elif value == '': + rv = default + else: + echo('Error: invalid input', err=err) + continue + break + if abort and not rv: + raise Abort() + return rv + + +def get_terminal_size(): + """Returns the current size of the terminal as tuple in the form + ``(width, height)`` in columns and rows. 
+ """ + # If shutil has get_terminal_size() (Python 3.3 and later) use that + if sys.version_info >= (3, 3): + import shutil + shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None) + if shutil_get_terminal_size: + sz = shutil_get_terminal_size() + return sz.columns, sz.lines + + if get_winterm_size is not None: + return get_winterm_size() + + def ioctl_gwinsz(fd): + try: + import fcntl + import termios + cr = struct.unpack( + 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) + except Exception: + return + return cr + + cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2) + if not cr: + try: + fd = os.open(os.ctermid(), os.O_RDONLY) + try: + cr = ioctl_gwinsz(fd) + finally: + os.close(fd) + except Exception: + pass + if not cr or not cr[0] or not cr[1]: + cr = (os.environ.get('LINES', 25), + os.environ.get('COLUMNS', DEFAULT_COLUMNS)) + return int(cr[1]), int(cr[0]) + + +def echo_via_pager(text, color=None): + """This function takes a text and shows it via an environment specific + pager on stdout. + + .. versionchanged:: 3.0 + Added the `color` flag. + + :param text: the text to page. + :param color: controls if the pager supports ANSI colors or not. The + default is autodetection. + """ + color = resolve_color_default(color) + if not isinstance(text, string_types): + text = text_type(text) + from ._termui_impl import pager + return pager(text + '\n', color) + + +def progressbar(iterable=None, length=None, label=None, show_eta=True, + show_percent=None, show_pos=False, + item_show_func=None, fill_char='#', empty_char='-', + bar_template='%(label)s [%(bar)s] %(info)s', + info_sep=' ', width=36, file=None, color=None): + """This function creates an iterable context manager that can be used + to iterate over something while showing a progress bar. It will + either iterate over the `iterable` or `length` items (that are counted + up). While iteration happens, this function will print a rendered + progress bar to the given `file` (defaults to stdout) and will attempt + to calculate remaining time and more. By default, this progress bar + will not be rendered if the file is not a terminal. + + The context manager creates the progress bar. When the context + manager is entered the progress bar is already displayed. With every + iteration over the progress bar, the iterable passed to the bar is + advanced and the bar is updated. When the context manager exits, + a newline is printed and the progress bar is finalized on screen. + + No printing must happen or the progress bar will be unintentionally + destroyed. + + Example usage:: + + with progressbar(items) as bar: + for item in bar: + do_something_with(item) + + Alternatively, if no iterable is specified, one can manually update the + progress bar through the `update()` method instead of directly + iterating over the progress bar. The update method accepts the number + of steps to increment the bar with:: + + with progressbar(length=chunks.total_bytes) as bar: + for chunk in chunks: + process_chunk(chunk) + bar.update(chunks.bytes) + + .. versionadded:: 2.0 + + .. versionadded:: 4.0 + Added the `color` parameter. Added a `update` method to the + progressbar object. + + :param iterable: an iterable to iterate over. If not provided the length + is required. + :param length: the number of items to iterate over. By default the + progressbar will attempt to ask the iterator about its + length, which might or might not work. If an iterable is + also provided this parameter can be used to override the + length. 
If an iterable is not provided the progress bar + will iterate over a range of that length. + :param label: the label to show next to the progress bar. + :param show_eta: enables or disables the estimated time display. This is + automatically disabled if the length cannot be + determined. + :param show_percent: enables or disables the percentage display. The + default is `True` if the iterable has a length or + `False` if not. + :param show_pos: enables or disables the absolute position display. The + default is `False`. + :param item_show_func: a function called with the current item which + can return a string to show the current item + next to the progress bar. Note that the current + item can be `None`! + :param fill_char: the character to use to show the filled part of the + progress bar. + :param empty_char: the character to use to show the non-filled part of + the progress bar. + :param bar_template: the format string to use as template for the bar. + The parameters in it are ``label`` for the label, + ``bar`` for the progress bar and ``info`` for the + info section. + :param info_sep: the separator between multiple info items (eta etc.) + :param width: the width of the progress bar in characters, 0 means full + terminal width + :param file: the file to write to. If this is not a terminal then + only the label is printed. + :param color: controls if the terminal supports ANSI colors or not. The + default is autodetection. This is only needed if ANSI + codes are included anywhere in the progress bar output + which is not the case by default. + """ + from ._termui_impl import ProgressBar + color = resolve_color_default(color) + return ProgressBar(iterable=iterable, length=length, show_eta=show_eta, + show_percent=show_percent, show_pos=show_pos, + item_show_func=item_show_func, fill_char=fill_char, + empty_char=empty_char, bar_template=bar_template, + info_sep=info_sep, file=file, label=label, + width=width, color=color) + + +def clear(): + """Clears the terminal screen. This will have the effect of clearing + the whole visible space of the terminal and moving the cursor to the + top left. This does not do anything if not connected to a terminal. + + .. versionadded:: 2.0 + """ + if not isatty(sys.stdout): + return + # If we're on Windows and we don't have colorama available, then we + # clear the screen by shelling out. Otherwise we can use an escape + # sequence. + if WIN: + os.system('cls') + else: + sys.stdout.write('\033[2J\033[1;1H') + + +def style(text, fg=None, bg=None, bold=None, dim=None, underline=None, + blink=None, reverse=None, reset=True): + """Styles a text with ANSI styles and returns the new string. By + default the styling is self contained which means that at the end + of the string a reset code is issued. This can be prevented by + passing ``reset=False``. + + Examples:: + + click.echo(click.style('Hello World!', fg='green')) + click.echo(click.style('ATTENTION!', blink=True)) + click.echo(click.style('Some things', reverse=True, fg='cyan')) + + Supported color names: + + * ``black`` (might be a gray) + * ``red`` + * ``green`` + * ``yellow`` (might be an orange) + * ``blue`` + * ``magenta`` + * ``cyan`` + * ``white`` (might be light gray) + * ``reset`` (reset the color code only) + + .. versionadded:: 2.0 + + :param text: the string to style with ansi codes. + :param fg: if provided this will become the foreground color. + :param bg: if provided this will become the background color. + :param bold: if provided this will enable or disable bold mode. 
+ :param dim: if provided this will enable or disable dim mode. This is + badly supported. + :param underline: if provided this will enable or disable underline. + :param blink: if provided this will enable or disable blinking. + :param reverse: if provided this will enable or disable inverse + rendering (foreground becomes background and the + other way round). + :param reset: by default a reset-all code is added at the end of the + string which means that styles do not carry over. This + can be disabled to compose styles. + """ + bits = [] + if fg: + try: + bits.append('\033[%dm' % (_ansi_colors.index(fg) + 30)) + except ValueError: + raise TypeError('Unknown color %r' % fg) + if bg: + try: + bits.append('\033[%dm' % (_ansi_colors.index(bg) + 40)) + except ValueError: + raise TypeError('Unknown color %r' % bg) + if bold is not None: + bits.append('\033[%dm' % (1 if bold else 22)) + if dim is not None: + bits.append('\033[%dm' % (2 if dim else 22)) + if underline is not None: + bits.append('\033[%dm' % (4 if underline else 24)) + if blink is not None: + bits.append('\033[%dm' % (5 if blink else 25)) + if reverse is not None: + bits.append('\033[%dm' % (7 if reverse else 27)) + bits.append(text) + if reset: + bits.append(_ansi_reset_all) + return ''.join(bits) + + +def unstyle(text): + """Removes ANSI styling information from a string. Usually it's not + necessary to use this function as Click's echo function will + automatically remove styling if necessary. + + .. versionadded:: 2.0 + + :param text: the text to remove style information from. + """ + return strip_ansi(text) + + +def secho(text, file=None, nl=True, err=False, color=None, **styles): + """This function combines :func:`echo` and :func:`style` into one + call. As such the following two calls are the same:: + + click.secho('Hello World!', fg='green') + click.echo(click.style('Hello World!', fg='green')) + + All keyword arguments are forwarded to the underlying functions + depending on which one they go with. + + .. versionadded:: 2.0 + """ + return echo(style(text, **styles), file=file, nl=nl, err=err, color=color) + + +def edit(text=None, editor=None, env=None, require_save=True, + extension='.txt', filename=None): + r"""Edits the given text in the defined editor. If an editor is given + (should be the full path to the executable but the regular operating + system search path is used for finding the executable) it overrides + the detected editor. Optionally, some environment variables can be + used. If the editor is closed without changes, `None` is returned. In + case a file is edited directly the return value is always `None` and + `require_save` and `extension` are ignored. + + If the editor cannot be opened a :exc:`UsageError` is raised. + + Note for Windows: to simplify cross-platform usage, the newlines are + automatically converted from POSIX to Windows and vice versa. As such, + the message here will have ``\n`` as newline markers. + + :param text: the text to edit. + :param editor: optionally the editor to use. Defaults to automatic + detection. + :param env: environment variables to forward to the editor. + :param require_save: if this is true, then not saving in the editor + will make the return value become `None`. + :param extension: the extension to tell the editor about. This defaults + to `.txt` but changing this might change syntax + highlighting. + :param filename: if provided it will edit this file instead of the + provided text contents. It will not use a temporary + file as an indirection in that case. 
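+
+    A short sketch (the seed text is illustrative)::
+
+        import click
+
+        text = click.edit('Replace this text, then save and close.')
+        if text is not None:
+            click.echo(text)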
+ """ + from ._termui_impl import Editor + editor = Editor(editor=editor, env=env, require_save=require_save, + extension=extension) + if filename is None: + return editor.edit(text) + editor.edit_file(filename) + + +def launch(url, wait=False, locate=False): + """This function launches the given URL (or filename) in the default + viewer application for this file type. If this is an executable, it + might launch the executable in a new session. The return value is + the exit code of the launched application. Usually, ``0`` indicates + success. + + Examples:: + + click.launch('http://click.pocoo.org/') + click.launch('/my/downloaded/file', locate=True) + + .. versionadded:: 2.0 + + :param url: URL or filename of the thing to launch. + :param wait: waits for the program to stop. + :param locate: if this is set to `True` then instead of launching the + application associated with the URL it will attempt to + launch a file manager with the file located. This + might have weird effects if the URL does not point to + the filesystem. + """ + from ._termui_impl import open_url + return open_url(url, wait=wait, locate=locate) + + +# If this is provided, getchar() calls into this instead. This is used +# for unittesting purposes. +_getchar = None + + +def getchar(echo=False): + """Fetches a single character from the terminal and returns it. This + will always return a unicode character and under certain rare + circumstances this might return more than one character. The + situations which more than one character is returned is when for + whatever reason multiple characters end up in the terminal buffer or + standard input was not actually a terminal. + + Note that this will always read from the terminal, even if something + is piped into the standard input. + + .. versionadded:: 2.0 + + :param echo: if set to `True`, the character read will also show up on + the terminal. The default is to not show it. + """ + f = _getchar + if f is None: + from ._termui_impl import getchar as f + return f(echo) + + +def pause(info='Press any key to continue ...', err=False): + """This command stops execution and waits for the user to press any + key to continue. This is similar to the Windows batch "pause" + command. If the program is not run through a terminal, this command + will instead do nothing. + + .. versionadded:: 2.0 + + .. versionadded:: 4.0 + Added the `err` parameter. + + :param info: the info string to print before pausing. + :param err: if set to message goes to ``stderr`` instead of + ``stdout``, the same as with echo. + """ + if not isatty(sys.stdin) or not isatty(sys.stdout): + return + try: + if info: + echo(info, nl=False, err=err) + try: + getchar() + except (KeyboardInterrupt, EOFError): + pass + finally: + if info: + echo(err=err) diff --git a/app/lib/click/testing.py b/app/lib/click/testing.py new file mode 100644 index 0000000..4416c77 --- /dev/null +++ b/app/lib/click/testing.py @@ -0,0 +1,322 @@ +import os +import sys +import shutil +import tempfile +import contextlib + +from ._compat import iteritems, PY2 + + +# If someone wants to vendor click, we want to ensure the +# correct package is discovered. Ideally we could use a +# relative import here but unfortunately Python does not +# support that. 
+clickpkg = sys.modules[__name__.rsplit('.', 1)[0]] + + +if PY2: + from cStringIO import StringIO +else: + import io + from ._compat import _find_binary_reader + + +class EchoingStdin(object): + + def __init__(self, input, output): + self._input = input + self._output = output + + def __getattr__(self, x): + return getattr(self._input, x) + + def _echo(self, rv): + self._output.write(rv) + return rv + + def read(self, n=-1): + return self._echo(self._input.read(n)) + + def readline(self, n=-1): + return self._echo(self._input.readline(n)) + + def readlines(self): + return [self._echo(x) for x in self._input.readlines()] + + def __iter__(self): + return iter(self._echo(x) for x in self._input) + + def __repr__(self): + return repr(self._input) + + +def make_input_stream(input, charset): + # Is already an input stream. + if hasattr(input, 'read'): + if PY2: + return input + rv = _find_binary_reader(input) + if rv is not None: + return rv + raise TypeError('Could not find binary reader for input stream.') + + if input is None: + input = b'' + elif not isinstance(input, bytes): + input = input.encode(charset) + if PY2: + return StringIO(input) + return io.BytesIO(input) + + +class Result(object): + """Holds the captured result of an invoked CLI script.""" + + def __init__(self, runner, output_bytes, exit_code, exception, + exc_info=None): + #: The runner that created the result + self.runner = runner + #: The output as bytes. + self.output_bytes = output_bytes + #: The exit code as integer. + self.exit_code = exit_code + #: The exception that happend if one did. + self.exception = exception + #: The traceback + self.exc_info = exc_info + + @property + def output(self): + """The output as unicode string.""" + return self.output_bytes.decode(self.runner.charset, 'replace') \ + .replace('\r\n', '\n') + + def __repr__(self): + return '' % ( + self.exception and repr(self.exception) or 'okay', + ) + + +class CliRunner(object): + """The CLI runner provides functionality to invoke a Click command line + script for unittesting purposes in a isolated environment. This only + works in single-threaded systems without any concurrency as it changes the + global interpreter state. + + :param charset: the character set for the input and output data. This is + UTF-8 by default and should not be changed currently as + the reporting to Click only works in Python 2 properly. + :param env: a dictionary with environment variables for overriding. + :param echo_stdin: if this is set to `True`, then reading from stdin writes + to stdout. This is useful for showing examples in + some circumstances. Note that regular prompts + will automatically echo the input. + """ + + def __init__(self, charset=None, env=None, echo_stdin=False): + if charset is None: + charset = 'utf-8' + self.charset = charset + self.env = env or {} + self.echo_stdin = echo_stdin + + def get_default_prog_name(self, cli): + """Given a command object it will return the default program name + for it. The default is the `name` attribute or ``"root"`` if not + set. + """ + return cli.name or 'root' + + def make_env(self, overrides=None): + """Returns the environment overrides for invoking a script.""" + rv = dict(self.env) + if overrides: + rv.update(overrides) + return rv + + @contextlib.contextmanager + def isolation(self, input=None, env=None, color=False): + """A context manager that sets up the isolation for invoking of a + command line tool. 
This sets up stdin with the given input data + and `os.environ` with the overrides from the given dictionary. + This also rebinds some internals in Click to be mocked (like the + prompt functionality). + + This is automatically done in the :meth:`invoke` method. + + .. versionadded:: 4.0 + The ``color`` parameter was added. + + :param input: the input stream to put into sys.stdin. + :param env: the environment overrides as dictionary. + :param color: whether the output should contain color codes. The + application can still override this explicitly. + """ + input = make_input_stream(input, self.charset) + + old_stdin = sys.stdin + old_stdout = sys.stdout + old_stderr = sys.stderr + old_forced_width = clickpkg.formatting.FORCED_WIDTH + clickpkg.formatting.FORCED_WIDTH = 80 + + env = self.make_env(env) + + if PY2: + sys.stdout = sys.stderr = bytes_output = StringIO() + if self.echo_stdin: + input = EchoingStdin(input, bytes_output) + else: + bytes_output = io.BytesIO() + if self.echo_stdin: + input = EchoingStdin(input, bytes_output) + input = io.TextIOWrapper(input, encoding=self.charset) + sys.stdout = sys.stderr = io.TextIOWrapper( + bytes_output, encoding=self.charset) + + sys.stdin = input + + def visible_input(prompt=None): + sys.stdout.write(prompt or '') + val = input.readline().rstrip('\r\n') + sys.stdout.write(val + '\n') + sys.stdout.flush() + return val + + def hidden_input(prompt=None): + sys.stdout.write((prompt or '') + '\n') + sys.stdout.flush() + return input.readline().rstrip('\r\n') + + def _getchar(echo): + char = sys.stdin.read(1) + if echo: + sys.stdout.write(char) + sys.stdout.flush() + return char + + default_color = color + def should_strip_ansi(stream=None, color=None): + if color is None: + return not default_color + return not color + + old_visible_prompt_func = clickpkg.termui.visible_prompt_func + old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func + old__getchar_func = clickpkg.termui._getchar + old_should_strip_ansi = clickpkg.utils.should_strip_ansi + clickpkg.termui.visible_prompt_func = visible_input + clickpkg.termui.hidden_prompt_func = hidden_input + clickpkg.termui._getchar = _getchar + clickpkg.utils.should_strip_ansi = should_strip_ansi + + old_env = {} + try: + for key, value in iteritems(env): + old_env[key] = os.environ.get(key) + if value is None: + try: + del os.environ[key] + except Exception: + pass + else: + os.environ[key] = value + yield bytes_output + finally: + for key, value in iteritems(old_env): + if value is None: + try: + del os.environ[key] + except Exception: + pass + else: + os.environ[key] = value + sys.stdout = old_stdout + sys.stderr = old_stderr + sys.stdin = old_stdin + clickpkg.termui.visible_prompt_func = old_visible_prompt_func + clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func + clickpkg.termui._getchar = old__getchar_func + clickpkg.utils.should_strip_ansi = old_should_strip_ansi + clickpkg.formatting.FORCED_WIDTH = old_forced_width + + def invoke(self, cli, args=None, input=None, env=None, + catch_exceptions=True, color=False, **extra): + """Invokes a command in an isolated environment. The arguments are + forwarded directly to the command line script, the `extra` keyword + arguments are passed to the :meth:`~clickpkg.Command.main` function of + the command. + + This returns a :class:`Result` object. + + .. versionadded:: 3.0 + The ``catch_exceptions`` parameter was added. + + .. versionchanged:: 3.0 + The result object now has an `exc_info` attribute with the + traceback if available. + + .. 
versionadded:: 4.0 + The ``color`` parameter was added. + + :param cli: the command to invoke + :param args: the arguments to invoke + :param input: the input data for `sys.stdin`. + :param env: the environment overrides. + :param catch_exceptions: Whether to catch any other exceptions than + ``SystemExit``. + :param extra: the keyword arguments to pass to :meth:`main`. + :param color: whether the output should contain color codes. The + application can still override this explicitly. + """ + exc_info = None + with self.isolation(input=input, env=env, color=color) as out: + exception = None + exit_code = 0 + + try: + cli.main(args=args or (), + prog_name=self.get_default_prog_name(cli), **extra) + except SystemExit as e: + if e.code != 0: + exception = e + + exc_info = sys.exc_info() + + exit_code = e.code + if not isinstance(exit_code, int): + sys.stdout.write(str(exit_code)) + sys.stdout.write('\n') + exit_code = 1 + except Exception as e: + if not catch_exceptions: + raise + exception = e + exit_code = -1 + exc_info = sys.exc_info() + finally: + sys.stdout.flush() + output = out.getvalue() + + return Result(runner=self, + output_bytes=output, + exit_code=exit_code, + exception=exception, + exc_info=exc_info) + + @contextlib.contextmanager + def isolated_filesystem(self): + """A context manager that creates a temporary folder and changes + the current working directory to it for isolated filesystem tests. + """ + cwd = os.getcwd() + t = tempfile.mkdtemp() + os.chdir(t) + try: + yield t + finally: + os.chdir(cwd) + try: + shutil.rmtree(t) + except (OSError, IOError): + pass diff --git a/app/lib/click/types.py b/app/lib/click/types.py new file mode 100644 index 0000000..3639002 --- /dev/null +++ b/app/lib/click/types.py @@ -0,0 +1,550 @@ +import os +import stat + +from ._compat import open_stream, text_type, filename_to_ui, \ + get_filesystem_encoding, get_streerror, _get_argv_encoding, PY2 +from .exceptions import BadParameter +from .utils import safecall, LazyFile + + +class ParamType(object): + """Helper for converting values through types. The following is + necessary for a valid type: + + * it needs a name + * it needs to pass through None unchanged + * it needs to convert from a string + * it needs to convert its result type through unchanged + (eg: needs to be idempotent) + * it needs to be able to deal with param and context being `None`. + This can be the case when the object is used with prompt + inputs. + """ + is_composite = False + + #: the descriptive name of this type + name = None + + #: if a list of this type is expected and the value is pulled from a + #: string environment variable, this is what splits it up. `None` + #: means any whitespace. For all parameters the general rule is that + #: whitespace splits them up. The exception are paths and files which + #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on + #: Windows). + envvar_list_splitter = None + + def __call__(self, value, param=None, ctx=None): + if value is not None: + return self.convert(value, param, ctx) + + def get_metavar(self, param): + """Returns the metavar default for this param if it provides one.""" + + def get_missing_message(self, param): + """Optionally might return extra information about a missing + parameter. + + .. versionadded:: 2.0 + """ + + def convert(self, value, param, ctx): + """Converts the value. This is not invoked for values that are + `None` (the missing value). 
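+
+        A sketch of a custom type overriding this method (``CommaList``
+        is a hypothetical name; note the conversion passes already
+        converted values through unchanged)::
+
+            class CommaList(ParamType):
+                name = 'commalist'
+
+                def convert(self, value, param, ctx):
+                    if isinstance(value, list):
+                        return value
+                    return [item.strip() for item in value.split(',')]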
+ """ + return value + + def split_envvar_value(self, rv): + """Given a value from an environment variable this splits it up + into small chunks depending on the defined envvar list splitter. + + If the splitter is set to `None`, which means that whitespace splits, + then leading and trailing whitespace is ignored. Otherwise, leading + and trailing splitters usually lead to empty items being included. + """ + return (rv or '').split(self.envvar_list_splitter) + + def fail(self, message, param=None, ctx=None): + """Helper method to fail with an invalid value message.""" + raise BadParameter(message, ctx=ctx, param=param) + + +class CompositeParamType(ParamType): + is_composite = True + + @property + def arity(self): + raise NotImplementedError() + + +class FuncParamType(ParamType): + + def __init__(self, func): + self.name = func.__name__ + self.func = func + + def convert(self, value, param, ctx): + try: + return self.func(value) + except ValueError: + try: + value = text_type(value) + except UnicodeError: + value = str(value).decode('utf-8', 'replace') + self.fail(value, param, ctx) + + +class UnprocessedParamType(ParamType): + name = 'text' + + def convert(self, value, param, ctx): + return value + + def __repr__(self): + return 'UNPROCESSED' + + +class StringParamType(ParamType): + name = 'text' + + def convert(self, value, param, ctx): + if isinstance(value, bytes): + enc = _get_argv_encoding() + try: + value = value.decode(enc) + except UnicodeError: + fs_enc = get_filesystem_encoding() + if fs_enc != enc: + try: + value = value.decode(fs_enc) + except UnicodeError: + value = value.decode('utf-8', 'replace') + return value + return value + + def __repr__(self): + return 'STRING' + + +class Choice(ParamType): + """The choice type allows a value to be checked against a fixed set of + supported values. All of these values have to be strings. + + See :ref:`choice-opts` for an example. + """ + name = 'choice' + + def __init__(self, choices): + self.choices = choices + + def get_metavar(self, param): + return '[%s]' % '|'.join(self.choices) + + def get_missing_message(self, param): + return 'Choose from %s.' % ', '.join(self.choices) + + def convert(self, value, param, ctx): + # Exact match + if value in self.choices: + return value + + # Match through normalization + if ctx is not None and \ + ctx.token_normalize_func is not None: + value = ctx.token_normalize_func(value) + for choice in self.choices: + if ctx.token_normalize_func(choice) == value: + return choice + + self.fail('invalid choice: %s. (choose from %s)' % + (value, ', '.join(self.choices)), param, ctx) + + def __repr__(self): + return 'Choice(%r)' % list(self.choices) + + +class IntParamType(ParamType): + name = 'integer' + + def convert(self, value, param, ctx): + try: + return int(value) + except (ValueError, UnicodeError): + self.fail('%s is not a valid integer' % value, param, ctx) + + def __repr__(self): + return 'INT' + + +class IntRange(IntParamType): + """A parameter that works similar to :data:`click.INT` but restricts + the value to fit into a range. The default behavior is to fail if the + value falls outside the range, but it can also be silently clamped + between the two edges. + + See :ref:`ranges` for an example. 
+ """ + name = 'integer range' + + def __init__(self, min=None, max=None, clamp=False): + self.min = min + self.max = max + self.clamp = clamp + + def convert(self, value, param, ctx): + rv = IntParamType.convert(self, value, param, ctx) + if self.clamp: + if self.min is not None and rv < self.min: + return self.min + if self.max is not None and rv > self.max: + return self.max + if self.min is not None and rv < self.min or \ + self.max is not None and rv > self.max: + if self.min is None: + self.fail('%s is bigger than the maximum valid value ' + '%s.' % (rv, self.max), param, ctx) + elif self.max is None: + self.fail('%s is smaller than the minimum valid value ' + '%s.' % (rv, self.min), param, ctx) + else: + self.fail('%s is not in the valid range of %s to %s.' + % (rv, self.min, self.max), param, ctx) + return rv + + def __repr__(self): + return 'IntRange(%r, %r)' % (self.min, self.max) + + +class BoolParamType(ParamType): + name = 'boolean' + + def convert(self, value, param, ctx): + if isinstance(value, bool): + return bool(value) + value = value.lower() + if value in ('true', '1', 'yes', 'y'): + return True + elif value in ('false', '0', 'no', 'n'): + return False + self.fail('%s is not a valid boolean' % value, param, ctx) + + def __repr__(self): + return 'BOOL' + + +class FloatParamType(ParamType): + name = 'float' + + def convert(self, value, param, ctx): + try: + return float(value) + except (UnicodeError, ValueError): + self.fail('%s is not a valid floating point value' % + value, param, ctx) + + def __repr__(self): + return 'FLOAT' + + +class UUIDParameterType(ParamType): + name = 'uuid' + + def convert(self, value, param, ctx): + import uuid + try: + if PY2 and isinstance(value, text_type): + value = value.encode('ascii') + return uuid.UUID(value) + except (UnicodeError, ValueError): + self.fail('%s is not a valid UUID value' % value, param, ctx) + + def __repr__(self): + return 'UUID' + + +class File(ParamType): + """Declares a parameter to be a file for reading or writing. The file + is automatically closed once the context tears down (after the command + finished working). + + Files can be opened for reading or writing. The special value ``-`` + indicates stdin or stdout depending on the mode. + + By default, the file is opened for reading text data, but it can also be + opened in binary mode or for writing. The encoding parameter can be used + to force a specific encoding. + + The `lazy` flag controls if the file should be opened immediately or + upon first IO. The default is to be non lazy for standard input and + output streams as well as files opened for reading, lazy otherwise. + + Starting with Click 2.0, files can also be opened atomically in which + case all writes go into a separate file in the same folder and upon + completion the file will be moved over to the original location. This + is useful if a file regularly read by other users is modified. + + See :ref:`file-args` for more information. 
+ """ + name = 'filename' + envvar_list_splitter = os.path.pathsep + + def __init__(self, mode='r', encoding=None, errors='strict', lazy=None, + atomic=False): + self.mode = mode + self.encoding = encoding + self.errors = errors + self.lazy = lazy + self.atomic = atomic + + def resolve_lazy_flag(self, value): + if self.lazy is not None: + return self.lazy + if value == '-': + return False + elif 'w' in self.mode: + return True + return False + + def convert(self, value, param, ctx): + try: + if hasattr(value, 'read') or hasattr(value, 'write'): + return value + + lazy = self.resolve_lazy_flag(value) + + if lazy: + f = LazyFile(value, self.mode, self.encoding, self.errors, + atomic=self.atomic) + if ctx is not None: + ctx.call_on_close(f.close_intelligently) + return f + + f, should_close = open_stream(value, self.mode, + self.encoding, self.errors, + atomic=self.atomic) + # If a context is provided, we automatically close the file + # at the end of the context execution (or flush out). If a + # context does not exist, it's the caller's responsibility to + # properly close the file. This for instance happens when the + # type is used with prompts. + if ctx is not None: + if should_close: + ctx.call_on_close(safecall(f.close)) + else: + ctx.call_on_close(safecall(f.flush)) + return f + except (IOError, OSError) as e: + self.fail('Could not open file: %s: %s' % ( + filename_to_ui(value), + get_streerror(e), + ), param, ctx) + + +class Path(ParamType): + """The path type is similar to the :class:`File` type but it performs + different checks. First of all, instead of returning an open file + handle it returns just the filename. Secondly, it can perform various + basic checks about what the file or directory should be. + + .. versionchanged:: 6.0 + `allow_dash` was added. + + :param exists: if set to true, the file or directory needs to exist for + this value to be valid. If this is not required and a + file does indeed not exist, then all further checks are + silently skipped. + :param file_okay: controls if a file is a possible value. + :param dir_okay: controls if a directory is a possible value. + :param writable: if true, a writable check is performed. + :param readable: if true, a readable check is performed. + :param resolve_path: if this is true, then the path is fully resolved + before the value is passed onwards. This means + that it's absolute and symlinks are resolved. + :param allow_dash: If this is set to `True`, a single dash to indicate + standard streams is permitted. + :param type: optionally a string type that should be used to + represent the path. The default is `None` which + means the return value will be either bytes or + unicode depending on what makes most sense given the + input data Click deals with. 
+ """ + envvar_list_splitter = os.path.pathsep + + def __init__(self, exists=False, file_okay=True, dir_okay=True, + writable=False, readable=True, resolve_path=False, + allow_dash=False, path_type=None): + self.exists = exists + self.file_okay = file_okay + self.dir_okay = dir_okay + self.writable = writable + self.readable = readable + self.resolve_path = resolve_path + self.allow_dash = allow_dash + self.type = path_type + + if self.file_okay and not self.dir_okay: + self.name = 'file' + self.path_type = 'File' + if self.dir_okay and not self.file_okay: + self.name = 'directory' + self.path_type = 'Directory' + else: + self.name = 'path' + self.path_type = 'Path' + + def coerce_path_result(self, rv): + if self.type is not None and not isinstance(rv, self.type): + if self.type is text_type: + rv = rv.decode(get_filesystem_encoding()) + else: + rv = rv.encode(get_filesystem_encoding()) + return rv + + def convert(self, value, param, ctx): + rv = value + + is_dash = self.file_okay and self.allow_dash and rv in (b'-', '-') + + if not is_dash: + if self.resolve_path: + rv = os.path.realpath(rv) + + try: + st = os.stat(rv) + except OSError: + if not self.exists: + return self.coerce_path_result(rv) + self.fail('%s "%s" does not exist.' % ( + self.path_type, + filename_to_ui(value) + ), param, ctx) + + if not self.file_okay and stat.S_ISREG(st.st_mode): + self.fail('%s "%s" is a file.' % ( + self.path_type, + filename_to_ui(value) + ), param, ctx) + if not self.dir_okay and stat.S_ISDIR(st.st_mode): + self.fail('%s "%s" is a directory.' % ( + self.path_type, + filename_to_ui(value) + ), param, ctx) + if self.writable and not os.access(value, os.W_OK): + self.fail('%s "%s" is not writable.' % ( + self.path_type, + filename_to_ui(value) + ), param, ctx) + if self.readable and not os.access(value, os.R_OK): + self.fail('%s "%s" is not readable.' % ( + self.path_type, + filename_to_ui(value) + ), param, ctx) + + return self.coerce_path_result(rv) + + +class Tuple(CompositeParamType): + """The default behavior of Click is to apply a type on a value directly. + This works well in most cases, except for when `nargs` is set to a fixed + count and different types should be used for different items. In this + case the :class:`Tuple` type can be used. This type can only be used + if `nargs` is set to a fixed number. + + For more information see :ref:`tuple-type`. + + This can be selected by using a Python tuple literal as a type. + + :param types: a list of types that should be used for the tuple items. + """ + + def __init__(self, types): + self.types = [convert_type(ty) for ty in types] + + @property + def name(self): + return "<" + " ".join(ty.name for ty in self.types) + ">" + + @property + def arity(self): + return len(self.types) + + def convert(self, value, param, ctx): + if len(value) != len(self.types): + raise TypeError('It would appear that nargs is set to conflict ' + 'with the composite type arity.') + return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value)) + + +def convert_type(ty, default=None): + """Converts a callable or python ty into the most appropriate param + ty. 
+ """ + guessed_type = False + if ty is None and default is not None: + if isinstance(default, tuple): + ty = tuple(map(type, default)) + else: + ty = type(default) + guessed_type = True + + if isinstance(ty, tuple): + return Tuple(ty) + if isinstance(ty, ParamType): + return ty + if ty is text_type or ty is str or ty is None: + return STRING + if ty is int: + return INT + # Booleans are only okay if not guessed. This is done because for + # flags the default value is actually a bit of a lie in that it + # indicates which of the flags is the one we want. See get_default() + # for more information. + if ty is bool and not guessed_type: + return BOOL + if ty is float: + return FLOAT + if guessed_type: + return STRING + + # Catch a common mistake + if __debug__: + try: + if issubclass(ty, ParamType): + raise AssertionError('Attempted to use an uninstantiated ' + 'parameter type (%s).' % ty) + except TypeError: + pass + return FuncParamType(ty) + + +#: A dummy parameter type that just does nothing. From a user's +#: perspective this appears to just be the same as `STRING` but internally +#: no string conversion takes place. This is necessary to achieve the +#: same bytes/unicode behavior on Python 2/3 in situations where you want +#: to not convert argument types. This is usually useful when working +#: with file paths as they can appear in bytes and unicode. +#: +#: For path related uses the :class:`Path` type is a better choice but +#: there are situations where an unprocessed type is useful which is why +#: it is is provided. +#: +#: .. versionadded:: 4.0 +UNPROCESSED = UnprocessedParamType() + +#: A unicode string parameter type which is the implicit default. This +#: can also be selected by using ``str`` as type. +STRING = StringParamType() + +#: An integer parameter. This can also be selected by using ``int`` as +#: type. +INT = IntParamType() + +#: A floating point value parameter. This can also be selected by using +#: ``float`` as type. +FLOAT = FloatParamType() + +#: A boolean parameter. This is the default for boolean flags. This can +#: also be selected by using ``bool`` as a type. +BOOL = BoolParamType() + +#: A UUID parameter. 
+UUID = UUIDParameterType() diff --git a/app/lib/click/utils.py b/app/lib/click/utils.py new file mode 100644 index 0000000..eee626d --- /dev/null +++ b/app/lib/click/utils.py @@ -0,0 +1,415 @@ +import os +import sys + +from .globals import resolve_color_default + +from ._compat import text_type, open_stream, get_filesystem_encoding, \ + get_streerror, string_types, PY2, binary_streams, text_streams, \ + filename_to_ui, auto_wrap_for_ansi, strip_ansi, should_strip_ansi, \ + _default_text_stdout, _default_text_stderr, is_bytes, WIN + +if not PY2: + from ._compat import _find_binary_writer +elif WIN: + from ._winconsole import _get_windows_argv, \ + _hash_py_argv, _initial_argv_hash + + +echo_native_types = string_types + (bytes, bytearray) + + +def _posixify(name): + return '-'.join(name.split()).lower() + + +def safecall(func): + """Wraps a function so that it swallows exceptions.""" + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception: + pass + return wrapper + + +def make_str(value): + """Converts a value into a valid string.""" + if isinstance(value, bytes): + try: + return value.decode(get_filesystem_encoding()) + except UnicodeError: + return value.decode('utf-8', 'replace') + return text_type(value) + + +def make_default_short_help(help, max_length=45): + words = help.split() + total_length = 0 + result = [] + done = False + + for word in words: + if word[-1:] == '.': + done = True + new_length = result and 1 + len(word) or len(word) + if total_length + new_length > max_length: + result.append('...') + done = True + else: + if result: + result.append(' ') + result.append(word) + if done: + break + total_length += new_length + + return ''.join(result) + + +class LazyFile(object): + """A lazy file works like a regular file but it does not fully open + the file but it does perform some basic checks early to see if the + filename parameter does make sense. This is useful for safely opening + files for writing. + """ + + def __init__(self, filename, mode='r', encoding=None, errors='strict', + atomic=False): + self.name = filename + self.mode = mode + self.encoding = encoding + self.errors = errors + self.atomic = atomic + + if filename == '-': + self._f, self.should_close = open_stream(filename, mode, + encoding, errors) + else: + if 'r' in mode: + # Open and close the file in case we're opening it for + # reading so that we can catch at least some errors in + # some cases early. + open(filename, mode).close() + self._f = None + self.should_close = True + + def __getattr__(self, name): + return getattr(self.open(), name) + + def __repr__(self): + if self._f is not None: + return repr(self._f) + return '' % (self.name, self.mode) + + def open(self): + """Opens the file if it's not yet open. This call might fail with + a :exc:`FileError`. Not handling this error will produce an error + that Click shows. + """ + if self._f is not None: + return self._f + try: + rv, self.should_close = open_stream(self.name, self.mode, + self.encoding, + self.errors, + atomic=self.atomic) + except (IOError, OSError) as e: + from .exceptions import FileError + raise FileError(self.name, hint=get_streerror(e)) + self._f = rv + return rv + + def close(self): + """Closes the underlying file, no matter what.""" + if self._f is not None: + self._f.close() + + def close_intelligently(self): + """This function only closes the file if it was opened by the lazy + file wrapper. For instance this will never close stdin. 
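The lazy-open behavior described here is observable directly; a sketch with a hypothetical filename:

.. code:: python

    from click.utils import LazyFile

    lf = LazyFile('report.txt', 'w')   # nothing is opened yet
    lf.write('one line\n')             # first attribute access calls open()
    lf.close_intelligently()           # closes it; stdin/stdout would be left alone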
+ """ + if self.should_close: + self.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + self.close_intelligently() + + def __iter__(self): + self.open() + return iter(self._f) + + +class KeepOpenFile(object): + + def __init__(self, file): + self._file = file + + def __getattr__(self, name): + return getattr(self._file, name) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + pass + + def __repr__(self): + return repr(self._file) + + def __iter__(self): + return iter(self._file) + + +def echo(message=None, file=None, nl=True, err=False, color=None): + """Prints a message plus a newline to the given file or stdout. On + first sight, this looks like the print function, but it has improved + support for handling Unicode and binary data that does not fail no + matter how badly configured the system is. + + Primarily it means that you can print binary data as well as Unicode + data on both 2.x and 3.x to the given file in the most appropriate way + possible. This is a very carefree function as in that it will try its + best to not fail. As of Click 6.0 this includes support for unicode + output on the Windows console. + + In addition to that, if `colorama`_ is installed, the echo function will + also support clever handling of ANSI codes. Essentially it will then + do the following: + + - add transparent handling of ANSI color codes on Windows. + - hide ANSI codes automatically if the destination file is not a + terminal. + + .. _colorama: http://pypi.python.org/pypi/colorama + + .. versionchanged:: 6.0 + As of Click 6.0 the echo function will properly support unicode + output on the windows console. Not that click does not modify + the interpreter in any way which means that `sys.stdout` or the + print statement or function will still not provide unicode support. + + .. versionchanged:: 2.0 + Starting with version 2.0 of Click, the echo function will work + with colorama if it's installed. + + .. versionadded:: 3.0 + The `err` parameter was added. + + .. versionchanged:: 4.0 + Added the `color` flag. + + :param message: the message to print + :param file: the file to write to (defaults to ``stdout``) + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``. This is faster and easier than calling + :func:`get_text_stderr` yourself. + :param nl: if set to `True` (the default) a newline is printed afterwards. + :param color: controls if the terminal supports ANSI colors or not. The + default is autodetection. + """ + if file is None: + if err: + file = _default_text_stderr() + else: + file = _default_text_stdout() + + # Convert non bytes/text into the native string type. + if message is not None and not isinstance(message, echo_native_types): + message = text_type(message) + + if nl: + message = message or u'' + if isinstance(message, text_type): + message += u'\n' + else: + message += b'\n' + + # If there is a message, and we're in Python 3, and the value looks + # like bytes, we manually need to find the binary stream and write the + # message in there. This is done separately so that most stream + # types will work as you would expect. Eg: you can write to StringIO + # for other cases. + if message and not PY2 and is_bytes(message): + binary_file = _find_binary_writer(file) + if binary_file is not None: + file.flush() + binary_file.write(message) + binary_file.flush() + return + + # ANSI-style support. If there is no message or we are dealing with + # bytes nothing is happening. 
If we are connected to a file we want + # to strip colors. If we are on windows we either wrap the stream + # to strip the color or we use the colorama support to translate the + # ansi codes to API calls. + if message and not is_bytes(message): + color = resolve_color_default(color) + if should_strip_ansi(file, color): + message = strip_ansi(message) + elif WIN: + if auto_wrap_for_ansi is not None: + file = auto_wrap_for_ansi(file) + elif not color: + message = strip_ansi(message) + + if message: + file.write(message) + file.flush() + + +def get_binary_stream(name): + """Returns a system stream for byte processing. This essentially + returns the stream from the sys module with the given name but it + solves some compatibility issues between different Python versions. + Primarily this function is necessary for getting binary streams on + Python 3. + + :param name: the name of the stream to open. Valid names are ``'stdin'``, + ``'stdout'`` and ``'stderr'`` + """ + opener = binary_streams.get(name) + if opener is None: + raise TypeError('Unknown standard stream %r' % name) + return opener() + + +def get_text_stream(name, encoding=None, errors='strict'): + """Returns a system stream for text processing. This usually returns + a wrapped stream around a binary stream returned from + :func:`get_binary_stream` but it also can take shortcuts on Python 3 + for already correctly configured streams. + + :param name: the name of the stream to open. Valid names are ``'stdin'``, + ``'stdout'`` and ``'stderr'`` + :param encoding: overrides the detected default encoding. + :param errors: overrides the default error mode. + """ + opener = text_streams.get(name) + if opener is None: + raise TypeError('Unknown standard stream %r' % name) + return opener(encoding, errors) + + +def open_file(filename, mode='r', encoding=None, errors='strict', + lazy=False, atomic=False): + """This is similar to how the :class:`File` works but for manual + usage. Files are opened non lazy by default. This can open regular + files as well as stdin/stdout if ``'-'`` is passed. + + If stdin/stdout is returned the stream is wrapped so that the context + manager will not close the stream accidentally. This makes it possible + to always use the function like this without having to worry to + accidentally close a standard stream:: + + with open_file(filename) as f: + ... + + .. versionadded:: 3.0 + + :param filename: the name of the file to open (or ``'-'`` for stdin/stdout). + :param mode: the mode in which to open the file. + :param encoding: the encoding to use. + :param errors: the error handling for this file. + :param lazy: can be flipped to true to open the file lazily. + :param atomic: in atomic mode writes go into a temporary file and it's + moved on close. + """ + if lazy: + return LazyFile(filename, mode, encoding, errors, atomic=atomic) + f, should_close = open_stream(filename, mode, encoding, errors, + atomic=atomic) + if not should_close: + f = KeepOpenFile(f) + return f + + +def get_os_args(): + """This returns the argument part of sys.argv in the most appropriate + form for processing. What this means is that this return value is in + a format that works for Click to process but does not necessarily + correspond well to what's actually standard for the interpreter. + + On most environments the return value is ``sys.argv[:1]`` unchanged. 
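A few representative ``echo`` and ``open_file`` calls (the values are illustrative):

.. code:: python

    import click

    click.echo(u'unicode is safe on any stream')
    click.echo(b'raw bytes bypass ANSI handling')
    click.echo('sent to stderr instead', err=True)

    # '-' yields stdout, wrapped so the context manager will not close it.
    with click.open_file('-', 'w') as f:
        f.write('stdout stays usable after the with block\n')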
+ However if you are on Windows and running Python 2 the return value + will actually be a list of unicode strings instead because the + default behavior on that platform otherwise will not be able to + carry all possible values that sys.argv can have. + + .. versionadded:: 6.0 + """ + # We can only extract the unicode argv if sys.argv has not been + # changed since the startup of the application. + if PY2 and WIN and _initial_argv_hash == _hash_py_argv(): + return _get_windows_argv() + return sys.argv[1:] + + +def format_filename(filename, shorten=False): + """Formats a filename for user display. The main purpose of this + function is to ensure that the filename can be displayed at all. This + will decode the filename to unicode if necessary in a way that it will + not fail. Optionally, it can shorten the filename to not include the + full path to the filename. + + :param filename: formats a filename for UI display. This will also convert + the filename into unicode without failing. + :param shorten: this optionally shortens the filename to strip of the + path that leads up to it. + """ + if shorten: + filename = os.path.basename(filename) + return filename_to_ui(filename) + + +def get_app_dir(app_name, roaming=True, force_posix=False): + r"""Returns the config folder for the application. The default behavior + is to return whatever is most appropriate for the operating system. + + To give you an idea, for an app called ``"Foo Bar"``, something like + the following folders could be returned: + + Mac OS X: + ``~/Library/Application Support/Foo Bar`` + Mac OS X (POSIX): + ``~/.foo-bar`` + Unix: + ``~/.config/foo-bar`` + Unix (POSIX): + ``~/.foo-bar`` + Win XP (roaming): + ``C:\Documents and Settings\\Local Settings\Application Data\Foo Bar`` + Win XP (not roaming): + ``C:\Documents and Settings\\Application Data\Foo Bar`` + Win 7 (roaming): + ``C:\Users\\AppData\Roaming\Foo Bar`` + Win 7 (not roaming): + ``C:\Users\\AppData\Local\Foo Bar`` + + .. versionadded:: 2.0 + + :param app_name: the application name. This should be properly capitalized + and can contain whitespace. + :param roaming: controls if the folder should be roaming or not on Windows. + Has no affect otherwise. + :param force_posix: if this is set to `True` then on any POSIX system the + folder will be stored in the home folder with a leading + dot instead of the XDG config home or darwin's + application support folder. + """ + if WIN: + key = roaming and 'APPDATA' or 'LOCALAPPDATA' + folder = os.environ.get(key) + if folder is None: + folder = os.path.expanduser('~') + return os.path.join(folder, app_name) + if force_posix: + return os.path.join(os.path.expanduser('~/.' + _posixify(app_name))) + if sys.platform == 'darwin': + return os.path.join(os.path.expanduser( + '~/Library/Application Support'), app_name) + return os.path.join( + os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')), + _posixify(app_name)) diff --git a/app/lib/dateutil/__init__.py b/app/lib/dateutil/__init__.py new file mode 100644 index 0000000..ba89aa7 --- /dev/null +++ b/app/lib/dateutil/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +__version__ = "2.6.0" diff --git a/app/lib/dateutil/_common.py b/app/lib/dateutil/_common.py new file mode 100644 index 0000000..cd2a338 --- /dev/null +++ b/app/lib/dateutil/_common.py @@ -0,0 +1,33 @@ +""" +Common code used in multiple modules. 
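For example (actual paths vary by platform and user):

.. code:: python

    import click

    click.get_app_dir('Foo Bar')                    # e.g. ~/.config/foo-bar on Linux
    click.get_app_dir('Foo Bar', force_posix=True)  # e.g. ~/.foo-bar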
+""" + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + __hash__ = None + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) diff --git a/app/lib/dateutil/easter.py b/app/lib/dateutil/easter.py new file mode 100644 index 0000000..e4def97 --- /dev/null +++ b/app/lib/dateutil/easter.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic easter computing method for any given year, using +Western, Orthodox or Julian algorithms. +""" + +import datetime + +__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] + +EASTER_JULIAN = 1 +EASTER_ORTHODOX = 2 +EASTER_WESTERN = 3 + + +def easter(year, method=EASTER_WESTERN): + """ + This method was ported from the work done by GM Arts, + on top of the algorithm by Claus Tondering, which was + based in part on the algorithm of Ouding (1940), as + quoted in "Explanatory Supplement to the Astronomical + Almanac", P. Kenneth Seidelmann, editor. + + This algorithm implements three different easter + calculation methods: + + 1 - Original calculation in Julian calendar, valid in + dates after 326 AD + 2 - Original method, with date converted to Gregorian + calendar, valid in years 1583 to 4099 + 3 - Revised method, in Gregorian calendar, valid in + years 1583 to 4099 as well + + These methods are represented by the constants: + + * ``EASTER_JULIAN = 1`` + * ``EASTER_ORTHODOX = 2`` + * ``EASTER_WESTERN = 3`` + + The default method is method 3. + + More about the algorithm may be found at: + + http://users.chariot.net.au/~gmarts/eastalg.htm + + and + + http://www.tondering.dk/claus/calendar.html + + """ + + if not (1 <= method <= 3): + raise ValueError("invalid method") + + # g - Golden year - 1 + # c - Century + # h - (23 - Epact) mod 30 + # i - Number of days from March 21 to Paschal Full Moon + # j - Weekday for PFM (0=Sunday, etc) + # p - Number of days from March 21 to Sunday on or before PFM + # (-6 to 28 methods 1 & 3, to 56 for method 2) + # e - Extra days to add for method 2 (converting Julian + # date to Gregorian date) + + y = year + g = y % 19 + e = 0 + if method < 3: + # Old method + i = (19*g + 15) % 30 + j = (y + y//4 + i) % 7 + if method == 2: + # Extra dates to convert Julian to Gregorian date + e = 10 + if y > 1600: + e = e + y//100 - 16 - (y//100 - 16)//4 + else: + # New method + c = y//100 + h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 + i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) + j = (y + y//4 + i + 2 - c + c//4) % 7 + + # p can be from -6 to 56 corresponding to dates 22 March to 23 May + # (later dates apply to method 2, although 23 May never actually occurs) + p = i - j + e + d = 1 + (p + 27 + (p + 6)//40) % 31 + m = 3 + (p + 26)//30 + return datetime.date(int(y), int(m), int(d)) diff --git a/app/lib/dateutil/parser.py b/app/lib/dateutil/parser.py new file mode 100644 index 0000000..147b3f2 --- /dev/null +++ b/app/lib/dateutil/parser.py @@ -0,0 +1,1360 @@ +# -*- coding:iso-8859-1 -*- +""" +This module offers a generic date/time string parser which is able to parse +most known formats to represent a date and/or time. 
+ +This module attempts to be forgiving with regards to unlikely input formats, +returning a datetime object even for dates which are ambiguous. If an element +of a date/time stamp is omitted, the following rules are applied: +- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour + on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is + specified. +- If a time zone is omitted, a timezone-naive datetime is returned. + +If any other elements are missing, they are taken from the +:class:`datetime.datetime` object passed to the parameter ``default``. If this +results in a day number exceeding the valid number of days per month, the +value falls back to the end of the month. + +Additional resources about date/time string formats can be found below: + +- `A summary of the international standard date and time notation + `_ +- `W3C Date and Time Formats `_ +- `Time Formats (Planetary Rings Node) `_ +- `CPAN ParseDate module + `_ +- `Java SimpleDateFormat Class + `_ +""" +from __future__ import unicode_literals + +import datetime +import string +import time +import collections +import re +from io import StringIO +from calendar import monthrange, isleap + +from six import text_type, binary_type, integer_types + +from . import relativedelta +from . import tz + +__all__ = ["parse", "parserinfo"] + + +class _timelex(object): + # Fractional seconds are sometimes split by a comma + _split_decimal = re.compile("([\.,])") + + def __init__(self, instream): + if isinstance(instream, binary_type): + instream = instream.decode() + + if isinstance(instream, text_type): + instream = StringIO(instream) + + if getattr(instream, 'read', None) is None: + raise TypeError('Parser must be a string or character stream, not ' + '{itype}'.format(itype=instream.__class__.__name__)) + + self.instream = instream + self.charstack = [] + self.tokenstack = [] + self.eof = False + + def get_token(self): + """ + This function breaks the time string into lexical units (tokens), which + can be parsed by the parser. Lexical units are demarcated by changes in + the character set, so any continuous string of letters is considered + one unit, any continuous string of numbers is considered one unit. + + The main complication arises from the fact that dots ('.') can be used + both as separators (e.g. "Sep.20.2009") or decimal points (e.g. + "4:30:21.447"). As such, it is necessary to read the full context of + any dot-separated strings before breaking it into tokens; as such, this + function maintains a "token stack", for when the ambiguous context + demands that multiple tokens be parsed at once. + """ + if self.tokenstack: + return self.tokenstack.pop(0) + + seenletters = False + token = None + state = None + + while not self.eof: + # We only realize that we've reached the end of a token when we + # find a character that's not part of the current token - since + # that character may be part of the next token, it's stored in the + # charstack. + if self.charstack: + nextchar = self.charstack.pop(0) + else: + nextchar = self.instream.read(1) + while nextchar == '\x00': + nextchar = self.instream.read(1) + + if not nextchar: + self.eof = True + break + elif not state: + # First character of the token - determines if we're starting + # to parse a word, a number or something else. 
+ token = nextchar + if self.isword(nextchar): + state = 'a' + elif self.isnum(nextchar): + state = '0' + elif self.isspace(nextchar): + token = ' ' + break # emit token + else: + break # emit token + elif state == 'a': + # If we've already started reading a word, we keep reading + # letters until we find something that's not part of a word. + seenletters = True + if self.isword(nextchar): + token += nextchar + elif nextchar == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0': + # If we've already started reading a number, we keep reading + # numbers until we find something that doesn't fit. + if self.isnum(nextchar): + token += nextchar + elif nextchar == '.' or (nextchar == ',' and len(token) >= 2): + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == 'a.': + # If we've seen some letters and a dot separator, continue + # parsing, and the tokens will be broken up later. + seenletters = True + if nextchar == '.' or self.isword(nextchar): + token += nextchar + elif self.isnum(nextchar) and token[-1] == '.': + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0.': + # If we've seen at least one dot separator, keep going, we'll + # break up the tokens later. + if nextchar == '.' or self.isnum(nextchar): + token += nextchar + elif self.isword(nextchar) and token[-1] == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + + if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or + token[-1] in '.,')): + l = self._split_decimal.split(token) + token = l[0] + for tok in l[1:]: + if tok: + self.tokenstack.append(tok) + + if state == '0.' and token.count('.') == 0: + token = token.replace(',', '.') + + return token + + def __iter__(self): + return self + + def __next__(self): + token = self.get_token() + if token is None: + raise StopIteration + + return token + + def next(self): + return self.__next__() # Python 2.x support + + @classmethod + def split(cls, s): + return list(cls(s)) + + @classmethod + def isword(cls, nextchar): + """ Whether or not the next character is part of a word """ + return nextchar.isalpha() + + @classmethod + def isnum(cls, nextchar): + """ Whether the next character is part of a number """ + return nextchar.isdigit() + + @classmethod + def isspace(cls, nextchar): + """ Whether the next character is whitespace """ + return nextchar.isspace() + + +class _resultbase(object): + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def _repr(self, classname): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (classname, ", ".join(l)) + + def __len__(self): + return (sum(getattr(self, attr) is not None + for attr in self.__slots__)) + + def __repr__(self): + return self._repr(self.__class__.__name__) + + +class parserinfo(object): + """ + Class which handles what inputs are accepted. Subclass this to customize + the language and acceptable values for each parameter. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. Default is ``False``. 
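``_timelex`` is private, but its rule (tokens break on character-class changes, with dots held back until context disambiguates separator from decimal point) can be sketched:

.. code:: python

    from dateutil.parser import _timelex

    _timelex.split('Sep.20.2009 4:30:21.447')
    # ['Sep', '.', '20', '.', '2009', ' ', '4', ':', '30', ':', '21.447']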
+ + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + Default is ``False``. + """ + + # m from a.m/p.m, t from ISO T separator + JUMP = [" ", ".", ",", ";", "-", "/", "'", + "at", "on", "and", "ad", "m", "t", "of", + "st", "nd", "rd", "th"] + + WEEKDAYS = [("Mon", "Monday"), + ("Tue", "Tuesday"), + ("Wed", "Wednesday"), + ("Thu", "Thursday"), + ("Fri", "Friday"), + ("Sat", "Saturday"), + ("Sun", "Sunday")] + MONTHS = [("Jan", "January"), + ("Feb", "February"), + ("Mar", "March"), + ("Apr", "April"), + ("May", "May"), + ("Jun", "June"), + ("Jul", "July"), + ("Aug", "August"), + ("Sep", "Sept", "September"), + ("Oct", "October"), + ("Nov", "November"), + ("Dec", "December")] + HMS = [("h", "hour", "hours"), + ("m", "minute", "minutes"), + ("s", "second", "seconds")] + AMPM = [("am", "a"), + ("pm", "p")] + UTCZONE = ["UTC", "GMT", "Z"] + PERTAIN = ["of"] + TZOFFSET = {} + + def __init__(self, dayfirst=False, yearfirst=False): + self._jump = self._convert(self.JUMP) + self._weekdays = self._convert(self.WEEKDAYS) + self._months = self._convert(self.MONTHS) + self._hms = self._convert(self.HMS) + self._ampm = self._convert(self.AMPM) + self._utczone = self._convert(self.UTCZONE) + self._pertain = self._convert(self.PERTAIN) + + self.dayfirst = dayfirst + self.yearfirst = yearfirst + + self._year = time.localtime().tm_year + self._century = self._year // 100 * 100 + + def _convert(self, lst): + dct = {} + for i, v in enumerate(lst): + if isinstance(v, tuple): + for v in v: + dct[v.lower()] = i + else: + dct[v.lower()] = i + return dct + + def jump(self, name): + return name.lower() in self._jump + + def weekday(self, name): + if len(name) >= 3: + try: + return self._weekdays[name.lower()] + except KeyError: + pass + return None + + def month(self, name): + if len(name) >= 3: + try: + return self._months[name.lower()] + 1 + except KeyError: + pass + return None + + def hms(self, name): + try: + return self._hms[name.lower()] + except KeyError: + return None + + def ampm(self, name): + try: + return self._ampm[name.lower()] + except KeyError: + return None + + def pertain(self, name): + return name.lower() in self._pertain + + def utczone(self, name): + return name.lower() in self._utczone + + def tzoffset(self, name): + if name in self._utczone: + return 0 + + return self.TZOFFSET.get(name) + + def convertyear(self, year, century_specified=False): + if year < 100 and not century_specified: + year += self._century + if abs(year - self._year) >= 50: + if year < self._year: + year += 100 + else: + year -= 100 + return year + + def validate(self, res): + # move to info + if res.year is not None: + res.year = self.convertyear(res.year, res.century_specified) + + if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z': + res.tzname = "UTC" + res.tzoffset = 0 + elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): + res.tzoffset = 0 + return True + + +class _ymd(list): + def __init__(self, tzstr, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + self.century_specified = False + self.tzstr = tzstr + + @staticmethod + def token_could_be_year(token, year): + try: + return int(token) == year + except ValueError: + return False + + @staticmethod + def find_potential_year_tokens(year, tokens): + return [token for token in tokens if _ymd.token_could_be_year(token, year)] + + def 
find_probable_year_index(self, tokens): + """ + attempt to deduce if a pre 100 year was lost + due to padded zeros being taken off + """ + for index, token in enumerate(self): + potential_year_tokens = _ymd.find_potential_year_tokens(token, tokens) + if len(potential_year_tokens) == 1 and len(potential_year_tokens[0]) > 2: + return index + + def append(self, val): + if hasattr(val, '__len__'): + if val.isdigit() and len(val) > 2: + self.century_specified = True + elif val > 100: + self.century_specified = True + + super(self.__class__, self).append(int(val)) + + def resolve_ymd(self, mstridx, yearfirst, dayfirst): + len_ymd = len(self) + year, month, day = (None, None, None) + + if len_ymd > 3: + raise ValueError("More than three YMD values") + elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2): + # One member, or two members with a month string + if mstridx != -1: + month = self[mstridx] + del self[mstridx] + + if len_ymd > 1 or mstridx == -1: + if self[0] > 31: + year = self[0] + else: + day = self[0] + + elif len_ymd == 2: + # Two members with numbers + if self[0] > 31: + # 99-01 + year, month = self + elif self[1] > 31: + # 01-99 + month, year = self + elif dayfirst and self[1] <= 12: + # 13-01 + day, month = self + else: + # 01-13 + month, day = self + + elif len_ymd == 3: + # Three members + if mstridx == 0: + month, day, year = self + elif mstridx == 1: + if self[0] > 31 or (yearfirst and self[2] <= 31): + # 99-Jan-01 + year, month, day = self + else: + # 01-Jan-01 + # Give precendence to day-first, since + # two-digit years is usually hand-written. + day, month, year = self + + elif mstridx == 2: + # WTF!? + if self[1] > 31: + # 01-99-Jan + day, year, month = self + else: + # 99-01-Jan + year, day, month = self + + else: + if self[0] > 31 or \ + self.find_probable_year_index(_timelex.split(self.tzstr)) == 0 or \ + (yearfirst and self[1] <= 12 and self[2] <= 31): + # 99-01-01 + if dayfirst and self[2] <= 12: + year, day, month = self + else: + year, month, day = self + elif self[0] > 12 or (dayfirst and self[1] <= 12): + # 13-01-01 + day, month, year = self + else: + # 01-13-01 + month, day, year = self + + return year, month, day + + +class parser(object): + def __init__(self, info=None): + self.info = info or parserinfo() + + def parse(self, timestr, default=None, ignoretz=False, tzinfos=None, **kwargs): + """ + Parse the date/time string into a :class:`datetime.datetime` object. + + :param timestr: + Any date/time string using the supported formats. + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a + naive :class:`datetime.datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in minutes or a :class:`tzinfo` object. + + .. 
doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param **kwargs: + Keyword arguments as passed to ``_parse()``. + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ValueError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. + """ + + if default is None: + effective_dt = datetime.datetime.now() + default = datetime.datetime.now().replace(hour=0, minute=0, + second=0, microsecond=0) + else: + effective_dt = default + + res, skipped_tokens = self._parse(timestr, **kwargs) + + if res is None: + raise ValueError("Unknown string format") + + if len(res) == 0: + raise ValueError("String does not contain a date.") + + repl = {} + for attr in ("year", "month", "day", "hour", + "minute", "second", "microsecond"): + value = getattr(res, attr) + if value is not None: + repl[attr] = value + + if 'day' not in repl: + # If the default day exceeds the last day of the month, fall back to + # the end of the month. + cyear = default.year if res.year is None else res.year + cmonth = default.month if res.month is None else res.month + cday = default.day if res.day is None else res.day + + if cday > monthrange(cyear, cmonth)[1]: + repl['day'] = monthrange(cyear, cmonth)[1] + + ret = default.replace(**repl) + + if res.weekday is not None and not res.day: + ret = ret+relativedelta.relativedelta(weekday=res.weekday) + + if not ignoretz: + if (isinstance(tzinfos, collections.Callable) or + tzinfos and res.tzname in tzinfos): + + if isinstance(tzinfos, collections.Callable): + tzdata = tzinfos(res.tzname, res.tzoffset) + else: + tzdata = tzinfos.get(res.tzname) + + if isinstance(tzdata, datetime.tzinfo): + tzinfo = tzdata + elif isinstance(tzdata, text_type): + tzinfo = tz.tzstr(tzdata) + elif isinstance(tzdata, integer_types): + tzinfo = tz.tzoffset(res.tzname, tzdata) + else: + raise ValueError("Offset must be tzinfo subclass, " + "tz string, or int offset.") + ret = ret.replace(tzinfo=tzinfo) + elif res.tzname and res.tzname in time.tzname: + ret = ret.replace(tzinfo=tz.tzlocal()) + elif res.tzoffset == 0: + ret = ret.replace(tzinfo=tz.tzutc()) + elif res.tzoffset: + ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) + + if kwargs.get('fuzzy_with_tokens', False): + return ret, skipped_tokens + else: + return ret + + class _result(_resultbase): + __slots__ = ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond", + "tzname", "tzoffset", "ampm"] + + def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, + fuzzy_with_tokens=False): + """ + Private method which performs the heavy lifting of parsing, called from + ``parse()``, which passes on its ``kwargs`` to this function. 
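A sketch of how ``default`` fills in missing elements, including the end-of-month fallback (the values are made up):

.. code:: python

    from datetime import datetime
    from dateutil.parser import parse

    parse('10:36', default=datetime(2003, 9, 25))
    # datetime.datetime(2003, 9, 25, 10, 36)

    parse('Feb 2003', default=datetime(2001, 1, 31))
    # day 31 comes from the default but exceeds February's length,
    # so it falls back to datetime.datetime(2003, 2, 28, 0, 0)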
+ + :param timestr: + The string to parse. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. If set to ``None``, this value is retrieved from the + current :class:`parserinfo` object (which itself defaults to + ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + If this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". + + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + """ + if fuzzy_with_tokens: + fuzzy = True + + info = self.info + + if dayfirst is None: + dayfirst = info.dayfirst + + if yearfirst is None: + yearfirst = info.yearfirst + + res = self._result() + l = _timelex.split(timestr) # Splits the timestr into tokens + + # keep up with the last token skipped so we can recombine + # consecutively skipped tokens (-2 for when i begins at 0). 
+ last_skipped_token_i = -2 + skipped_tokens = list() + + try: + # year/month/day list + ymd = _ymd(timestr) + + # Index of the month string in ymd + mstridx = -1 + + len_l = len(l) + i = 0 + while i < len_l: + + # Check if it's a number + try: + value_repr = l[i] + value = float(value_repr) + except ValueError: + value = None + + if value is not None: + # Token is a number + len_li = len(l[i]) + i += 1 + + if (len(ymd) == 3 and len_li in (2, 4) + and res.hour is None and (i >= len_l or (l[i] != ':' and + info.hms(l[i]) is None))): + # 19990101T23[59] + s = l[i-1] + res.hour = int(s[:2]) + + if len_li == 4: + res.minute = int(s[2:]) + + elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6): + # YYMMDD or HHMMSS[.ss] + s = l[i-1] + + if not ymd and l[i-1].find('.') == -1: + #ymd.append(info.convertyear(int(s[:2]))) + + ymd.append(s[:2]) + ymd.append(s[2:4]) + ymd.append(s[4:]) + else: + # 19990101T235959[.59] + res.hour = int(s[:2]) + res.minute = int(s[2:4]) + res.second, res.microsecond = _parsems(s[4:]) + + elif len_li in (8, 12, 14): + # YYYYMMDD + s = l[i-1] + ymd.append(s[:4]) + ymd.append(s[4:6]) + ymd.append(s[6:8]) + + if len_li > 8: + res.hour = int(s[8:10]) + res.minute = int(s[10:12]) + + if len_li > 12: + res.second = int(s[12:]) + + elif ((i < len_l and info.hms(l[i]) is not None) or + (i+1 < len_l and l[i] == ' ' and + info.hms(l[i+1]) is not None)): + + # HH[ ]h or MM[ ]m or SS[.ss][ ]s + if l[i] == ' ': + i += 1 + + idx = info.hms(l[i]) + + while True: + if idx == 0: + res.hour = int(value) + + if value % 1: + res.minute = int(60*(value % 1)) + + elif idx == 1: + res.minute = int(value) + + if value % 1: + res.second = int(60*(value % 1)) + + elif idx == 2: + res.second, res.microsecond = \ + _parsems(value_repr) + + i += 1 + + if i >= len_l or idx == 2: + break + + # 12h00 + try: + value_repr = l[i] + value = float(value_repr) + except ValueError: + break + else: + i += 1 + idx += 1 + + if i < len_l: + newidx = info.hms(l[i]) + + if newidx is not None: + idx = newidx + + elif (i == len_l and l[i-2] == ' ' and + info.hms(l[i-3]) is not None): + # X h MM or X m SS + idx = info.hms(l[i-3]) + 1 + + if idx == 1: + res.minute = int(value) + + if value % 1: + res.second = int(60*(value % 1)) + elif idx == 2: + res.second, res.microsecond = \ + _parsems(value_repr) + i += 1 + + elif i+1 < len_l and l[i] == ':': + # HH:MM[:SS[.ss]] + res.hour = int(value) + i += 1 + value = float(l[i]) + res.minute = int(value) + + if value % 1: + res.second = int(60*(value % 1)) + + i += 1 + + if i < len_l and l[i] == ':': + res.second, res.microsecond = _parsems(l[i+1]) + i += 2 + + elif i < len_l and l[i] in ('-', '/', '.'): + sep = l[i] + ymd.append(value_repr) + i += 1 + + if i < len_l and not info.jump(l[i]): + try: + # 01-01[-01] + ymd.append(l[i]) + except ValueError: + # 01-Jan[-01] + value = info.month(l[i]) + + if value is not None: + ymd.append(value) + assert mstridx == -1 + mstridx = len(ymd)-1 + else: + return None, None + + i += 1 + + if i < len_l and l[i] == sep: + # We have three members + i += 1 + value = info.month(l[i]) + + if value is not None: + ymd.append(value) + mstridx = len(ymd)-1 + assert mstridx == -1 + else: + ymd.append(l[i]) + + i += 1 + elif i >= len_l or info.jump(l[i]): + if i+1 < len_l and info.ampm(l[i+1]) is not None: + # 12 am + res.hour = int(value) + + if res.hour < 12 and info.ampm(l[i+1]) == 1: + res.hour += 12 + elif res.hour == 12 and info.ampm(l[i+1]) == 0: + res.hour = 0 + + i += 1 + else: + # Year, month or day + ymd.append(value) + i += 1 + 
elif info.ampm(l[i]) is not None: + + # 12am + res.hour = int(value) + + if res.hour < 12 and info.ampm(l[i]) == 1: + res.hour += 12 + elif res.hour == 12 and info.ampm(l[i]) == 0: + res.hour = 0 + i += 1 + + elif not fuzzy: + return None, None + else: + i += 1 + continue + + # Check weekday + value = info.weekday(l[i]) + if value is not None: + res.weekday = value + i += 1 + continue + + # Check month name + value = info.month(l[i]) + if value is not None: + ymd.append(value) + assert mstridx == -1 + mstridx = len(ymd)-1 + + i += 1 + if i < len_l: + if l[i] in ('-', '/'): + # Jan-01[-99] + sep = l[i] + i += 1 + ymd.append(l[i]) + i += 1 + + if i < len_l and l[i] == sep: + # Jan-01-99 + i += 1 + ymd.append(l[i]) + i += 1 + + elif (i+3 < len_l and l[i] == l[i+2] == ' ' + and info.pertain(l[i+1])): + # Jan of 01 + # In this case, 01 is clearly year + try: + value = int(l[i+3]) + except ValueError: + # Wrong guess + pass + else: + # Convert it here to become unambiguous + ymd.append(str(info.convertyear(value))) + i += 4 + continue + + # Check am/pm + value = info.ampm(l[i]) + if value is not None: + # For fuzzy parsing, 'a' or 'am' (both valid English words) + # may erroneously trigger the AM/PM flag. Deal with that + # here. + val_is_ampm = True + + # If there's already an AM/PM flag, this one isn't one. + if fuzzy and res.ampm is not None: + val_is_ampm = False + + # If AM/PM is found and hour is not, raise a ValueError + if res.hour is None: + if fuzzy: + val_is_ampm = False + else: + raise ValueError('No hour specified with ' + + 'AM or PM flag.') + elif not 0 <= res.hour <= 12: + # If AM/PM is found, it's a 12 hour clock, so raise + # an error for invalid range + if fuzzy: + val_is_ampm = False + else: + raise ValueError('Invalid hour specified for ' + + '12-hour clock.') + + if val_is_ampm: + if value == 1 and res.hour < 12: + res.hour += 12 + elif value == 0 and res.hour == 12: + res.hour = 0 + + res.ampm = value + + i += 1 + continue + + # Check for a timezone name + if (res.hour is not None and len(l[i]) <= 5 and + res.tzname is None and res.tzoffset is None and + not [x for x in l[i] if x not in + string.ascii_uppercase]): + res.tzname = l[i] + res.tzoffset = info.tzoffset(res.tzname) + i += 1 + + # Check for something like GMT+3, or BRST+3. Notice + # that it doesn't mean "I am 3 hours after GMT", but + # "my time +3 is GMT". If found, we reverse the + # logic so that timezone parsing code will get it + # right. + if i < len_l and l[i] in ('+', '-'): + l[i] = ('+', '-')[l[i] == '+'] + res.tzoffset = None + if info.utczone(res.tzname): + # With something like GMT+3, the timezone + # is *not* GMT. 
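The sign reversal described in the comment above shows up in parsed results; a sketch:

.. code:: python

    from dateutil.parser import parse

    parse('2012-01-19 17:21:00 GMT+3').utcoffset()
    # datetime.timedelta(-1, 75600), i.e. -3 hours: "GMT+3" is read
    # POSIX-style as "local time plus three hours is GMT", not as UTC+3.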
+ res.tzname = None + + continue + + # Check for a numbered timezone + if res.hour is not None and l[i] in ('+', '-'): + signal = (-1, 1)[l[i] == '+'] + i += 1 + len_li = len(l[i]) + + if len_li == 4: + # -0300 + res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60 + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + res.tzoffset = int(l[i])*3600+int(l[i+2])*60 + i += 2 + elif len_li <= 2: + # -[0]3 + res.tzoffset = int(l[i][:2])*3600 + else: + return None, None + i += 1 + + res.tzoffset *= signal + + # Look for a timezone name between parenthesis + if (i+3 < len_l and + info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and + 3 <= len(l[i+2]) <= 5 and + not [x for x in l[i+2] + if x not in string.ascii_uppercase]): + # -0300 (BRST) + res.tzname = l[i+2] + i += 4 + continue + + # Check jumps + if not (info.jump(l[i]) or fuzzy): + return None, None + + if last_skipped_token_i == i - 1: + # recombine the tokens + skipped_tokens[-1] += l[i] + else: + # just append + skipped_tokens.append(l[i]) + last_skipped_token_i = i + i += 1 + + # Process year/month/day + year, month, day = ymd.resolve_ymd(mstridx, yearfirst, dayfirst) + if year is not None: + res.year = year + res.century_specified = ymd.century_specified + + if month is not None: + res.month = month + + if day is not None: + res.day = day + + except (IndexError, ValueError, AssertionError): + return None, None + + if not info.validate(res): + return None, None + + if fuzzy_with_tokens: + return res, tuple(skipped_tokens) + else: + return res, None + +DEFAULTPARSER = parser() + + +def parse(timestr, parserinfo=None, **kwargs): + """ + + Parse a string in one of the supported formats, using the + ``parserinfo`` parameters. + + :param timestr: + A string containing a date/time stamp. + + :param parserinfo: + A :class:`parserinfo` object containing parameters for the parser. + If ``None``, the default arguments to the :class:`parserinfo` + constructor are used. + + The ``**kwargs`` parameter takes the following keyword arguments: + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a naive + :class:`datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in minutes or a :class:`tzinfo` object. + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). 
If + ``yearfirst`` is set to ``True``, this distinguishes between YDM and + YMD. If set to ``None``, this value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken to + be the year, otherwise the last number is taken to be the year. If + this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". + + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2011, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ValueError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. + """ + if parserinfo: + return parser(parserinfo).parse(timestr, **kwargs) + else: + return DEFAULTPARSER.parse(timestr, **kwargs) + + +class _tzparser(object): + + class _result(_resultbase): + + __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", + "start", "end"] + + class _attr(_resultbase): + __slots__ = ["month", "week", "weekday", + "yday", "jyday", "day", "time"] + + def __repr__(self): + return self._repr("") + + def __init__(self): + _resultbase.__init__(self) + self.start = self._attr() + self.end = self._attr() + + def parse(self, tzstr): + res = self._result() + l = _timelex.split(tzstr) + try: + + len_l = len(l) + + i = 0 + while i < len_l: + # BRST+3[BRDT[+2]] + j = i + while j < len_l and not [x for x in l[j] + if x in "0123456789:,-+"]: + j += 1 + if j != i: + if not res.stdabbr: + offattr = "stdoffset" + res.stdabbr = "".join(l[i:j]) + else: + offattr = "dstoffset" + res.dstabbr = "".join(l[i:j]) + i = j + if (i < len_l and (l[i] in ('+', '-') or l[i][0] in + "0123456789")): + if l[i] in ('+', '-'): + # Yes, that's right. See the TZ variable + # documentation. 
+ signal = (1, -1)[l[i] == '+'] + i += 1 + else: + signal = -1 + len_li = len(l[i]) + if len_li == 4: + # -0300 + setattr(res, offattr, (int(l[i][:2])*3600 + + int(l[i][2:])*60)*signal) + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + setattr(res, offattr, + (int(l[i])*3600+int(l[i+2])*60)*signal) + i += 2 + elif len_li <= 2: + # -[0]3 + setattr(res, offattr, + int(l[i][:2])*3600*signal) + else: + return None + i += 1 + if res.dstabbr: + break + else: + break + + if i < len_l: + for j in range(i, len_l): + if l[j] == ';': + l[j] = ',' + + assert l[i] == ',' + + i += 1 + + if i >= len_l: + pass + elif (8 <= l.count(',') <= 9 and + not [y for x in l[i:] if x != ',' + for y in x if y not in "0123456789"]): + # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] + for x in (res.start, res.end): + x.month = int(l[i]) + i += 2 + if l[i] == '-': + value = int(l[i+1])*-1 + i += 1 + else: + value = int(l[i]) + i += 2 + if value: + x.week = value + x.weekday = (int(l[i])-1) % 7 + else: + x.day = int(l[i]) + i += 2 + x.time = int(l[i]) + i += 2 + if i < len_l: + if l[i] in ('-', '+'): + signal = (-1, 1)[l[i] == "+"] + i += 1 + else: + signal = 1 + res.dstoffset = (res.stdoffset+int(l[i]))*signal + elif (l.count(',') == 2 and l[i:].count('/') <= 2 and + not [y for x in l[i:] if x not in (',', '/', 'J', 'M', + '.', '-', ':') + for y in x if y not in "0123456789"]): + for x in (res.start, res.end): + if l[i] == 'J': + # non-leap year day (1 based) + i += 1 + x.jyday = int(l[i]) + elif l[i] == 'M': + # month[-.]week[-.]weekday + i += 1 + x.month = int(l[i]) + i += 1 + assert l[i] in ('-', '.') + i += 1 + x.week = int(l[i]) + if x.week == 5: + x.week = -1 + i += 1 + assert l[i] in ('-', '.') + i += 1 + x.weekday = (int(l[i])-1) % 7 + else: + # year day (zero based) + x.yday = int(l[i])+1 + + i += 1 + + if i < len_l and l[i] == '/': + i += 1 + # start time + len_li = len(l[i]) + if len_li == 4: + # -0300 + x.time = (int(l[i][:2])*3600+int(l[i][2:])*60) + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + x.time = int(l[i])*3600+int(l[i+2])*60 + i += 2 + if i+1 < len_l and l[i+1] == ':': + i += 2 + x.time += int(l[i]) + elif len_li <= 2: + # -[0]3 + x.time = (int(l[i][:2])*3600) + else: + return None + i += 1 + + assert i == len_l or l[i] == ',' + + i += 1 + + assert i >= len_l + + except (IndexError, ValueError, AssertionError): + return None + + return res + + +DEFAULTTZPARSER = _tzparser() + + +def _parsetz(tzstr): + return DEFAULTTZPARSER.parse(tzstr) + + +def _parsems(value): + """Parse a I[.F] seconds value into (seconds, microseconds).""" + if "." not in value: + return int(value), 0 + else: + i, f = value.split(".") + return int(i), int(f.ljust(6, "0")[:6]) + + +# vim:ts=4:sw=4:et diff --git a/app/lib/dateutil/relativedelta.py b/app/lib/dateutil/relativedelta.py new file mode 100644 index 0000000..7e3bd12 --- /dev/null +++ b/app/lib/dateutil/relativedelta.py @@ -0,0 +1,531 @@ +# -*- coding: utf-8 -*- +import datetime +import calendar + +import operator +from math import copysign + +from six import integer_types +from warnings import warn + +from ._common import weekday + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)]) + +__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + + +class relativedelta(object): + """ + The relativedelta type is based on the specification of the excellent + work done by M.-A. Lemburg in his + `mx.DateTime `_ extension. + However, notice that this type does *NOT* implement the same algorithm as + his work. 
Do *NOT* expect it to behave like mx.DateTime's counterpart. + + There are two different ways to build a relativedelta instance. The + first one is passing it two date/datetime instances:: + + relativedelta(datetime1, datetime2) + + The second one is passing it any number of the following keyword arguments:: + + relativedelta(arg1=x,arg2=y,arg3=z...) + + year, month, day, hour, minute, second, microsecond: + Absolute information (argument is singular); adding or subtracting a + relativedelta with absolute information does not perform an arithmetic + operation, but rather REPLACES the corresponding value in the + original datetime with the value(s) in relativedelta. + + years, months, weeks, days, hours, minutes, seconds, microseconds: + Relative information, may be negative (argument is plural); adding + or subtracting a relativedelta with relative information performs + the corresponding arithmetic operation on the original datetime value + with the information in the relativedelta. + + weekday: + One of the weekday instances (MO, TU, etc). These instances may + receive a parameter N, specifying the Nth weekday, which could + be positive or negative (like MO(+1) or MO(-2)). Not specifying + it is the same as specifying +1. You can also use an integer, + where 0=MO. + + leapdays: + Will add given days to the date found, if year is a leap + year, and the date found is after the 28th of February. + + yearday, nlyearday: + Set the yearday or the non-leap year day (jump leap days). + These are converted to day/month/leapdays information. + + Here is the behavior of operations with relativedelta: + + 1. Calculate the absolute year, using the 'year' argument, or the + original datetime year, if the argument is not present. + + 2. Add the relative 'years' argument to the absolute year. + + 3. Do steps 1 and 2 for month/months. + + 4. Calculate the absolute day, using the 'day' argument, or the + original datetime day, if the argument is not present. Then, + subtract from the day until it fits in the year and month + found after their operations. + + 5. Add the relative 'days' argument to the absolute day. Notice + that the 'weeks' argument is multiplied by 7 and added to + 'days'. + + 6. Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds, + microsecond/microseconds. + + 7. If the 'weekday' argument is present, calculate the weekday, + with the given (wday, nth) tuple. wday is the index of the + weekday (0-6, 0=Mon), and nth is the number of weeks to add + forward or backward, depending on its sign. Notice that if + the calculated date is already Monday, for example, using + (0, 1) or (0, -1) won't change the day. + """ + + def __init__(self, dt1=None, dt2=None, + years=0, months=0, days=0, leapdays=0, weeks=0, + hours=0, minutes=0, seconds=0, microseconds=0, + year=None, month=None, day=None, weekday=None, + yearday=None, nlyearday=None, + hour=None, minute=None, second=None, microsecond=None): + + # Check for non-integer values in integer-only quantities + if any(x is not None and x != int(x) for x in (years, months)): + raise ValueError("Non-integer years and months are " + "ambiguous and not currently supported.") + + if dt1 and dt2: + # datetime is a subclass of date.
So both must be date + if not (isinstance(dt1, datetime.date) and + isinstance(dt2, datetime.date)): + raise TypeError("relativedelta only diffs datetime/date") + + # We allow two dates, or two datetimes, so we coerce them to be + # of the same type + if (isinstance(dt1, datetime.datetime) != + isinstance(dt2, datetime.datetime)): + if not isinstance(dt1, datetime.datetime): + dt1 = datetime.datetime.fromordinal(dt1.toordinal()) + elif not isinstance(dt2, datetime.datetime): + dt2 = datetime.datetime.fromordinal(dt2.toordinal()) + + self.years = 0 + self.months = 0 + self.days = 0 + self.leapdays = 0 + self.hours = 0 + self.minutes = 0 + self.seconds = 0 + self.microseconds = 0 + self.year = None + self.month = None + self.day = None + self.weekday = None + self.hour = None + self.minute = None + self.second = None + self.microsecond = None + self._has_time = 0 + + # Get year / month delta between the two + months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) + self._set_months(months) + + # Remove the year/month delta so the timedelta is just well-defined + # time units (seconds, days and microseconds) + dtm = self.__radd__(dt2) + + # If we've overshot our target, make an adjustment + if dt1 < dt2: + compare = operator.gt + increment = 1 + else: + compare = operator.lt + increment = -1 + + while compare(dt1, dtm): + months += increment + self._set_months(months) + dtm = self.__radd__(dt2) + + # Get the timedelta between the "months-adjusted" date and dt1 + delta = dt1 - dtm + self.seconds = delta.seconds + delta.days * 86400 + self.microseconds = delta.microseconds + else: + # Relative information + self.years = years + self.months = months + self.days = days + weeks * 7 + self.leapdays = leapdays + self.hours = hours + self.minutes = minutes + self.seconds = seconds + self.microseconds = microseconds + + # Absolute information + self.year = year + self.month = month + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + + if any(x is not None and int(x) != x + for x in (year, month, day, hour, + minute, second, microsecond)): + # For now we'll deprecate floats - later it'll be an error. + warn("Non-integer value passed as absolute information. 
" + + "This is not a well-defined condition and will raise " + + "errors in future versions.", DeprecationWarning) + + + if isinstance(weekday, integer_types): + self.weekday = weekdays[weekday] + else: + self.weekday = weekday + + yday = 0 + if nlyearday: + yday = nlyearday + elif yearday: + yday = yearday + if yearday > 59: + self.leapdays = -1 + if yday: + ydayidx = [31, 59, 90, 120, 151, 181, 212, + 243, 273, 304, 334, 366] + for idx, ydays in enumerate(ydayidx): + if yday <= ydays: + self.month = idx+1 + if idx == 0: + self.day = yday + else: + self.day = yday-ydayidx[idx-1] + break + else: + raise ValueError("invalid year day (%d)" % yday) + + self._fix() + + def _fix(self): + if abs(self.microseconds) > 999999: + s = _sign(self.microseconds) + div, mod = divmod(self.microseconds * s, 1000000) + self.microseconds = mod * s + self.seconds += div * s + if abs(self.seconds) > 59: + s = _sign(self.seconds) + div, mod = divmod(self.seconds * s, 60) + self.seconds = mod * s + self.minutes += div * s + if abs(self.minutes) > 59: + s = _sign(self.minutes) + div, mod = divmod(self.minutes * s, 60) + self.minutes = mod * s + self.hours += div * s + if abs(self.hours) > 23: + s = _sign(self.hours) + div, mod = divmod(self.hours * s, 24) + self.hours = mod * s + self.days += div * s + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years += div * s + if (self.hours or self.minutes or self.seconds or self.microseconds + or self.hour is not None or self.minute is not None or + self.second is not None or self.microsecond is not None): + self._has_time = 1 + else: + self._has_time = 0 + + @property + def weeks(self): + return self.days // 7 + @weeks.setter + def weeks(self, value): + self.days = self.days - (self.weeks * 7) + value * 7 + + def _set_months(self, months): + self.months = months + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years = div * s + else: + self.years = 0 + + def normalized(self): + """ + Return a version of this object represented entirely using integer + values for the relative attributes. + + >>> relativedelta(days=1.5, hours=2).normalized() + relativedelta(days=1, hours=14) + + :return: + Returns a :class:`dateutil.relativedelta.relativedelta` object. 
+ """ + # Cascade remainders down (rounding each to roughly nearest microsecond) + days = int(self.days) + + hours_f = round(self.hours + 24 * (self.days - days), 11) + hours = int(hours_f) + + minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) + minutes = int(minutes_f) + + seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) + seconds = int(seconds_f) + + microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) + + # Constructor carries overflow back up with call to _fix() + return self.__class__(years=self.years, months=self.months, + days=days, hours=hours, minutes=minutes, + seconds=seconds, microseconds=microseconds, + leapdays=self.leapdays, year=self.year, + month=self.month, day=self.day, + weekday=self.weekday, hour=self.hour, + minute=self.minute, second=self.second, + microsecond=self.microsecond) + + def __add__(self, other): + if isinstance(other, relativedelta): + return self.__class__(years=other.years + self.years, + months=other.months + self.months, + days=other.days + self.days, + hours=other.hours + self.hours, + minutes=other.minutes + self.minutes, + seconds=other.seconds + self.seconds, + microseconds=(other.microseconds + + self.microseconds), + leapdays=other.leapdays or self.leapdays, + year=other.year or self.year, + month=other.month or self.month, + day=other.day or self.day, + weekday=other.weekday or self.weekday, + hour=other.hour or self.hour, + minute=other.minute or self.minute, + second=other.second or self.second, + microsecond=(other.microsecond or + self.microsecond)) + if isinstance(other, datetime.timedelta): + return self.__class__(years=self.years, + months=self.months, + days=self.days + other.days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds + other.seconds, + microseconds=self.microseconds + other.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + if not isinstance(other, datetime.date): + return NotImplemented + elif self._has_time and not isinstance(other, datetime.datetime): + other = datetime.datetime.fromordinal(other.toordinal()) + year = (self.year or other.year)+self.years + month = self.month or other.month + if self.months: + assert 1 <= abs(self.months) <= 12 + month += self.months + if month > 12: + year += 1 + month -= 12 + elif month < 1: + year -= 1 + month += 12 + day = min(calendar.monthrange(year, month)[1], + self.day or other.day) + repl = {"year": year, "month": month, "day": day} + for attr in ["hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + repl[attr] = value + days = self.days + if self.leapdays and month > 2 and calendar.isleap(year): + days += self.leapdays + ret = (other.replace(**repl) + + datetime.timedelta(days=days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds, + microseconds=self.microseconds)) + if self.weekday: + weekday, nth = self.weekday.weekday, self.weekday.n or 1 + jumpdays = (abs(nth) - 1) * 7 + if nth > 0: + jumpdays += (7 - ret.weekday() + weekday) % 7 + else: + jumpdays += (ret.weekday() - weekday) % 7 + jumpdays *= -1 + ret += datetime.timedelta(days=jumpdays) + return ret + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + return self.__neg__().__radd__(other) + + def __sub__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented # In case the 
other object defines __rsub__ + return self.__class__(years=self.years - other.years, + months=self.months - other.months, + days=self.days - other.days, + hours=self.hours - other.hours, + minutes=self.minutes - other.minutes, + seconds=self.seconds - other.seconds, + microseconds=self.microseconds - other.microseconds, + leapdays=self.leapdays or other.leapdays, + year=self.year or other.year, + month=self.month or other.month, + day=self.day or other.day, + weekday=self.weekday or other.weekday, + hour=self.hour or other.hour, + minute=self.minute or other.minute, + second=self.second or other.second, + microsecond=self.microsecond or other.microsecond) + + def __neg__(self): + return self.__class__(years=-self.years, + months=-self.months, + days=-self.days, + hours=-self.hours, + minutes=-self.minutes, + seconds=-self.seconds, + microseconds=-self.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __bool__(self): + return not (not self.years and + not self.months and + not self.days and + not self.hours and + not self.minutes and + not self.seconds and + not self.microseconds and + not self.leapdays and + self.year is None and + self.month is None and + self.day is None and + self.weekday is None and + self.hour is None and + self.minute is None and + self.second is None and + self.microsecond is None) + # Compatibility with Python 2.x + __nonzero__ = __bool__ + + def __mul__(self, other): + try: + f = float(other) + except TypeError: + return NotImplemented + + return self.__class__(years=int(self.years * f), + months=int(self.months * f), + days=int(self.days * f), + hours=int(self.hours * f), + minutes=int(self.minutes * f), + seconds=int(self.seconds * f), + microseconds=int(self.microseconds * f), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + __rmul__ = __mul__ + + def __eq__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented + if self.weekday or other.weekday: + if not self.weekday or not other.weekday: + return False + if self.weekday.weekday != other.weekday.weekday: + return False + n1, n2 = self.weekday.n, other.weekday.n + if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): + return False + return (self.years == other.years and + self.months == other.months and + self.days == other.days and + self.hours == other.hours and + self.minutes == other.minutes and + self.seconds == other.seconds and + self.microseconds == other.microseconds and + self.leapdays == other.leapdays and + self.year == other.year and + self.month == other.month and + self.day == other.day and + self.hour == other.hour and + self.minute == other.minute and + self.second == other.second and + self.microsecond == other.microsecond) + + __hash__ = None + + def __ne__(self, other): + return not self.__eq__(other) + + def __div__(self, other): + try: + reciprocal = 1 / float(other) + except TypeError: + return NotImplemented + + return self.__mul__(reciprocal) + + __truediv__ = __div__ + + def __repr__(self): + l = [] + for attr in ["years", "months", "days", "leapdays", + "hours", "minutes", "seconds", "microseconds"]: + value = getattr(self, attr) + if value: + l.append("{attr}={value:+g}".format(attr=attr, value=value)) + for attr in ["year", 
"month", "day", "weekday", + "hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + l.append("{attr}={value}".format(attr=attr, value=repr(value))) + return "{classname}({attrs})".format(classname=self.__class__.__name__, + attrs=", ".join(l)) + +def _sign(x): + return int(copysign(1, x)) + +# vim:ts=4:sw=4:et diff --git a/app/lib/dateutil/rrule.py b/app/lib/dateutil/rrule.py new file mode 100644 index 0000000..da94351 --- /dev/null +++ b/app/lib/dateutil/rrule.py @@ -0,0 +1,1607 @@ +# -*- coding: utf-8 -*- +""" +The rrule module offers a small, complete, and very fast, implementation of +the recurrence rules documented in the +`iCalendar RFC `_, +including support for caching of results. +""" +import itertools +import datetime +import calendar +import sys + +try: + from math import gcd +except ImportError: + from fractions import gcd + +from six import advance_iterator, integer_types +from six.moves import _thread, range +import heapq + +from ._common import weekday as weekdaybase + +# For warning about deprecation of until and count +from warnings import warn + +__all__ = ["rrule", "rruleset", "rrulestr", + "YEARLY", "MONTHLY", "WEEKLY", "DAILY", + "HOURLY", "MINUTELY", "SECONDLY", + "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +# Every mask is 7 days longer to handle cross-year weekly periods. +M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 + + [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) +M365MASK = list(M366MASK) +M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32)) +MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +MDAY365MASK = list(MDAY366MASK) +M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0)) +NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +NMDAY365MASK = list(NMDAY366MASK) +M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) +WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55 +del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] +MDAY365MASK = tuple(MDAY365MASK) +M365MASK = tuple(M365MASK) + +FREQNAMES = ['YEARLY','MONTHLY','WEEKLY','DAILY','HOURLY','MINUTELY','SECONDLY'] + +(YEARLY, + MONTHLY, + WEEKLY, + DAILY, + HOURLY, + MINUTELY, + SECONDLY) = list(range(7)) + +# Imported on demand. +easter = None +parser = None + +class weekday(weekdaybase): + """ + This version of weekday does not allow n = 0. + """ + def __init__(self, wkday, n=None): + if n == 0: + raise ValueError("Can't create weekday with n==0") + + super(weekday, self).__init__(wkday, n) + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)]) + + +def _invalidates_cache(f): + """ + Decorator for rruleset methods which may invalidate the + cached length. 
+ """ + def inner_func(self, *args, **kwargs): + rv = f(self, *args, **kwargs) + self._invalidate_cache() + return rv + + return inner_func + + +class rrulebase(object): + def __init__(self, cache=False): + if cache: + self._cache = [] + self._cache_lock = _thread.allocate_lock() + self._invalidate_cache() + else: + self._cache = None + self._cache_complete = False + self._len = None + + def __iter__(self): + if self._cache_complete: + return iter(self._cache) + elif self._cache is None: + return self._iter() + else: + return self._iter_cached() + + def _invalidate_cache(self): + if self._cache is not None: + self._cache = [] + self._cache_complete = False + self._cache_gen = self._iter() + + if self._cache_lock.locked(): + self._cache_lock.release() + + self._len = None + + def _iter_cached(self): + i = 0 + gen = self._cache_gen + cache = self._cache + acquire = self._cache_lock.acquire + release = self._cache_lock.release + while gen: + if i == len(cache): + acquire() + if self._cache_complete: + break + try: + for j in range(10): + cache.append(advance_iterator(gen)) + except StopIteration: + self._cache_gen = gen = None + self._cache_complete = True + break + release() + yield cache[i] + i += 1 + while i < self._len: + yield cache[i] + i += 1 + + def __getitem__(self, item): + if self._cache_complete: + return self._cache[item] + elif isinstance(item, slice): + if item.step and item.step < 0: + return list(iter(self))[item] + else: + return list(itertools.islice(self, + item.start or 0, + item.stop or sys.maxsize, + item.step or 1)) + elif item >= 0: + gen = iter(self) + try: + for i in range(item+1): + res = advance_iterator(gen) + except StopIteration: + raise IndexError + return res + else: + return list(iter(self))[item] + + def __contains__(self, item): + if self._cache_complete: + return item in self._cache + else: + for i in self: + if i == item: + return True + elif i > item: + return False + return False + + # __len__() introduces a large performance penality. + def count(self): + """ Returns the number of recurrences in this set. It will have go + trough the whole recurrence, if this hasn't been done before. """ + if self._len is None: + for x in self: + pass + return self._len + + def before(self, dt, inc=False): + """ Returns the last recurrence before the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + last = None + if inc: + for i in gen: + if i > dt: + break + last = i + else: + for i in gen: + if i >= dt: + break + last = i + return last + + def after(self, dt, inc=False): + """ Returns the first recurrence after the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + if inc: + for i in gen: + if i >= dt: + return i + else: + for i in gen: + if i > dt: + return i + return None + + def xafter(self, dt, count=None, inc=False): + """ + Generator which yields up to `count` recurrences after the given + datetime instance, equivalent to `after`. + + :param dt: + The datetime at which to start generating recurrences. + + :param count: + The maximum number of recurrences to generate. If `None` (default), + dates are generated until the recurrence rule is exhausted. 
+ + :param inc: + If `dt` is an instance of the rule and `inc` is `True`, it is + included in the output. + + :yields: Yields a sequence of `datetime` objects. + """ + + if self._cache_complete: + gen = self._cache + else: + gen = self + + # Select the comparison function + if inc: + comp = lambda dc, dtc: dc >= dtc + else: + comp = lambda dc, dtc: dc > dtc + + # Generate dates + n = 0 + for d in gen: + if comp(d, dt): + yield d + + if count is not None: + n += 1 + if n >= count: + break + + def between(self, after, before, inc=False, count=1): + """ Returns all the occurrences of the rrule between after and before. + The inc keyword defines what happens if after and/or before are + themselves occurrences. With inc=True, they will be included in the + list, if they are found in the recurrence set. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + started = False + l = [] + if inc: + for i in gen: + if i > before: + break + elif not started: + if i >= after: + started = True + l.append(i) + else: + l.append(i) + else: + for i in gen: + if i >= before: + break + elif not started: + if i > after: + started = True + l.append(i) + else: + l.append(i) + return l + + +class rrule(rrulebase): + """ + That's the base of the rrule operation. It accepts all the keywords + defined in the RFC as its constructor parameters (except byday, + which was renamed to byweekday) and more. The constructor prototype is:: + + rrule(freq) + + Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, + or SECONDLY. + + .. note:: + Per RFC section 3.3.10, recurrence instances falling on invalid dates + and times are ignored rather than coerced: + + Recurrence rules may generate recurrence instances with an invalid + date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM + on a day where the local time is moved forward by an hour at 1:00 + AM). Such recurrence instances MUST be ignored and MUST NOT be + counted as part of the recurrence set. + + This can lead to possibly surprising behavior when, for example, the + start date occurs at the end of the month: + + >>> from dateutil.rrule import rrule, MONTHLY + >>> from datetime import datetime + >>> start_date = datetime(2014, 12, 31) + >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date)) + ... # doctest: +NORMALIZE_WHITESPACE + [datetime.datetime(2014, 12, 31, 0, 0), + datetime.datetime(2015, 1, 31, 0, 0), + datetime.datetime(2015, 3, 31, 0, 0), + datetime.datetime(2015, 5, 31, 0, 0)] + + Additionally, it supports the following keyword arguments: + + :param cache: + If given, it must be a boolean value specifying to enable or disable + caching of results. If you will use the same rrule instance multiple + times, enabling caching will improve the performance considerably. + :param dtstart: + The recurrence start. Besides being the base for the recurrence, + missing parameters in the final recurrence instances will also be + extracted from this date. If not given, datetime.now() will be used + instead. + :param interval: + The interval between each freq iteration. For example, when using + YEARLY, an interval of 2 means once every two years, but with HOURLY, + it means once every two hours. The default interval is 1. + :param wkst: + The week start day. Must be one of the MO, TU, WE constants, or an + integer, specifying the first day of the week. This will affect + recurrences based on weekly periods. 
The default week start is got + from calendar.firstweekday(), and may be modified by + calendar.setfirstweekday(). + :param count: + How many occurrences will be generated. + + .. note:: + As of version 2.5.0, the use of the ``until`` keyword together + with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10. + :param until: + If given, this must be a datetime instance, that will specify the + limit of the recurrence. The last recurrence in the rule is the greatest + datetime that is less than or equal to the value specified in the + ``until`` parameter. + + .. note:: + As of version 2.5.0, the use of the ``until`` keyword together + with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10. + :param bysetpos: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each given integer will specify an occurrence + number, corresponding to the nth occurrence of the rule inside the + frequency period. For example, a bysetpos of -1 if combined with a + MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will + result in the last work day of every month. + :param bymonth: + If given, it must be either an integer, or a sequence of integers, + meaning the months to apply the recurrence to. + :param bymonthday: + If given, it must be either an integer, or a sequence of integers, + meaning the month days to apply the recurrence to. + :param byyearday: + If given, it must be either an integer, or a sequence of integers, + meaning the year days to apply the recurrence to. + :param byweekno: + If given, it must be either an integer, or a sequence of integers, + meaning the week numbers to apply the recurrence to. Week numbers + have the meaning described in ISO8601, that is, the first week of + the year is that containing at least four days of the new year. + :param byweekday: + If given, it must be either an integer (0 == MO), a sequence of + integers, one of the weekday constants (MO, TU, etc), or a sequence + of these constants. When given, these variables will define the + weekdays where the recurrence will be applied. It's also possible to + use an argument n for the weekday instances, which will mean the nth + occurrence of this weekday in the period. For example, with MONTHLY, + or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the + first friday of the month where the recurrence happens. Notice that in + the RFC documentation, this is specified as BYDAY, but was renamed to + avoid the ambiguity of that keyword. + :param byhour: + If given, it must be either an integer, or a sequence of integers, + meaning the hours to apply the recurrence to. + :param byminute: + If given, it must be either an integer, or a sequence of integers, + meaning the minutes to apply the recurrence to. + :param bysecond: + If given, it must be either an integer, or a sequence of integers, + meaning the seconds to apply the recurrence to. + :param byeaster: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each integer will define an offset from the + Easter Sunday. Passing the offset 0 to byeaster will yield the Easter + Sunday itself. This is an extension to the RFC specification. 
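+ + For example, the "last work day of the month" combination described + above under ``bysetpos`` could be written as follows (an illustrative + sketch; the dates shown assume a ``dtstart`` of 2015-01-01): + + >>> from dateutil.rrule import rrule, MONTHLY, MO, TU, WE, TH, FR + >>> from datetime import datetime + >>> list(rrule(MONTHLY, count=2, bysetpos=-1, + ... byweekday=(MO, TU, WE, TH, FR), + ... dtstart=datetime(2015, 1, 1))) + ... # doctest: +NORMALIZE_WHITESPACE + [datetime.datetime(2015, 1, 30, 0, 0), + datetime.datetime(2015, 2, 27, 0, 0)]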
+ """ + def __init__(self, freq, dtstart=None, + interval=1, wkst=None, count=None, until=None, bysetpos=None, + bymonth=None, bymonthday=None, byyearday=None, byeaster=None, + byweekno=None, byweekday=None, + byhour=None, byminute=None, bysecond=None, + cache=False): + super(rrule, self).__init__(cache) + global easter + if not dtstart: + dtstart = datetime.datetime.now().replace(microsecond=0) + elif not isinstance(dtstart, datetime.datetime): + dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) + else: + dtstart = dtstart.replace(microsecond=0) + self._dtstart = dtstart + self._tzinfo = dtstart.tzinfo + self._freq = freq + self._interval = interval + self._count = count + + # Cache the original byxxx rules, if they are provided, as the _byxxx + # attributes do not necessarily map to the inputs, and this can be + # a problem in generating the strings. Only store things if they've + # been supplied (the string retrieval will just use .get()) + self._original_rule = {} + + if until and not isinstance(until, datetime.datetime): + until = datetime.datetime.fromordinal(until.toordinal()) + self._until = until + + if count and until: + warn("Using both 'count' and 'until' is inconsistent with RFC 2445" + " and has been deprecated in dateutil. Future versions will " + "raise an error.", DeprecationWarning) + + if wkst is None: + self._wkst = calendar.firstweekday() + elif isinstance(wkst, integer_types): + self._wkst = wkst + else: + self._wkst = wkst.weekday + + if bysetpos is None: + self._bysetpos = None + elif isinstance(bysetpos, integer_types): + if bysetpos == 0 or not (-366 <= bysetpos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + self._bysetpos = (bysetpos,) + else: + self._bysetpos = tuple(bysetpos) + for pos in self._bysetpos: + if pos == 0 or not (-366 <= pos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + + if self._bysetpos: + self._original_rule['bysetpos'] = self._bysetpos + + if (byweekno is None and byyearday is None and bymonthday is None and + byweekday is None and byeaster is None): + if freq == YEARLY: + if bymonth is None: + bymonth = dtstart.month + self._original_rule['bymonth'] = None + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == MONTHLY: + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == WEEKLY: + byweekday = dtstart.weekday() + self._original_rule['byweekday'] = None + + # bymonth + if bymonth is None: + self._bymonth = None + else: + if isinstance(bymonth, integer_types): + bymonth = (bymonth,) + + self._bymonth = tuple(sorted(set(bymonth))) + + if 'bymonth' not in self._original_rule: + self._original_rule['bymonth'] = self._bymonth + + # byyearday + if byyearday is None: + self._byyearday = None + else: + if isinstance(byyearday, integer_types): + byyearday = (byyearday,) + + self._byyearday = tuple(sorted(set(byyearday))) + self._original_rule['byyearday'] = self._byyearday + + # byeaster + if byeaster is not None: + if not easter: + from dateutil import easter + if isinstance(byeaster, integer_types): + self._byeaster = (byeaster,) + else: + self._byeaster = tuple(sorted(byeaster)) + + self._original_rule['byeaster'] = self._byeaster + else: + self._byeaster = None + + # bymonthday + if bymonthday is None: + self._bymonthday = () + self._bynmonthday = () + else: + if isinstance(bymonthday, integer_types): + bymonthday = (bymonthday,) + + bymonthday = set(bymonthday) # Ensure it's 
unique + + self._bymonthday = tuple(sorted([x for x in bymonthday if x > 0])) + self._bynmonthday = tuple(sorted([x for x in bymonthday if x < 0])) + + # Storing positive numbers first, then negative numbers + if 'bymonthday' not in self._original_rule: + self._original_rule['bymonthday'] = tuple( + itertools.chain(self._bymonthday, self._bynmonthday)) + + # byweekno + if byweekno is None: + self._byweekno = None + else: + if isinstance(byweekno, integer_types): + byweekno = (byweekno,) + + self._byweekno = tuple(sorted(set(byweekno))) + + self._original_rule['byweekno'] = self._byweekno + + # byweekday / bynweekday + if byweekday is None: + self._byweekday = None + self._bynweekday = None + else: + # If it's one of the valid non-sequence types, convert to a + # single-element sequence before the iterator that builds the + # byweekday set. + if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): + byweekday = (byweekday,) + + self._byweekday = set() + self._bynweekday = set() + for wday in byweekday: + if isinstance(wday, integer_types): + self._byweekday.add(wday) + elif not wday.n or freq > MONTHLY: + self._byweekday.add(wday.weekday) + else: + self._bynweekday.add((wday.weekday, wday.n)) + + if not self._byweekday: + self._byweekday = None + elif not self._bynweekday: + self._bynweekday = None + + if self._byweekday is not None: + self._byweekday = tuple(sorted(self._byweekday)) + orig_byweekday = [weekday(x) for x in self._byweekday] + else: + orig_byweekday = tuple() + + if self._bynweekday is not None: + self._bynweekday = tuple(sorted(self._bynweekday)) + orig_bynweekday = [weekday(*x) for x in self._bynweekday] + else: + orig_bynweekday = tuple() + + if 'byweekday' not in self._original_rule: + self._original_rule['byweekday'] = tuple(itertools.chain( + orig_byweekday, orig_bynweekday)) + + # byhour + if byhour is None: + if freq < HOURLY: + self._byhour = set((dtstart.hour,)) + else: + self._byhour = None + else: + if isinstance(byhour, integer_types): + byhour = (byhour,) + + if freq == HOURLY: + self._byhour = self.__construct_byset(start=dtstart.hour, + byxxx=byhour, + base=24) + else: + self._byhour = set(byhour) + + self._byhour = tuple(sorted(self._byhour)) + self._original_rule['byhour'] = self._byhour + + # byminute + if byminute is None: + if freq < MINUTELY: + self._byminute = set((dtstart.minute,)) + else: + self._byminute = None + else: + if isinstance(byminute, integer_types): + byminute = (byminute,) + + if freq == MINUTELY: + self._byminute = self.__construct_byset(start=dtstart.minute, + byxxx=byminute, + base=60) + else: + self._byminute = set(byminute) + + self._byminute = tuple(sorted(self._byminute)) + self._original_rule['byminute'] = self._byminute + + # bysecond + if bysecond is None: + if freq < SECONDLY: + self._bysecond = ((dtstart.second,)) + else: + self._bysecond = None + else: + if isinstance(bysecond, integer_types): + bysecond = (bysecond,) + + self._bysecond = set(bysecond) + + if freq == SECONDLY: + self._bysecond = self.__construct_byset(start=dtstart.second, + byxxx=bysecond, + base=60) + else: + self._bysecond = set(bysecond) + + self._bysecond = tuple(sorted(self._bysecond)) + self._original_rule['bysecond'] = self._bysecond + + if self._freq >= HOURLY: + self._timeset = None + else: + self._timeset = [] + for hour in self._byhour: + for minute in self._byminute: + for second in self._bysecond: + self._timeset.append( + datetime.time(hour, minute, second, + tzinfo=self._tzinfo)) + self._timeset.sort() + self._timeset = 
tuple(self._timeset) + + def __str__(self): + """ + Output a string that would generate this RRULE if passed to rrulestr. + This is mostly compatible with RFC2445, except for the + dateutil-specific extension BYEASTER. + """ + + output = [] + h, m, s = [None] * 3 + if self._dtstart: + output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) + h, m, s = self._dtstart.timetuple()[3:6] + + parts = ['FREQ=' + FREQNAMES[self._freq]] + if self._interval != 1: + parts.append('INTERVAL=' + str(self._interval)) + + if self._wkst: + parts.append('WKST=' + repr(weekday(self._wkst))[0:2]) + + if self._count: + parts.append('COUNT=' + str(self._count)) + + if self._until: + parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) + + if self._original_rule.get('byweekday') is not None: + # The str() method on weekday objects doesn't generate + # RFC2445-compliant strings, so we should modify that. + original_rule = dict(self._original_rule) + wday_strings = [] + for wday in original_rule['byweekday']: + if wday.n: + wday_strings.append('{n:+d}{wday}'.format( + n=wday.n, + wday=repr(wday)[0:2])) + else: + wday_strings.append(repr(wday)) + + original_rule['byweekday'] = wday_strings + else: + original_rule = self._original_rule + + partfmt = '{name}={vals}' + for name, key in [('BYSETPOS', 'bysetpos'), + ('BYMONTH', 'bymonth'), + ('BYMONTHDAY', 'bymonthday'), + ('BYYEARDAY', 'byyearday'), + ('BYWEEKNO', 'byweekno'), + ('BYDAY', 'byweekday'), + ('BYHOUR', 'byhour'), + ('BYMINUTE', 'byminute'), + ('BYSECOND', 'bysecond'), + ('BYEASTER', 'byeaster')]: + value = original_rule.get(key) + if value: + parts.append(partfmt.format(name=name, vals=(','.join(str(v) + for v in value)))) + + output.append(';'.join(parts)) + return '\n'.join(output) + + def replace(self, **kwargs): + """Return new rrule with same attributes except for those attributes given new + values by whichever keyword arguments are specified.""" + new_kwargs = {"interval": self._interval, + "count": self._count, + "dtstart": self._dtstart, + "freq": self._freq, + "until": self._until, + "wkst": self._wkst, + "cache": False if self._cache is None else True } + new_kwargs.update(self._original_rule) + new_kwargs.update(kwargs) + return rrule(**new_kwargs) + + + def _iter(self): + year, month, day, hour, minute, second, weekday, yearday, _ = \ + self._dtstart.timetuple() + + # Some local variables to speed things up a bit + freq = self._freq + interval = self._interval + wkst = self._wkst + until = self._until + bymonth = self._bymonth + byweekno = self._byweekno + byyearday = self._byyearday + byweekday = self._byweekday + byeaster = self._byeaster + bymonthday = self._bymonthday + bynmonthday = self._bynmonthday + bysetpos = self._bysetpos + byhour = self._byhour + byminute = self._byminute + bysecond = self._bysecond + + ii = _iterinfo(self) + ii.rebuild(year, month) + + getdayset = {YEARLY: ii.ydayset, + MONTHLY: ii.mdayset, + WEEKLY: ii.wdayset, + DAILY: ii.ddayset, + HOURLY: ii.ddayset, + MINUTELY: ii.ddayset, + SECONDLY: ii.ddayset}[freq] + + if freq < HOURLY: + timeset = self._timeset + else: + gettimeset = {HOURLY: ii.htimeset, + MINUTELY: ii.mtimeset, + SECONDLY: ii.stimeset}[freq] + if ((freq >= HOURLY and + self._byhour and hour not in self._byhour) or + (freq >= MINUTELY and + self._byminute and minute not in self._byminute) or + (freq >= SECONDLY and + self._bysecond and second not in self._bysecond)): + timeset = () + else: + timeset = gettimeset(hour, minute, second) + + total = 0 + count = self._count + while True: + # Get 
dayset with the right frequency + dayset, start, end = getdayset(year, month, day) + + # Do the "hard" work ;-) + filtered = False + for i in dayset[start:end]: + if ((bymonth and ii.mmask[i] not in bymonth) or + (byweekno and not ii.wnomask[i]) or + (byweekday and ii.wdaymask[i] not in byweekday) or + (ii.nwdaymask and not ii.nwdaymask[i]) or + (byeaster and not ii.eastermask[i]) or + ((bymonthday or bynmonthday) and + ii.mdaymask[i] not in bymonthday and + ii.nmdaymask[i] not in bynmonthday) or + (byyearday and + ((i < ii.yearlen and i+1 not in byyearday and + -ii.yearlen+i not in byyearday) or + (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and + -ii.nextyearlen+i-ii.yearlen not in byyearday)))): + dayset[i] = None + filtered = True + + # Output results + if bysetpos and timeset: + poslist = [] + for pos in bysetpos: + if pos < 0: + daypos, timepos = divmod(pos, len(timeset)) + else: + daypos, timepos = divmod(pos-1, len(timeset)) + try: + i = [x for x in dayset[start:end] + if x is not None][daypos] + time = timeset[timepos] + except IndexError: + pass + else: + date = datetime.date.fromordinal(ii.yearordinal+i) + res = datetime.datetime.combine(date, time) + if res not in poslist: + poslist.append(res) + poslist.sort() + for res in poslist: + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + total += 1 + yield res + if count: + count -= 1 + if not count: + self._len = total + return + else: + for i in dayset[start:end]: + if i is not None: + date = datetime.date.fromordinal(ii.yearordinal + i) + for time in timeset: + res = datetime.datetime.combine(date, time) + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + total += 1 + yield res + if count: + count -= 1 + if not count: + self._len = total + return + + # Handle frequency and interval + fixday = False + if freq == YEARLY: + year += interval + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == MONTHLY: + month += interval + if month > 12: + div, mod = divmod(month, 12) + month = mod + year += div + if month == 0: + month = 12 + year -= 1 + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == WEEKLY: + if wkst > weekday: + day += -(weekday+1+(6-wkst))+self._interval*7 + else: + day += -(weekday-wkst)+self._interval*7 + weekday = wkst + fixday = True + elif freq == DAILY: + day += interval + fixday = True + elif freq == HOURLY: + if filtered: + # Jump to one iteration before next day + hour += ((23-hour)//interval)*interval + + if byhour: + ndays, hour = self.__mod_distance(value=hour, + byxxx=self._byhour, + base=24) + else: + ndays, hour = divmod(hour+interval, 24) + + if ndays: + day += ndays + fixday = True + + timeset = gettimeset(hour, minute, second) + elif freq == MINUTELY: + if filtered: + # Jump to one iteration before next day + minute += ((1439-(hour*60+minute))//interval)*interval + + valid = False + rep_rate = (24*60) + for j in range(rep_rate // gcd(interval, rep_rate)): + if byminute: + nhours, minute = \ + self.__mod_distance(value=minute, + byxxx=self._byminute, + base=60) + else: + nhours, minute = divmod(minute+interval, 60) + + div, hour = divmod(hour+nhours, 24) + if div: + day += div + fixday = True + filtered = False + + if not byhour or hour in byhour: + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval and ' + + 'byhour resulting in empty rule.') + + timeset = gettimeset(hour, minute, second) + elif 
freq == SECONDLY: + if filtered: + # Jump to one iteration before next day + second += (((86399 - (hour * 3600 + minute * 60 + second)) + // interval) * interval) + + rep_rate = (24 * 3600) + valid = False + for j in range(0, rep_rate // gcd(interval, rep_rate)): + if bysecond: + nminutes, second = \ + self.__mod_distance(value=second, + byxxx=self._bysecond, + base=60) + else: + nminutes, second = divmod(second+interval, 60) + + div, minute = divmod(minute+nminutes, 60) + if div: + hour += div + div, hour = divmod(hour, 24) + if div: + day += div + fixday = True + + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute) and + (not bysecond or second in bysecond)): + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval, ' + + 'byhour and byminute resulting in empty' + + ' rule.') + + timeset = gettimeset(hour, minute, second) + + if fixday and day > 28: + daysinmonth = calendar.monthrange(year, month)[1] + if day > daysinmonth: + while day > daysinmonth: + day -= daysinmonth + month += 1 + if month == 13: + month = 1 + year += 1 + if year > datetime.MAXYEAR: + self._len = total + return + daysinmonth = calendar.monthrange(year, month)[1] + ii.rebuild(year, month) + + def __construct_byset(self, start, byxxx, base): + """ + If a `BYXXX` sequence is passed to the constructor at the same level as + `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some + specifications which cannot be reached given some starting conditions. + + This occurs whenever the interval is not coprime with the base of a + given unit and the difference between the starting position and the + ending position is not coprime with the greatest common denominator + between the interval and the base. For example, with a FREQ of hourly + starting at 17:00 and an interval of 4, the only valid values for + BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not + coprime. + + :param start: + Specifies the starting position. + :param byxxx: + An iterable containing the list of allowed values. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + This does not preserve the type of the iterable, returning a set, since + the values should be unique and the order is irrelevant, this will + speed up later lookups. + + In the event of an empty set, raises a :exception:`ValueError`, as this + results in an empty rrule. + """ + + cset = set() + + # Support a single byxxx value. + if isinstance(byxxx, integer_types): + byxxx = (byxxx, ) + + for num in byxxx: + i_gcd = gcd(self._interval, base) + # Use divmod rather than % because we need to wrap negative nums. + if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: + cset.add(num) + + if len(cset) == 0: + raise ValueError("Invalid rrule byxxx generates an empty set.") + + return cset + + def __mod_distance(self, value, byxxx, base): + """ + Calculates the next value in a sequence where the `FREQ` parameter is + specified along with a `BYXXX` parameter at the same "level" + (e.g. `HOURLY` specified with `BYHOUR`). + + :param value: + The old value of the component. + :param byxxx: + The `BYXXX` set, which should have been generated by + `rrule._construct_byset`, or something else which checks that a + valid rule is present. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). 
+ + If a valid value is not found after `base` iterations (the maximum + number before the sequence would start to repeat), this raises a + :exception:`ValueError`, as no valid values were found. + + This returns a tuple of `divmod(n*interval, base)`, where `n` is the + smallest number of `interval` repetitions until the next specified + value in `byxxx` is found. + """ + accumulator = 0 + for ii in range(1, base + 1): + # Using divmod() over % to account for negative intervals + div, value = divmod(value + self._interval, base) + accumulator += div + if value in byxxx: + return (accumulator, value) + + +class _iterinfo(object): + __slots__ = ["rrule", "lastyear", "lastmonth", + "yearlen", "nextyearlen", "yearordinal", "yearweekday", + "mmask", "mrange", "mdaymask", "nmdaymask", + "wdaymask", "wnomask", "nwdaymask", "eastermask"] + + def __init__(self, rrule): + for attr in self.__slots__: + setattr(self, attr, None) + self.rrule = rrule + + def rebuild(self, year, month): + # Every mask is 7 days longer to handle cross-year weekly periods. + rr = self.rrule + if year != self.lastyear: + self.yearlen = 365 + calendar.isleap(year) + self.nextyearlen = 365 + calendar.isleap(year + 1) + firstyday = datetime.date(year, 1, 1) + self.yearordinal = firstyday.toordinal() + self.yearweekday = firstyday.weekday() + + wday = datetime.date(year, 1, 1).weekday() + if self.yearlen == 365: + self.mmask = M365MASK + self.mdaymask = MDAY365MASK + self.nmdaymask = NMDAY365MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M365RANGE + else: + self.mmask = M366MASK + self.mdaymask = MDAY366MASK + self.nmdaymask = NMDAY366MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M366RANGE + + if not rr._byweekno: + self.wnomask = None + else: + self.wnomask = [0]*(self.yearlen+7) + # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) + no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 + if no1wkst >= 4: + no1wkst = 0 + # Number of days in the year, plus the days we got + # from last year. + wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 + else: + # Number of days in the year, minus the days we + # left in last year. + wyearlen = self.yearlen-no1wkst + div, mod = divmod(wyearlen, 7) + numweeks = div+mod//4 + for n in rr._byweekno: + if n < 0: + n += numweeks+1 + if not (0 < n <= numweeks): + continue + if n > 1: + i = no1wkst+(n-1)*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + else: + i = no1wkst + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if 1 in rr._byweekno: + # Check week number 1 of next year as well + # TODO: Check -numweeks for next year. + i = no1wkst+numweeks*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + if i < self.yearlen: + # If week starts in next year, we + # don't care about it. + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if no1wkst: + # Check last week number of last year as + # well. If no1wkst is 0, either the year + # started on week start, or week number 1 + # got days from last year, so there are no + # days from last year's last week number in + # this year. 
+ if -1 not in rr._byweekno: + lyearweekday = datetime.date(year-1, 1, 1).weekday() + lno1wkst = (7-lyearweekday+rr._wkst) % 7 + lyearlen = 365+calendar.isleap(year-1) + if lno1wkst >= 4: + lno1wkst = 0 + lnumweeks = 52+(lyearlen + + (lyearweekday-rr._wkst) % 7) % 7//4 + else: + lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 + else: + lnumweeks = -1 + if lnumweeks in rr._byweekno: + for i in range(no1wkst): + self.wnomask[i] = 1 + + if (rr._bynweekday and (month != self.lastmonth or + year != self.lastyear)): + ranges = [] + if rr._freq == YEARLY: + if rr._bymonth: + for month in rr._bymonth: + ranges.append(self.mrange[month-1:month+1]) + else: + ranges = [(0, self.yearlen)] + elif rr._freq == MONTHLY: + ranges = [self.mrange[month-1:month+1]] + if ranges: + # Weekly frequency won't get here, so we may not + # care about cross-year weekly periods. + self.nwdaymask = [0]*self.yearlen + for first, last in ranges: + last -= 1 + for wday, n in rr._bynweekday: + if n < 0: + i = last+(n+1)*7 + i -= (self.wdaymask[i]-wday) % 7 + else: + i = first+(n-1)*7 + i += (7-self.wdaymask[i]+wday) % 7 + if first <= i <= last: + self.nwdaymask[i] = 1 + + if rr._byeaster: + self.eastermask = [0]*(self.yearlen+7) + eyday = easter.easter(year).toordinal()-self.yearordinal + for offset in rr._byeaster: + self.eastermask[eyday+offset] = 1 + + self.lastyear = year + self.lastmonth = month + + def ydayset(self, year, month, day): + return list(range(self.yearlen)), 0, self.yearlen + + def mdayset(self, year, month, day): + dset = [None]*self.yearlen + start, end = self.mrange[month-1:month+1] + for i in range(start, end): + dset[i] = i + return dset, start, end + + def wdayset(self, year, month, day): + # We need to handle cross-year weeks here. + dset = [None]*(self.yearlen+7) + i = datetime.date(year, month, day).toordinal()-self.yearordinal + start = i + for j in range(7): + dset[i] = i + i += 1 + # if (not (0 <= i < self.yearlen) or + # self.wdaymask[i] == self.rrule._wkst): + # This will cross the year boundary, if necessary. + if self.wdaymask[i] == self.rrule._wkst: + break + return dset, start, i + + def ddayset(self, year, month, day): + dset = [None] * self.yearlen + i = datetime.date(year, month, day).toordinal() - self.yearordinal + dset[i] = i + return dset, i, i + 1 + + def htimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for minute in rr._byminute: + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, + tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def mtimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def stimeset(self, hour, minute, second): + return (datetime.time(hour, minute, second, + tzinfo=self.rrule._tzinfo),) + + +class rruleset(rrulebase): + """ The rruleset type allows more complex recurrence setups, mixing + multiple rules, dates, exclusion rules, and exclusion dates. The type + constructor takes the following keyword arguments: + + :param cache: If True, caching of results will be enabled, improving + performance of multiple queries considerably. 
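+ + As a minimal illustrative sketch (the dates are hypothetical), a + weekly rule can be combined with an exclusion date: + + >>> from dateutil.rrule import rruleset, rrule, WEEKLY + >>> from datetime import datetime + >>> rset = rruleset() + >>> rset.rrule(rrule(WEEKLY, count=3, dtstart=datetime(2015, 1, 5))) + >>> rset.exdate(datetime(2015, 1, 12)) + >>> list(rset) # doctest: +NORMALIZE_WHITESPACE + [datetime.datetime(2015, 1, 5, 0, 0), + datetime.datetime(2015, 1, 19, 0, 0)]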
""" + + class _genitem(object): + def __init__(self, genlist, gen): + try: + self.dt = advance_iterator(gen) + genlist.append(self) + except StopIteration: + pass + self.genlist = genlist + self.gen = gen + + def __next__(self): + try: + self.dt = advance_iterator(self.gen) + except StopIteration: + if self.genlist[0] is self: + heapq.heappop(self.genlist) + else: + self.genlist.remove(self) + heapq.heapify(self.genlist) + + next = __next__ + + def __lt__(self, other): + return self.dt < other.dt + + def __gt__(self, other): + return self.dt > other.dt + + def __eq__(self, other): + return self.dt == other.dt + + def __ne__(self, other): + return self.dt != other.dt + + def __init__(self, cache=False): + super(rruleset, self).__init__(cache) + self._rrule = [] + self._rdate = [] + self._exrule = [] + self._exdate = [] + + @_invalidates_cache + def rrule(self, rrule): + """ Include the given :py:class:`rrule` instance in the recurrence set + generation. """ + self._rrule.append(rrule) + + @_invalidates_cache + def rdate(self, rdate): + """ Include the given :py:class:`datetime` instance in the recurrence + set generation. """ + self._rdate.append(rdate) + + @_invalidates_cache + def exrule(self, exrule): + """ Include the given rrule instance in the recurrence set exclusion + list. Dates which are part of the given recurrence rules will not + be generated, even if some inclusive rrule or rdate matches them. + """ + self._exrule.append(exrule) + + @_invalidates_cache + def exdate(self, exdate): + """ Include the given datetime instance in the recurrence set + exclusion list. Dates included that way will not be generated, + even if some inclusive rrule or rdate matches them. """ + self._exdate.append(exdate) + + def _iter(self): + rlist = [] + self._rdate.sort() + self._genitem(rlist, iter(self._rdate)) + for gen in [iter(x) for x in self._rrule]: + self._genitem(rlist, gen) + exlist = [] + self._exdate.sort() + self._genitem(exlist, iter(self._exdate)) + for gen in [iter(x) for x in self._exrule]: + self._genitem(exlist, gen) + lastdt = None + total = 0 + heapq.heapify(rlist) + heapq.heapify(exlist) + while rlist: + ritem = rlist[0] + if not lastdt or lastdt != ritem.dt: + while exlist and exlist[0] < ritem: + exitem = exlist[0] + advance_iterator(exitem) + if exlist and exlist[0] is exitem: + heapq.heapreplace(exlist, exitem) + if not exlist or ritem != exlist[0]: + total += 1 + yield ritem.dt + lastdt = ritem.dt + advance_iterator(ritem) + if rlist and rlist[0] is ritem: + heapq.heapreplace(rlist, ritem) + self._len = total + + +class _rrulestr(object): + + _freq_map = {"YEARLY": YEARLY, + "MONTHLY": MONTHLY, + "WEEKLY": WEEKLY, + "DAILY": DAILY, + "HOURLY": HOURLY, + "MINUTELY": MINUTELY, + "SECONDLY": SECONDLY} + + _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, + "FR": 4, "SA": 5, "SU": 6} + + def _handle_int(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = int(value) + + def _handle_int_list(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = [int(x) for x in value.split(',')] + + _handle_INTERVAL = _handle_int + _handle_COUNT = _handle_int + _handle_BYSETPOS = _handle_int_list + _handle_BYMONTH = _handle_int_list + _handle_BYMONTHDAY = _handle_int_list + _handle_BYYEARDAY = _handle_int_list + _handle_BYEASTER = _handle_int_list + _handle_BYWEEKNO = _handle_int_list + _handle_BYHOUR = _handle_int_list + _handle_BYMINUTE = _handle_int_list + _handle_BYSECOND = _handle_int_list + + def _handle_FREQ(self, rrkwargs, name, value, **kwargs): + 
rrkwargs["freq"] = self._freq_map[value] + + def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): + global parser + if not parser: + from dateutil import parser + try: + rrkwargs["until"] = parser.parse(value, + ignoretz=kwargs.get("ignoretz"), + tzinfos=kwargs.get("tzinfos")) + except ValueError: + raise ValueError("invalid until date") + + def _handle_WKST(self, rrkwargs, name, value, **kwargs): + rrkwargs["wkst"] = self._weekday_map[value] + + def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): + """ + Two ways to specify this: +1MO or MO(+1) + """ + l = [] + for wday in value.split(','): + if '(' in wday: + # If it's of the form TH(+1), etc. + splt = wday.split('(') + w = splt[0] + n = int(splt[1][:-1]) + elif len(wday): + # If it's of the form +1MO + for i in range(len(wday)): + if wday[i] not in '+-0123456789': + break + n = wday[:i] or None + w = wday[i:] + if n: + n = int(n) + else: + raise ValueError("Invalid (empty) BYDAY specification.") + + l.append(weekdays[self._weekday_map[w]](n)) + rrkwargs["byweekday"] = l + + _handle_BYDAY = _handle_BYWEEKDAY + + def _parse_rfc_rrule(self, line, + dtstart=None, + cache=False, + ignoretz=False, + tzinfos=None): + if line.find(':') != -1: + name, value = line.split(':') + if name != "RRULE": + raise ValueError("unknown parameter name") + else: + value = line + rrkwargs = {} + for pair in value.split(';'): + name, value = pair.split('=') + name = name.upper() + value = value.upper() + try: + getattr(self, "_handle_"+name)(rrkwargs, name, value, + ignoretz=ignoretz, + tzinfos=tzinfos) + except AttributeError: + raise ValueError("unknown parameter '%s'" % name) + except (KeyError, ValueError): + raise ValueError("invalid '%s': %s" % (name, value)) + return rrule(dtstart=dtstart, cache=cache, **rrkwargs) + + def _parse_rfc(self, s, + dtstart=None, + cache=False, + unfold=False, + forceset=False, + compatible=False, + ignoretz=False, + tzinfos=None): + global parser + if compatible: + forceset = True + unfold = True + s = s.upper() + if not s.strip(): + raise ValueError("empty string") + if unfold: + lines = s.splitlines() + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + else: + lines = s.split() + if (not forceset and len(lines) == 1 and (s.find(':') == -1 or + s.startswith('RRULE:'))): + return self._parse_rfc_rrule(lines[0], cache=cache, + dtstart=dtstart, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + rrulevals = [] + rdatevals = [] + exrulevals = [] + exdatevals = [] + for line in lines: + if not line: + continue + if line.find(':') == -1: + name = "RRULE" + value = line + else: + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0] + parms = parms[1:] + if name == "RRULE": + for parm in parms: + raise ValueError("unsupported RRULE parm: "+parm) + rrulevals.append(value) + elif name == "RDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported RDATE parm: "+parm) + rdatevals.append(value) + elif name == "EXRULE": + for parm in parms: + raise ValueError("unsupported EXRULE parm: "+parm) + exrulevals.append(value) + elif name == "EXDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported RDATE parm: "+parm) + exdatevals.append(value) + elif name == "DTSTART": + for parm in parms: + raise ValueError("unsupported DTSTART parm: "+parm) + if not 
+                        from dateutil import parser
+                    dtstart = parser.parse(value, ignoretz=ignoretz,
+                                           tzinfos=tzinfos)
+                else:
+                    raise ValueError("unsupported property: "+name)
+            if (forceset or len(rrulevals) > 1 or rdatevals
+                    or exrulevals or exdatevals):
+                if not parser and (rdatevals or exdatevals):
+                    from dateutil import parser
+                rset = rruleset(cache=cache)
+                for value in rrulevals:
+                    rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
+                                                     ignoretz=ignoretz,
+                                                     tzinfos=tzinfos))
+                for value in rdatevals:
+                    for datestr in value.split(','):
+                        rset.rdate(parser.parse(datestr,
+                                                ignoretz=ignoretz,
+                                                tzinfos=tzinfos))
+                for value in exrulevals:
+                    rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
+                                                      ignoretz=ignoretz,
+                                                      tzinfos=tzinfos))
+                for value in exdatevals:
+                    for datestr in value.split(','):
+                        rset.exdate(parser.parse(datestr,
+                                                 ignoretz=ignoretz,
+                                                 tzinfos=tzinfos))
+                if compatible and dtstart:
+                    rset.rdate(dtstart)
+                return rset
+            else:
+                return self._parse_rfc_rrule(rrulevals[0],
+                                             dtstart=dtstart,
+                                             cache=cache,
+                                             ignoretz=ignoretz,
+                                             tzinfos=tzinfos)
+
+    def __call__(self, s, **kwargs):
+        return self._parse_rfc(s, **kwargs)
+
+rrulestr = _rrulestr()
+
+# vim:ts=4:sw=4:et
diff --git a/app/lib/dateutil/tz/__init__.py b/app/lib/dateutil/tz/__init__.py
new file mode 100644
index 0000000..1cba7b9
--- /dev/null
+++ b/app/lib/dateutil/tz/__init__.py
@@ -0,0 +1,4 @@
+from .tz import *
+
+__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
+           "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
diff --git a/app/lib/dateutil/tz/_common.py b/app/lib/dateutil/tz/_common.py
new file mode 100644
index 0000000..212e8ce
--- /dev/null
+++ b/app/lib/dateutil/tz/_common.py
@@ -0,0 +1,380 @@
+from six import PY3
+from six.moves import _thread
+
+from datetime import datetime, timedelta, tzinfo
+import copy
+
+ZERO = timedelta(0)
+
+__all__ = ['tzname_in_python2', 'enfold']
+
+
+def tzname_in_python2(namefunc):
+    """Change unicode output into bytestrings in Python 2
+
+    tzname() API changed in Python 3. It used to return bytes, but was changed
+    to unicode strings
+    """
+    def adjust_encoding(*args, **kwargs):
+        name = namefunc(*args, **kwargs)
+        if name is not None and not PY3:
+            name = name.encode()
+
+        return name
+
+    return adjust_encoding
+
+
+# The following is adapted from Alexander Belopolsky's tz library
+# https://github.com/abalkin/tz
+if hasattr(datetime, 'fold'):
+    # Python 3.6+ provides the fold attribute natively, so enfold can simply
+    # delegate to datetime.replace.
+    def enfold(dt, fold=1):
+        """
+        Provides a unified interface for assigning the ``fold`` attribute to
+        datetimes both before and after the implementation of PEP-495.
+
+        :param fold:
+            The value for the ``fold`` attribute in the returned datetime. This
+            should be either 0 or 1.
+
+        :return:
+            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
+            ``fold`` for all versions of Python. In versions prior to
+            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
+            subclass of :py:class:`datetime.datetime` with the ``fold``
+            attribute added, if ``fold`` is 1.
+
+        .. versionadded:: 2.6.0
+        """
+        return dt.replace(fold=fold)
+
+else:
+    class _DatetimeWithFold(datetime):
+        """
+        This is a class designed to provide a PEP 495-compliant interface for
+        Python versions before 3.6. It is used only for dates in a fold, so
+        the ``fold`` attribute is fixed at ``1``.
+
+        .. versionadded:: 2.6.0
+        """
+        __slots__ = ()
+
+        @property
+        def fold(self):
+            return 1
+
+    def enfold(dt, fold=1):
+        """
+        Provides a unified interface for assigning the ``fold`` attribute to
+        datetimes both before and after the implementation of PEP-495.
+
+        :param fold:
+            The value for the ``fold`` attribute in the returned datetime. This
+            should be either 0 or 1.
+
+        :return:
+            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
+            ``fold`` for all versions of Python. In versions prior to
+            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
+            subclass of :py:class:`datetime.datetime` with the ``fold``
+            attribute added, if ``fold`` is 1.
+
+        .. versionadded:: 2.6.0
+        """
+        if getattr(dt, 'fold', 0) == fold:
+            return dt
+
+        args = dt.timetuple()[:6]
+        args += (dt.microsecond, dt.tzinfo)
+
+        if fold:
+            return _DatetimeWithFold(*args)
+        else:
+            return datetime(*args)
+
+
+class _tzinfo(tzinfo):
+    """
+    Base class for all ``dateutil`` ``tzinfo`` objects.
+    """
+
+    def is_ambiguous(self, dt):
+        """
+        Whether or not the "wall time" of a given datetime is ambiguous in this
+        zone.
+
+        :param dt:
+            A :py:class:`datetime.datetime`, naive or time zone aware.
+
+        :return:
+            Returns ``True`` if ambiguous, ``False`` otherwise.
+
+        .. versionadded:: 2.6.0
+        """
+
+        dt = dt.replace(tzinfo=self)
+
+        wall_0 = enfold(dt, fold=0)
+        wall_1 = enfold(dt, fold=1)
+
+        same_offset = wall_0.utcoffset() == wall_1.utcoffset()
+        same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)
+
+        return same_dt and not same_offset
+
+    def _fold_status(self, dt_utc, dt_wall):
+        """
+        Determine the fold status of a "wall" datetime, given a representation
+        of the same datetime as a (naive) UTC datetime. This is calculated based
+        on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
+        datetimes, and that this offset is the actual number of hours separating
+        ``dt_utc`` and ``dt_wall``.
+
+        :param dt_utc:
+            Representation of the datetime as UTC
+
+        :param dt_wall:
+            Representation of the datetime as "wall time". This parameter must
+            either have a `fold` attribute or have a fold-naive
+            :class:`datetime.tzinfo` attached, otherwise the calculation may
+            fail.
+        """
+        if self.is_ambiguous(dt_wall):
+            delta_wall = dt_wall - dt_utc
+            _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
+        else:
+            _fold = 0
+
+        return _fold
+
+    def _fold(self, dt):
+        return getattr(dt, 'fold', 0)
+
+    def _fromutc(self, dt):
+        """
+        Given a timezone-aware datetime in a given timezone, calculates a
+        timezone-aware datetime in a new timezone.
+
+        Since this is the one time that we *know* we have an unambiguous
+        datetime object, we take this opportunity to determine whether the
+        datetime is ambiguous and in a "fold" state (e.g. if it's the first
+        occurrence, chronologically, of the ambiguous datetime).
+
+        :param dt:
+            A timezone-aware :class:`datetime.datetime` object.
+        """
+
+        # Re-implement the algorithm from Python's datetime.py
+        if not isinstance(dt, datetime):
+            raise TypeError("fromutc() requires a datetime argument")
+        if dt.tzinfo is not self:
+            raise ValueError("dt.tzinfo is not self")
+
+        dtoff = dt.utcoffset()
+        if dtoff is None:
+            raise ValueError("fromutc() requires a non-None utcoffset() "
+                             "result")
+
+        # The original datetime.py code assumes that `dst()` defaults to
+        # zero during ambiguous times. PEP 495 inverts this presumption, so
+        # for pre-PEP 495 versions of python, we need to tweak the algorithm.
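+        # Illustrative note (added commentary, not upstream dateutil text):
+        # the steps below first shift the UTC instant onto the local
+        # *standard* clock, relying on utcoffset() - dst() being constant,
+        # and then ask whether that wall time is in DST with fold=1 as the
+        # default for ambiguous times. For US/Eastern, 2004-10-31 06:00 UTC
+        # shifts by -5:00 to 01:00 standard wall time; with fold=1 its dst()
+        # is 0:00, so the ambiguous 01:00 resolves to EST rather than EDT.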
+        dtdst = dt.dst()
+        if dtdst is None:
+            raise ValueError("fromutc() requires a non-None dst() result")
+        delta = dtoff - dtdst
+        if delta:
+            dt += delta
+            # Set fold=1 so we can default to being in the fold for
+            # ambiguous dates.
+            dtdst = enfold(dt, fold=1).dst()
+            if dtdst is None:
+                raise ValueError("fromutc(): dt.dst gave inconsistent "
+                                 "results; cannot convert")
+        return dt + dtdst
+
+    def fromutc(self, dt):
+        """
+        Given a timezone-aware datetime in a given timezone, calculates a
+        timezone-aware datetime in a new timezone.
+
+        Since this is the one time that we *know* we have an unambiguous
+        datetime object, we take this opportunity to determine whether the
+        datetime is ambiguous and in a "fold" state (e.g. if it's the first
+        occurrence, chronologically, of the ambiguous datetime).
+
+        :param dt:
+            A timezone-aware :class:`datetime.datetime` object.
+        """
+        dt_wall = self._fromutc(dt)
+
+        # Calculate the fold status given the two datetimes.
+        _fold = self._fold_status(dt, dt_wall)
+
+        # Set the default fold value for ambiguous dates
+        return enfold(dt_wall, fold=_fold)
+
+
+class tzrangebase(_tzinfo):
+    """
+    This is an abstract base class for time zones represented by an annual
+    transition into and out of DST. Child classes should implement the following
+    methods:
+
+        * ``__init__(self, *args, **kwargs)``
+        * ``transitions(self, year)`` - this is expected to return a tuple of
+          datetimes representing the DST on and off transitions in standard
+          time.
+
+    A fully initialized ``tzrangebase`` subclass should also provide the
+    following attributes:
+        * ``hasdst``: Boolean whether or not the zone uses DST.
+        * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
+          representing the respective UTC offsets.
+        * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
+          abbreviations in DST and STD, respectively.
+
+    .. versionadded:: 2.6.0
+    """
+    def __init__(self):
+        raise NotImplementedError('tzrangebase is an abstract base class')
+
+    def utcoffset(self, dt):
+        isdst = self._isdst(dt)
+
+        if isdst is None:
+            return None
+        elif isdst:
+            return self._dst_offset
+        else:
+            return self._std_offset
+
+    def dst(self, dt):
+        isdst = self._isdst(dt)
+
+        if isdst is None:
+            return None
+        elif isdst:
+            return self._dst_base_offset
+        else:
+            return ZERO
+
+    @tzname_in_python2
+    def tzname(self, dt):
+        if self._isdst(dt):
+            return self._dst_abbr
+        else:
+            return self._std_abbr
+
+    def fromutc(self, dt):
+        """ Given a datetime in UTC, return local time """
+        if not isinstance(dt, datetime):
+            raise TypeError("fromutc() requires a datetime argument")
+
+        if dt.tzinfo is not self:
+            raise ValueError("dt.tzinfo is not self")
+
+        # Get transitions - if there are none, fixed offset
+        transitions = self.transitions(dt.year)
+        if transitions is None:
+            return dt + self.utcoffset(dt)
+
+        # Get the transition times in UTC
+        dston, dstoff = transitions
+
+        dston -= self._std_offset
+        dstoff -= self._std_offset
+
+        utc_transitions = (dston, dstoff)
+        dt_utc = dt.replace(tzinfo=None)
+
+        isdst = self._naive_isdst(dt_utc, utc_transitions)
+
+        if isdst:
+            dt_wall = dt + self._dst_offset
+        else:
+            dt_wall = dt + self._std_offset
+
+        _fold = int(not isdst and self.is_ambiguous(dt_wall))
+
+        return enfold(dt_wall, fold=_fold)
+
+    def is_ambiguous(self, dt):
+        """
+        Whether or not the "wall time" of a given datetime is ambiguous in this
+        zone.
+
+        :param dt:
+            A :py:class:`datetime.datetime`, naive or time zone aware.
+ + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + if not self.hasdst: + return False + + start, end = self.transitions(dt.year) + + dt = dt.replace(tzinfo=None) + return (end <= dt < end + self._dst_base_offset) + + def _isdst(self, dt): + if not self.hasdst: + return False + elif dt is None: + return None + + transitions = self.transitions(dt.year) + + if transitions is None: + return False + + dt = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt, transitions) + + # Handle ambiguous dates + if not isdst and self.is_ambiguous(dt): + return not self._fold(dt) + else: + return isdst + + def _naive_isdst(self, dt, transitions): + dston, dstoff = transitions + + dt = dt.replace(tzinfo=None) + + if dston < dstoff: + isdst = dston <= dt < dstoff + else: + isdst = not dstoff <= dt < dston + + return isdst + + @property + def _dst_base_offset(self): + return self._dst_offset - self._std_offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(...)" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +def _total_seconds(td): + # Python 2.6 doesn't have a total_seconds() method on timedelta objects + return ((td.seconds + td.days * 86400) * 1000000 + + td.microseconds) // 1000000 + +_total_seconds = getattr(timedelta, 'total_seconds', _total_seconds) diff --git a/app/lib/dateutil/tz/tz.py b/app/lib/dateutil/tz/tz.py new file mode 100644 index 0000000..6bee291 --- /dev/null +++ b/app/lib/dateutil/tz/tz.py @@ -0,0 +1,1464 @@ +# -*- coding: utf-8 -*- +""" +This module offers timezone implementations subclassing the abstract +:py:`datetime.tzinfo` type. There are classes to handle tzfile format files +(usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, etc), TZ +environment string (in all known formats), given ranges (with help from +relative deltas), local machine timezone, fixed offset timezone, and UTC +timezone. +""" +import datetime +import struct +import time +import sys +import os +import bisect +import copy + +from operator import itemgetter + +from contextlib import contextmanager + +from six import string_types, PY3 +from ._common import tzname_in_python2, _tzinfo, _total_seconds +from ._common import tzrangebase, enfold + +try: + from .win import tzwin, tzwinlocal +except ImportError: + tzwin = tzwinlocal = None + +ZERO = datetime.timedelta(0) +EPOCH = datetime.datetime.utcfromtimestamp(0) +EPOCHORDINAL = EPOCH.toordinal() + +class tzutc(datetime.tzinfo): + """ + This is a tzinfo object that represents the UTC time zone. + """ + def utcoffset(self, dt): + return ZERO + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return "UTC" + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. 
versionadded:: 2.6.0 + """ + return False + + def __eq__(self, other): + if not isinstance(other, (tzutc, tzoffset)): + return NotImplemented + + return (isinstance(other, tzutc) or + (isinstance(other, tzoffset) and other._offset == ZERO)) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +class tzoffset(datetime.tzinfo): + """ + A simple class for representing a fixed offset from UTC. + + :param name: + The timezone name, to be returned when ``tzname()`` is called. + + :param offset: + The time zone offset in seconds, or (since version 2.6.0, represented + as a :py:class:`datetime.timedelta` object. + """ + def __init__(self, name, offset): + self._name = name + + try: + # Allow a timedelta + offset = _total_seconds(offset) + except (TypeError, AttributeError): + pass + self._offset = datetime.timedelta(seconds=offset) + + def utcoffset(self, dt): + return self._offset + + def dst(self, dt): + return ZERO + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + return False + + @tzname_in_python2 + def tzname(self, dt): + return self._name + + def __eq__(self, other): + if not isinstance(other, tzoffset): + return NotImplemented + + return self._offset == other._offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s, %s)" % (self.__class__.__name__, + repr(self._name), + int(_total_seconds(self._offset))) + + __reduce__ = object.__reduce__ + + +class tzlocal(_tzinfo): + """ + A :class:`tzinfo` subclass built around the ``time`` timezone functions. + """ + def __init__(self): + super(tzlocal, self).__init__() + + self._std_offset = datetime.timedelta(seconds=-time.timezone) + if time.daylight: + self._dst_offset = datetime.timedelta(seconds=-time.altzone) + else: + self._dst_offset = self._std_offset + + self._dst_saved = self._dst_offset - self._std_offset + self._hasdst = bool(self._dst_saved) + + def utcoffset(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset - self._std_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return time.tzname[self._isdst(dt)] + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + naive_dst = self._naive_is_dst(dt) + return (not naive_dst and + (naive_dst != self._naive_is_dst(dt - self._dst_saved))) + + def _naive_is_dst(self, dt): + timestamp = _datetime_to_timestamp(dt) + return time.localtime(timestamp + time.timezone).tm_isdst + + def _isdst(self, dt, fold_naive=True): + # We can't use mktime here. It is unstable when deciding if + # the hour near to a change is DST or not. 
+ # + # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, + # dt.minute, dt.second, dt.weekday(), 0, -1)) + # return time.localtime(timestamp).tm_isdst + # + # The code above yields the following result: + # + # >>> import tz, datetime + # >>> t = tz.tzlocal() + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # + # Here is a more stable implementation: + # + if not self._hasdst: + return False + + # Check for ambiguous times: + dstval = self._naive_is_dst(dt) + fold = getattr(dt, 'fold', None) + + if self.is_ambiguous(dt): + if fold is not None: + return not self._fold(dt) + else: + return True + + return dstval + + def __eq__(self, other): + if not isinstance(other, tzlocal): + return NotImplemented + + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +class _ttinfo(object): + __slots__ = ["offset", "delta", "isdst", "abbr", + "isstd", "isgmt", "dstoffset"] + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def __repr__(self): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) + + def __eq__(self, other): + if not isinstance(other, _ttinfo): + return NotImplemented + + return (self.offset == other.offset and + self.delta == other.delta and + self.isdst == other.isdst and + self.abbr == other.abbr and + self.isstd == other.isstd and + self.isgmt == other.isgmt and + self.dstoffset == other.dstoffset) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __getstate__(self): + state = {} + for name in self.__slots__: + state[name] = getattr(self, name, None) + return state + + def __setstate__(self, state): + for name in self.__slots__: + if name in state: + setattr(self, name, state[name]) + + +class _tzfile(object): + """ + Lightweight class for holding the relevant transition and time zone + information read from binary tzfiles. + """ + attrs = ['trans_list', 'trans_idx', 'ttinfo_list', + 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first'] + + def __init__(self, **kwargs): + for attr in self.attrs: + setattr(self, attr, kwargs.get(attr, None)) + + +class tzfile(_tzinfo): + """ + This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)`` + format timezone files to extract current and historical zone information. + + :param fileobj: + This can be an opened file stream or a file name that the time zone + information can be read from. + + :param filename: + This is an optional parameter specifying the source of the time zone + information in the event that ``fileobj`` is a file object. If omitted + and ``fileobj`` is a file stream, this parameter will be set either to + ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``. + + See `Sources for Time Zone and Daylight Saving Time Data + `_ for more information. 
Time zone + files can be compiled from the `IANA Time Zone database files + `_ with the `zic time zone compiler + `_ + """ + + def __init__(self, fileobj, filename=None): + super(tzfile, self).__init__() + + file_opened_here = False + if isinstance(fileobj, string_types): + self._filename = fileobj + fileobj = open(fileobj, 'rb') + file_opened_here = True + elif filename is not None: + self._filename = filename + elif hasattr(fileobj, "name"): + self._filename = fileobj.name + else: + self._filename = repr(fileobj) + + if fileobj is not None: + if not file_opened_here: + fileobj = _ContextWrapper(fileobj) + + with fileobj as file_stream: + tzobj = self._read_tzfile(file_stream) + + self._set_tzdata(tzobj) + + def _set_tzdata(self, tzobj): + """ Set the time zone data of this object from a _tzfile object """ + # Copy the relevant attributes over as private attributes + for attr in _tzfile.attrs: + setattr(self, '_' + attr, getattr(tzobj, attr)) + + def _read_tzfile(self, fileobj): + out = _tzfile() + + # From tzfile(5): + # + # The time zone information files used by tzset(3) + # begin with the magic characters "TZif" to identify + # them as time zone information files, followed by + # sixteen bytes reserved for future use, followed by + # six four-byte values of type long, written in a + # ``standard'' byte order (the high-order byte + # of the value is written first). + if fileobj.read(4).decode() != "TZif": + raise ValueError("magic not found") + + fileobj.read(16) + + ( + # The number of UTC/local indicators stored in the file. + ttisgmtcnt, + + # The number of standard/wall indicators stored in the file. + ttisstdcnt, + + # The number of leap seconds for which data is + # stored in the file. + leapcnt, + + # The number of "transition times" for which data + # is stored in the file. + timecnt, + + # The number of "local time types" for which data + # is stored in the file (must not be zero). + typecnt, + + # The number of characters of "time zone + # abbreviation strings" stored in the file. + charcnt, + + ) = struct.unpack(">6l", fileobj.read(24)) + + # The above header is followed by tzh_timecnt four-byte + # values of type long, sorted in ascending order. + # These values are written in ``standard'' byte order. + # Each is used as a transition time (as returned by + # time(2)) at which the rules for computing local time + # change. + + if timecnt: + out.trans_list = list(struct.unpack(">%dl" % timecnt, + fileobj.read(timecnt*4))) + else: + out.trans_list = [] + + # Next come tzh_timecnt one-byte values of type unsigned + # char; each one tells which of the different types of + # ``local time'' types described in the file is associated + # with the same-indexed transition time. These values + # serve as indices into an array of ttinfo structures that + # appears next in the file. + + if timecnt: + out.trans_idx = struct.unpack(">%dB" % timecnt, + fileobj.read(timecnt)) + else: + out.trans_idx = [] + + # Each ttinfo structure is written as a four-byte value + # for tt_gmtoff of type long, in a standard byte + # order, followed by a one-byte value for tt_isdst + # and a one-byte value for tt_abbrind. In each + # structure, tt_gmtoff gives the number of + # seconds to be added to UTC, tt_isdst tells whether + # tm_isdst should be set by localtime(3), and + # tt_abbrind serves as an index into the array of + # time zone abbreviation characters that follow the + # ttinfo structure(s) in the file. 
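+        # Illustrative aside (added commentary): each ttinfo record is six
+        # bytes, unpacked below with ">lbb" as a big-endian signed long and
+        # two signed bytes. For example, b'\xff\xff\xb9\xb0\x00\x00' unpacks
+        # to (-18000, 0, 0): a UTC offset of -18000 seconds (UTC-5), isdst
+        # false, and index 0 into the abbreviation string table read next.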
+ + ttinfo = [] + + for i in range(typecnt): + ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) + + abbr = fileobj.read(charcnt).decode() + + # Then there are tzh_leapcnt pairs of four-byte + # values, written in standard byte order; the + # first value of each pair gives the time (as + # returned by time(2)) at which a leap second + # occurs; the second gives the total number of + # leap seconds to be applied after the given time. + # The pairs of values are sorted in ascending order + # by time. + + # Not used, for now (but read anyway for correct file position) + if leapcnt: + leap = struct.unpack(">%dl" % (leapcnt*2), + fileobj.read(leapcnt*8)) + + # Then there are tzh_ttisstdcnt standard/wall + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as standard + # time or wall clock time, and are used when + # a time zone file is used in handling POSIX-style + # time zone environment variables. + + if ttisstdcnt: + isstd = struct.unpack(">%db" % ttisstdcnt, + fileobj.read(ttisstdcnt)) + + # Finally, there are tzh_ttisgmtcnt UTC/local + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as UTC or + # local time, and are used when a time zone file + # is used in handling POSIX-style time zone envi- + # ronment variables. + + if ttisgmtcnt: + isgmt = struct.unpack(">%db" % ttisgmtcnt, + fileobj.read(ttisgmtcnt)) + + # Build ttinfo list + out.ttinfo_list = [] + for i in range(typecnt): + gmtoff, isdst, abbrind = ttinfo[i] + # Round to full-minutes if that's not the case. Python's + # datetime doesn't accept sub-minute timezones. Check + # http://python.org/sf/1447945 for some information. + gmtoff = 60 * ((gmtoff + 30) // 60) + tti = _ttinfo() + tti.offset = gmtoff + tti.dstoffset = datetime.timedelta(0) + tti.delta = datetime.timedelta(seconds=gmtoff) + tti.isdst = isdst + tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] + tti.isstd = (ttisstdcnt > i and isstd[i] != 0) + tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) + out.ttinfo_list.append(tti) + + # Replace ttinfo indexes for ttinfo objects. + out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx] + + # Set standard, dst, and before ttinfos. before will be + # used when a given time is before any transitions, + # and will be set to the first non-dst ttinfo, or to + # the first dst, if all of them are dst. + out.ttinfo_std = None + out.ttinfo_dst = None + out.ttinfo_before = None + if out.ttinfo_list: + if not out.trans_list: + out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0] + else: + for i in range(timecnt-1, -1, -1): + tti = out.trans_idx[i] + if not out.ttinfo_std and not tti.isdst: + out.ttinfo_std = tti + elif not out.ttinfo_dst and tti.isdst: + out.ttinfo_dst = tti + + if out.ttinfo_std and out.ttinfo_dst: + break + else: + if out.ttinfo_dst and not out.ttinfo_std: + out.ttinfo_std = out.ttinfo_dst + + for tti in out.ttinfo_list: + if not tti.isdst: + out.ttinfo_before = tti + break + else: + out.ttinfo_before = out.ttinfo_list[0] + + # Now fix transition times to become relative to wall time. + # + # I'm not sure about this. In my tests, the tz source file + # is setup to wall time, and in the binary file isstd and + # isgmt are off, so it should be in wall time. OTOH, it's + # always in gmt time. Let me know if you have comments + # about this. 
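+        # Worked example (added commentary): with a last standard offset of
+        # -18000 seconds, a transition stored at UTC timestamp T becomes
+        # T - 18000 below, i.e. the same instant expressed on the local
+        # standard-time clock. _find_last_transition later bisects this list
+        # against a naive local timestamp, so both sides must be wall time.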
+ laststdoffset = None + for i, tti in enumerate(out.trans_idx): + if not tti.isdst: + offset = tti.offset + laststdoffset = offset + else: + if laststdoffset is not None: + # Store the DST offset as well and update it in the list + tti.dstoffset = tti.offset - laststdoffset + out.trans_idx[i] = tti + + offset = laststdoffset or 0 + + out.trans_list[i] += offset + + # In case we missed any DST offsets on the way in for some reason, make + # a second pass over the list, looking for the /next/ DST offset. + laststdoffset = None + for i in reversed(range(len(out.trans_idx))): + tti = out.trans_idx[i] + if tti.isdst: + if not (tti.dstoffset or laststdoffset is None): + tti.dstoffset = tti.offset - laststdoffset + else: + laststdoffset = tti.offset + + if not isinstance(tti.dstoffset, datetime.timedelta): + tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset) + + out.trans_idx[i] = tti + + out.trans_idx = tuple(out.trans_idx) + out.trans_list = tuple(out.trans_list) + + return out + + def _find_last_transition(self, dt): + # If there's no list, there are no transitions to find + if not self._trans_list: + return None + + timestamp = _datetime_to_timestamp(dt) + + # Find where the timestamp fits in the transition list - if the + # timestamp is a transition time, it's part of the "after" period. + idx = bisect.bisect_right(self._trans_list, timestamp) + + # We want to know when the previous transition was, so subtract off 1 + return idx - 1 + + def _get_ttinfo(self, idx): + # For no list or after the last transition, default to _ttinfo_std + if idx is None or (idx + 1) == len(self._trans_list): + return self._ttinfo_std + + # If there is a list and the time is before it, return _ttinfo_before + if idx < 0: + return self._ttinfo_before + + return self._trans_idx[idx] + + def _find_ttinfo(self, dt): + idx = self._resolve_ambiguous_time(dt) + + return self._get_ttinfo(idx) + + def is_ambiguous(self, dt, idx=None): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + if idx is None: + idx = self._find_last_transition(dt) + + # Calculate the difference in offsets from current to previous + timestamp = _datetime_to_timestamp(dt) + tti = self._get_ttinfo(idx) + + if idx is None or idx <= 0: + return False + + od = self._get_ttinfo(idx - 1).offset - tti.offset + tt = self._trans_list[idx] # Transition time + + return timestamp < tt + od + + def _resolve_ambiguous_time(self, dt): + idx = self._find_last_transition(dt) + + # If we have no transitions, return the index + _fold = self._fold(dt) + if idx is None or idx == 0: + return idx + + # Get the current datetime as a timestamp + idx_offset = int(not _fold and self.is_ambiguous(dt, idx)) + + return idx - idx_offset + + def utcoffset(self, dt): + if dt is None: + return None + + if not self._ttinfo_std: + return ZERO + + return self._find_ttinfo(dt).delta + + def dst(self, dt): + if dt is None: + return None + + if not self._ttinfo_dst: + return ZERO + + tti = self._find_ttinfo(dt) + + if not tti.isdst: + return ZERO + + # The documentation says that utcoffset()-dst() must + # be constant for every dt. 
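+        # In other words, dst() must report how far the clock is currently
+        # advanced relative to standard time, so we return the saved
+        # DST-minus-standard delta computed in _read_tzfile rather than this
+        # ttinfo's raw UTC offset.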
+ return tti.dstoffset + + @tzname_in_python2 + def tzname(self, dt): + if not self._ttinfo_std or dt is None: + return None + return self._find_ttinfo(dt).abbr + + def __eq__(self, other): + if not isinstance(other, tzfile): + return NotImplemented + return (self._trans_list == other._trans_list and + self._trans_idx == other._trans_idx and + self._ttinfo_list == other._ttinfo_list) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._filename)) + + def __reduce__(self): + return self.__reduce_ex__(None) + + def __reduce_ex__(self, protocol): + return (self.__class__, (None, self._filename), self.__dict__) + + +class tzrange(tzrangebase): + """ + The ``tzrange`` object is a time zone specified by a set of offsets and + abbreviations, equivalent to the way the ``TZ`` variable can be specified + in POSIX-like systems, but using Python delta objects to specify DST + start, end and offsets. + + :param stdabbr: + The abbreviation for standard time (e.g. ``'EST'``). + + :param stdoffset: + An integer or :class:`datetime.timedelta` object or equivalent + specifying the base offset from UTC. + + If unspecified, +00:00 is used. + + :param dstabbr: + The abbreviation for DST / "Summer" time (e.g. ``'EDT'``). + + If specified, with no other DST information, DST is assumed to occur + and the default behavior or ``dstoffset``, ``start`` and ``end`` is + used. If unspecified and no other DST information is specified, it + is assumed that this zone has no DST. + + If this is unspecified and other DST information is *is* specified, + DST occurs in the zone but the time zone abbreviation is left + unchanged. + + :param dstoffset: + A an integer or :class:`datetime.timedelta` object or equivalent + specifying the UTC offset during DST. If unspecified and any other DST + information is specified, it is assumed to be the STD offset +1 hour. + + :param start: + A :class:`relativedelta.relativedelta` object or equivalent specifying + the time and time of year that daylight savings time starts. To specify, + for example, that DST starts at 2AM on the 2nd Sunday in March, pass: + + ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))`` + + If unspecified and any other DST information is specified, the default + value is 2 AM on the first Sunday in April. + + :param end: + A :class:`relativedelta.relativedelta` object or equivalent representing + the time and time of year that daylight savings time ends, with the + same specification method as in ``start``. One note is that this should + point to the first time in the *standard* zone, so if a transition + occurs at 2AM in the DST zone and the clocks are set back 1 hour to 1AM, + set the `hours` parameter to +1. + + + **Examples:** + + .. testsetup:: tzrange + + from dateutil.tz import tzrange, tzstr + + .. doctest:: tzrange + + >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT") + True + + >>> from dateutil.relativedelta import * + >>> range1 = tzrange("EST", -18000, "EDT") + >>> range2 = tzrange("EST", -18000, "EDT", -14400, + ... relativedelta(hours=+2, month=4, day=1, + ... weekday=SU(+1)), + ... relativedelta(hours=+1, month=10, day=31, + ... 
weekday=SU(-1))) + >>> tzstr('EST5EDT') == range1 == range2 + True + + """ + def __init__(self, stdabbr, stdoffset=None, + dstabbr=None, dstoffset=None, + start=None, end=None): + + global relativedelta + from dateutil import relativedelta + + self._std_abbr = stdabbr + self._dst_abbr = dstabbr + + try: + stdoffset = _total_seconds(stdoffset) + except (TypeError, AttributeError): + pass + + try: + dstoffset = _total_seconds(dstoffset) + except (TypeError, AttributeError): + pass + + if stdoffset is not None: + self._std_offset = datetime.timedelta(seconds=stdoffset) + else: + self._std_offset = ZERO + + if dstoffset is not None: + self._dst_offset = datetime.timedelta(seconds=dstoffset) + elif dstabbr and stdoffset is not None: + self._dst_offset = self._std_offset + datetime.timedelta(hours=+1) + else: + self._dst_offset = ZERO + + if dstabbr and start is None: + self._start_delta = relativedelta.relativedelta( + hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) + else: + self._start_delta = start + + if dstabbr and end is None: + self._end_delta = relativedelta.relativedelta( + hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) + else: + self._end_delta = end + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = bool(self._start_delta) + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. + """ + if not self.hasdst: + return None + + base_year = datetime.datetime(year, 1, 1) + + start = base_year + self._start_delta + end = base_year + self._end_delta + + return (start, end) + + def __eq__(self, other): + if not isinstance(other, tzrange): + return NotImplemented + + return (self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr and + self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._start_delta == other._start_delta and + self._end_delta == other._end_delta) + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +class tzstr(tzrange): + """ + ``tzstr`` objects are time zone objects specified by a time-zone string as + it would be passed to a ``TZ`` variable on POSIX-style systems (see + the `GNU C Library: TZ Variable`_ for more details). + + There is one notable exception, which is that POSIX-style time zones use an + inverted offset format, so normally ``GMT+3`` would be parsed as an offset + 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an + offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX + behavior, pass a ``True`` value to ``posix_offset``. + + The :class:`tzrange` object provides the same functionality, but is + specified using :class:`relativedelta.relativedelta` objects. rather than + strings. + + :param s: + A time zone string in ``TZ`` variable format. This can be a + :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: :class:`unicode`) + or a stream emitting unicode characters (e.g. :class:`StringIO`). + + :param posix_offset: + Optional. If set to ``True``, interpret strings such as ``GMT+3`` or + ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the + POSIX standard. + + .. 
_`GNU C Library: TZ Variable`: + https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + """ + def __init__(self, s, posix_offset=False): + global parser + from dateutil import parser + + self._s = s + + res = parser._parsetz(s) + if res is None: + raise ValueError("unknown string format") + + # Here we break the compatibility with the TZ variable handling. + # GMT-3 actually *means* the timezone -3. + if res.stdabbr in ("GMT", "UTC") and not posix_offset: + res.stdoffset *= -1 + + # We must initialize it first, since _delta() needs + # _std_offset and _dst_offset set. Use False in start/end + # to avoid building it two times. + tzrange.__init__(self, res.stdabbr, res.stdoffset, + res.dstabbr, res.dstoffset, + start=False, end=False) + + if not res.dstabbr: + self._start_delta = None + self._end_delta = None + else: + self._start_delta = self._delta(res.start) + if self._start_delta: + self._end_delta = self._delta(res.end, isend=1) + + self.hasdst = bool(self._start_delta) + + def _delta(self, x, isend=0): + from dateutil import relativedelta + kwargs = {} + if x.month is not None: + kwargs["month"] = x.month + if x.weekday is not None: + kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) + if x.week > 0: + kwargs["day"] = 1 + else: + kwargs["day"] = 31 + elif x.day: + kwargs["day"] = x.day + elif x.yday is not None: + kwargs["yearday"] = x.yday + elif x.jyday is not None: + kwargs["nlyearday"] = x.jyday + if not kwargs: + # Default is to start on first sunday of april, and end + # on last sunday of october. + if not isend: + kwargs["month"] = 4 + kwargs["day"] = 1 + kwargs["weekday"] = relativedelta.SU(+1) + else: + kwargs["month"] = 10 + kwargs["day"] = 31 + kwargs["weekday"] = relativedelta.SU(-1) + if x.time is not None: + kwargs["seconds"] = x.time + else: + # Default is 2AM. + kwargs["seconds"] = 7200 + if isend: + # Convert to standard time, to follow the documented way + # of working with the extra hour. See the documentation + # of the tzinfo class. + delta = self._dst_offset - self._std_offset + kwargs["seconds"] -= delta.seconds + delta.days * 86400 + return relativedelta.relativedelta(**kwargs) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +class _tzicalvtzcomp(object): + def __init__(self, tzoffsetfrom, tzoffsetto, isdst, + tzname=None, rrule=None): + self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) + self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) + self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom + self.isdst = isdst + self.tzname = tzname + self.rrule = rrule + + +class _tzicalvtz(_tzinfo): + def __init__(self, tzid, comps=[]): + super(_tzicalvtz, self).__init__() + + self._tzid = tzid + self._comps = comps + self._cachedate = [] + self._cachecomp = [] + + def _find_comp(self, dt): + if len(self._comps) == 1: + return self._comps[0] + + dt = dt.replace(tzinfo=None) + + try: + return self._cachecomp[self._cachedate.index((dt, self._fold(dt)))] + except ValueError: + pass + + + lastcompdt = None + lastcomp = None + + for comp in self._comps: + compdt = self._find_compdt(comp, dt) + + if compdt and (not lastcompdt or lastcompdt < compdt): + lastcompdt = compdt + lastcomp = comp + + if not lastcomp: + # RFC says nothing about what to do when a given + # time is before the first onset date. We'll look for the + # first standard component, or the first component, if + # none is found. 
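+            # (For instance, a query that predates the earliest DTSTART of
+            # every component has no onset before it, so each rrule
+            # before() lookup returns None and we fall through to here.)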
+            for comp in self._comps:
+                if not comp.isdst:
+                    lastcomp = comp
+                    break
+            else:
+                lastcomp = self._comps[0]
+
+        self._cachedate.insert(0, (dt, self._fold(dt)))
+        self._cachecomp.insert(0, lastcomp)
+
+        if len(self._cachedate) > 10:
+            self._cachedate.pop()
+            self._cachecomp.pop()
+
+        return lastcomp
+
+    def _find_compdt(self, comp, dt):
+        if comp.tzoffsetdiff < ZERO and self._fold(dt):
+            dt -= comp.tzoffsetdiff
+
+        compdt = comp.rrule.before(dt, inc=True)
+
+        return compdt
+
+    def utcoffset(self, dt):
+        if dt is None:
+            return None
+
+        return self._find_comp(dt).tzoffsetto
+
+    def dst(self, dt):
+        comp = self._find_comp(dt)
+        if comp.isdst:
+            return comp.tzoffsetdiff
+        else:
+            return ZERO
+
+    @tzname_in_python2
+    def tzname(self, dt):
+        return self._find_comp(dt).tzname
+
+    def __repr__(self):
+        return "<tzicalvtz %s>" % repr(self._tzid)
+
+    __reduce__ = object.__reduce__
+
+
+class tzical(object):
+    """
+    This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
+    as set out in `RFC 2445`_ Section 4.6.5 into one or more `tzinfo` objects.
+
+    :param `fileobj`:
+        A file or stream in iCalendar format, which should be UTF-8 encoded
+        with CRLF endings.
+
+    .. _`RFC 2445`: https://www.ietf.org/rfc/rfc2445.txt
+    """
+    def __init__(self, fileobj):
+        global rrule
+        from dateutil import rrule
+
+        if isinstance(fileobj, string_types):
+            self._s = fileobj
+            # ical should be encoded in UTF-8 with CRLF
+            fileobj = open(fileobj, 'r')
+            file_opened_here = True
+        else:
+            self._s = getattr(fileobj, 'name', repr(fileobj))
+            fileobj = _ContextWrapper(fileobj)
+
+        self._vtz = {}
+
+        with fileobj as fobj:
+            self._parse_rfc(fobj.read())
+
+    def keys(self):
+        """
+        Retrieves the available time zones as a list.
+        """
+        return list(self._vtz.keys())
+
+    def get(self, tzid=None):
+        """
+        Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.
+
+        :param tzid:
+            If there is exactly one time zone available, omitting ``tzid``
+            or passing :py:const:`None` value returns it. Otherwise a valid
+            key (which can be retrieved from :func:`keys`) is required.
+
+        :raises ValueError:
+            Raised if ``tzid`` is not specified but there are either more
+            or fewer than 1 zone defined.
+
+        :returns:
+            Returns either a :py:class:`datetime.tzinfo` object representing
+            the relevant time zone or :py:const:`None` if the ``tzid`` was
+            not found.
+ """ + if tzid is None: + if len(self._vtz) == 0: + raise ValueError("no timezones defined") + elif len(self._vtz) > 1: + raise ValueError("more than one timezone available") + tzid = next(iter(self._vtz)) + + return self._vtz.get(tzid) + + def _parse_offset(self, s): + s = s.strip() + if not s: + raise ValueError("empty offset") + if s[0] in ('+', '-'): + signal = (-1, +1)[s[0] == '+'] + s = s[1:] + else: + signal = +1 + if len(s) == 4: + return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal + elif len(s) == 6: + return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal + else: + raise ValueError("invalid offset: " + s) + + def _parse_rfc(self, s): + lines = s.splitlines() + if not lines: + raise ValueError("empty string") + + # Unfold + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + + tzid = None + comps = [] + invtz = False + comptype = None + for line in lines: + if not line: + continue + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0].upper() + parms = parms[1:] + if invtz: + if name == "BEGIN": + if value in ("STANDARD", "DAYLIGHT"): + # Process component + pass + else: + raise ValueError("unknown component: "+value) + comptype = value + founddtstart = False + tzoffsetfrom = None + tzoffsetto = None + rrulelines = [] + tzname = None + elif name == "END": + if value == "VTIMEZONE": + if comptype: + raise ValueError("component not closed: "+comptype) + if not tzid: + raise ValueError("mandatory TZID not found") + if not comps: + raise ValueError( + "at least one component is needed") + # Process vtimezone + self._vtz[tzid] = _tzicalvtz(tzid, comps) + invtz = False + elif value == comptype: + if not founddtstart: + raise ValueError("mandatory DTSTART not found") + if tzoffsetfrom is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + if tzoffsetto is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + # Process component + rr = None + if rrulelines: + rr = rrule.rrulestr("\n".join(rrulelines), + compatible=True, + ignoretz=True, + cache=True) + comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, + (comptype == "DAYLIGHT"), + tzname, rr) + comps.append(comp) + comptype = None + else: + raise ValueError("invalid component end: "+value) + elif comptype: + if name == "DTSTART": + rrulelines.append(line) + founddtstart = True + elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): + rrulelines.append(line) + elif name == "TZOFFSETFROM": + if parms: + raise ValueError( + "unsupported %s parm: %s " % (name, parms[0])) + tzoffsetfrom = self._parse_offset(value) + elif name == "TZOFFSETTO": + if parms: + raise ValueError( + "unsupported TZOFFSETTO parm: "+parms[0]) + tzoffsetto = self._parse_offset(value) + elif name == "TZNAME": + if parms: + raise ValueError( + "unsupported TZNAME parm: "+parms[0]) + tzname = value + elif name == "COMMENT": + pass + else: + raise ValueError("unsupported property: "+name) + else: + if name == "TZID": + if parms: + raise ValueError( + "unsupported TZID parm: "+parms[0]) + tzid = value + elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): + pass + else: + raise ValueError("unsupported property: "+name) + elif name == "BEGIN" and value == "VTIMEZONE": + tzid = None + comps = [] + invtz = True + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + +if sys.platform != "win32": + TZFILES 
= ["/etc/localtime", "localtime"] + TZPATHS = ["/usr/share/zoneinfo", + "/usr/lib/zoneinfo", + "/usr/share/lib/zoneinfo", + "/etc/zoneinfo"] +else: + TZFILES = [] + TZPATHS = [] + + +def gettz(name=None): + tz = None + if not name: + try: + name = os.environ["TZ"] + except KeyError: + pass + if name is None or name == ":": + for filepath in TZFILES: + if not os.path.isabs(filepath): + filename = filepath + for path in TZPATHS: + filepath = os.path.join(path, filename) + if os.path.isfile(filepath): + break + else: + continue + if os.path.isfile(filepath): + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = tzlocal() + else: + if name.startswith(":"): + name = name[:-1] + if os.path.isabs(name): + if os.path.isfile(name): + tz = tzfile(name) + else: + tz = None + else: + for path in TZPATHS: + filepath = os.path.join(path, name) + if not os.path.isfile(filepath): + filepath = filepath.replace(' ', '_') + if not os.path.isfile(filepath): + continue + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = None + if tzwin is not None: + try: + tz = tzwin(name) + except WindowsError: + tz = None + + if not tz: + from dateutil.zoneinfo import get_zonefile_instance + tz = get_zonefile_instance().get(name) + + if not tz: + for c in name: + # name must have at least one offset to be a tzstr + if c in "0123456789": + try: + tz = tzstr(name) + except ValueError: + pass + break + else: + if name in ("GMT", "UTC"): + tz = tzutc() + elif name in time.tzname: + tz = tzlocal() + return tz + + +def datetime_exists(dt, tz=None): + """ + Given a datetime and a time zone, determine whether or not a given datetime + would fall in a gap. + + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. + + :return: + Returns a boolean value whether or not the "wall time" exists in ``tz``. + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + tz = dt.tzinfo + + dt = dt.replace(tzinfo=None) + + # This is essentially a test of whether or not the datetime can survive + # a round trip to UTC. + dt_rt = dt.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz) + dt_rt = dt_rt.replace(tzinfo=None) + + return dt == dt_rt + + +def datetime_ambiguous(dt, tz=None): + """ + Given a datetime and a time zone, determine whether or not a given datetime + is ambiguous (i.e if there are two times differentiated only by their DST + status). + + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. + + :return: + Returns a boolean value whether or not the "wall time" is ambiguous in + ``tz``. + + .. versionadded:: 2.6.0 + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + + tz = dt.tzinfo + + # If a time zone defines its own "is_ambiguous" function, we'll use that. 
+    is_ambiguous_fn = getattr(tz, 'is_ambiguous', None)
+    if is_ambiguous_fn is not None:
+        try:
+            return tz.is_ambiguous(dt)
+        except Exception:
+            pass
+
+    # If it doesn't come out and tell us it's ambiguous, we'll just check if
+    # the fold attribute has any effect on this particular date and time.
+    dt = dt.replace(tzinfo=tz)
+    wall_0 = enfold(dt, fold=0)
+    wall_1 = enfold(dt, fold=1)
+
+    same_offset = wall_0.utcoffset() == wall_1.utcoffset()
+    same_dst = wall_0.dst() == wall_1.dst()
+
+    return not (same_offset and same_dst)
+
+
+def _datetime_to_timestamp(dt):
+    """
+    Convert a :class:`datetime.datetime` object to an epoch timestamp in seconds
+    since January 1, 1970, ignoring the time zone.
+    """
+    return _total_seconds((dt.replace(tzinfo=None) - EPOCH))
+
+class _ContextWrapper(object):
+    """
+    Class for wrapping contexts so that they are passed through in a
+    with statement.
+    """
+    def __init__(self, context):
+        self.context = context
+
+    def __enter__(self):
+        return self.context
+
+    def __exit__(*args, **kwargs):
+        pass
+
+# vim:ts=4:sw=4:et
diff --git a/app/lib/dateutil/tz/win.py b/app/lib/dateutil/tz/win.py
new file mode 100644
index 0000000..9f4e551
--- /dev/null
+++ b/app/lib/dateutil/tz/win.py
@@ -0,0 +1,332 @@
+# This code was originally contributed by Jeffrey Harris.
+import datetime
+import struct
+
+from six.moves import winreg
+from six import text_type
+
+try:
+    import ctypes
+    from ctypes import wintypes
+except ValueError:
+    # ValueError is raised on non-Windows systems for some horrible reason.
+    raise ImportError("Running tzwin on non-Windows system")
+
+from ._common import tzname_in_python2, _tzinfo
+from ._common import tzrangebase
+
+__all__ = ["tzwin", "tzwinlocal", "tzres"]
+
+ONEWEEK = datetime.timedelta(7)
+
+TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
+TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
+TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
+
+
+def _settzkeyname():
+    handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
+    try:
+        winreg.OpenKey(handle, TZKEYNAMENT).Close()
+        TZKEYNAME = TZKEYNAMENT
+    except WindowsError:
+        TZKEYNAME = TZKEYNAME9X
+    handle.Close()
+    return TZKEYNAME
+
+TZKEYNAME = _settzkeyname()
+
+
+class tzres(object):
+    """
+    Class for accessing `tzres.dll`, which contains timezone name related
+    resources.
+
+    .. versionadded:: 2.5.0
+    """
+    p_wchar = ctypes.POINTER(wintypes.WCHAR)  # Pointer to a wide char
+
+    def __init__(self, tzres_loc='tzres.dll'):
+        # Load the user32 DLL so we can load strings from tzres
+        user32 = ctypes.WinDLL('user32')
+
+        # Specify the LoadStringW function
+        user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
+                                       wintypes.UINT,
+                                       wintypes.LPWSTR,
+                                       ctypes.c_int)
+
+        self.LoadStringW = user32.LoadStringW
+        self._tzres = ctypes.WinDLL(tzres_loc)
+        self.tzres_loc = tzres_loc
+
+    def load_name(self, offset):
+        """
+        Load a timezone name from a DLL offset (integer).
+
+        >>> from dateutil.tzwin import tzres
+        >>> tzr = tzres()
+        >>> print(tzr.load_name(112))
+        'Eastern Standard Time'
+
+        :param offset:
+            A positive integer value referring to a string from the tzres dll.
+
+        .. note::
+            Offsets found in the registry are generally of the form
+            `@tzres.dll,-114`. The offset in this case is 114, not -114.
+ + """ + resource = self.p_wchar() + lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) + nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) + return resource[:nchar] + + def name_from_string(self, tzname_str): + """ + Parse strings as returned from the Windows registry into the time zone + name as defined in the registry. + + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.name_from_string('@tzres.dll,-251')) + 'Dateline Daylight Time' + >>> print(tzr.name_from_string('Eastern Standard Time')) + 'Eastern Standard Time' + + :param tzname_str: + A timezone name string as returned from a Windows registry key. + + :return: + Returns the localized timezone string from tzres.dll if the string + is of the form `@tzres.dll,-offset`, else returns the input string. + """ + if not tzname_str.startswith('@'): + return tzname_str + + name_splt = tzname_str.split(',-') + try: + offset = int(name_splt[1]) + except: + raise ValueError("Malformed timezone string.") + + return self.load_name(offset) + + +class tzwinbase(tzrangebase): + """tzinfo class based on win32's timezones available in the registry.""" + def __init__(self): + raise NotImplementedError('tzwinbase is an abstract base class') + + def __eq__(self, other): + # Compare on all relevant dimensions, including name. + if not isinstance(other, tzwinbase): + return NotImplemented + + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._stddayofweek == other._stddayofweek and + self._dstdayofweek == other._dstdayofweek and + self._stdweeknumber == other._stdweeknumber and + self._dstweeknumber == other._dstweeknumber and + self._stdhour == other._stdhour and + self._dsthour == other._dsthour and + self._stdminute == other._stdminute and + self._dstminute == other._dstminute and + self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr) + + @staticmethod + def list(): + """Return a list of all time zones known to the system.""" + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + with winreg.OpenKey(handle, TZKEYNAME) as tzkey: + result = [winreg.EnumKey(tzkey, i) + for i in range(winreg.QueryInfoKey(tzkey)[0])] + return result + + def display(self): + return self._display + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. 
+ """ + + if not self.hasdst: + return None + + dston = picknthweekday(year, self._dstmonth, self._dstdayofweek, + self._dsthour, self._dstminute, + self._dstweeknumber) + + dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek, + self._stdhour, self._stdminute, + self._stdweeknumber) + + # Ambiguous dates default to the STD side + dstoff -= self._dst_base_offset + + return dston, dstoff + + def _get_hasdst(self): + return self._dstmonth != 0 + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +class tzwin(tzwinbase): + + def __init__(self, name): + self._name = name + + # multiple contexts only possible in 2.7 and 3.1, we still support 2.6 + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + tzkeyname = text_type("{kn}\{name}").format(kn=TZKEYNAME, name=name) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + keydict = valuestodict(tzkey) + + self._std_abbr = keydict["Std"] + self._dst_abbr = keydict["Dlt"] + + self._display = keydict["Display"] + + # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm + tup = struct.unpack("=3l16h", keydict["TZI"]) + stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 + dstoffset = stdoffset-tup[2] # + DaylightBias * -1 + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs + # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx + (self._stdmonth, + self._stddayofweek, # Sunday = 0 + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[4:9] + + (self._dstmonth, + self._dstdayofweek, # Sunday = 0 + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[12:17] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwin(%s)" % repr(self._name) + + def __reduce__(self): + return (self.__class__, (self._name,)) + + +class tzwinlocal(tzwinbase): + def __init__(self): + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey: + keydict = valuestodict(tzlocalkey) + + self._std_abbr = keydict["StandardName"] + self._dst_abbr = keydict["DaylightName"] + + try: + tzkeyname = text_type('{kn}\{sn}').format(kn=TZKEYNAME, + sn=self._std_abbr) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + _keydict = valuestodict(tzkey) + self._display = _keydict["Display"] + except OSError: + self._display = None + + stdoffset = -keydict["Bias"]-keydict["StandardBias"] + dstoffset = stdoffset-keydict["DaylightBias"] + + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # For reasons unclear, in this particular key, the day of week has been + # moved to the END of the SYSTEMTIME structure. + tup = struct.unpack("=8h", keydict["StandardStart"]) + + (self._stdmonth, + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[1:5] + + self._stddayofweek = tup[7] + + tup = struct.unpack("=8h", keydict["DaylightStart"]) + + (self._dstmonth, + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[1:5] + + self._dstdayofweek = tup[7] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwinlocal()" + + def __str__(self): + # str will return the standard name, not the daylight name. 
+ return "tzwinlocal(%s)" % repr(self._std_abbr) + + def __reduce__(self): + return (self.__class__, ()) + + +def picknthweekday(year, month, dayofweek, hour, minute, whichweek): + """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ + first = datetime.datetime(year, month, 1, hour, minute) + + # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), + # Because 7 % 7 = 0 + weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) + wd = weekdayone + ((whichweek - 1) * ONEWEEK) + if (wd.month != month): + wd -= ONEWEEK + + return wd + + +def valuestodict(key): + """Convert a registry key's values to a dictionary.""" + dout = {} + size = winreg.QueryInfoKey(key)[1] + tz_res = None + + for i in range(size): + key_name, value, dtype = winreg.EnumValue(key, i) + if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: + # If it's a DWORD (32-bit integer), it's stored as unsigned - convert + # that to a proper signed integer + if value & (1 << 31): + value = value - (1 << 32) + elif dtype == winreg.REG_SZ: + # If it's a reference to the tzres DLL, load the actual string + if value.startswith('@tzres'): + tz_res = tz_res or tzres() + value = tz_res.name_from_string(value) + + value = value.rstrip('\x00') # Remove trailing nulls + + dout[key_name] = value + + return dout diff --git a/app/lib/dateutil/tzwin.py b/app/lib/dateutil/tzwin.py new file mode 100644 index 0000000..55cd910 --- /dev/null +++ b/app/lib/dateutil/tzwin.py @@ -0,0 +1,2 @@ +# tzwin has moved to dateutil.tz.win +from .tz.win import * \ No newline at end of file diff --git a/app/lib/dateutil/zoneinfo/__init__.py b/app/lib/dateutil/zoneinfo/__init__.py new file mode 100644 index 0000000..7145e05 --- /dev/null +++ b/app/lib/dateutil/zoneinfo/__init__.py @@ -0,0 +1,187 @@ +# -*- coding: utf-8 -*- +import logging +import os +import warnings +import tempfile +import shutil +import json + +from tarfile import TarFile +from pkgutil import get_data +from io import BytesIO +from contextlib import closing + +from dateutil.tz import tzfile + +__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata", "rebuild"] + +ZONEFILENAME = "dateutil-zoneinfo.tar.gz" +METADATA_FN = 'METADATA' + +# python2.6 compatability. Note that TarFile.__exit__ != TarFile.close, but +# it's close enough for python2.6 +tar_open = TarFile.open +if not hasattr(TarFile, '__exit__'): + def tar_open(*args, **kwargs): + return closing(TarFile.open(*args, **kwargs)) + + +class tzfile(tzfile): + def __reduce__(self): + return (gettz, (self._filename,)) + + +def getzoneinfofile_stream(): + try: + return BytesIO(get_data(__name__, ZONEFILENAME)) + except IOError as e: # TODO switch to FileNotFoundError? + warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror)) + return None + + +class ZoneInfoFile(object): + def __init__(self, zonefile_stream=None): + if zonefile_stream is not None: + with tar_open(fileobj=zonefile_stream, mode='r') as tf: + # dict comprehension does not work on python2.6 + # TODO: get back to the nicer syntax when we ditch python2.6 + # self.zones = {zf.name: tzfile(tf.extractfile(zf), + # filename = zf.name) + # for zf in tf.getmembers() if zf.isfile()} + self.zones = dict((zf.name, tzfile(tf.extractfile(zf), + filename=zf.name)) + for zf in tf.getmembers() + if zf.isfile() and zf.name != METADATA_FN) + # deal with links: They'll point to their parent object. 
Less + # waste of memory + # links = {zl.name: self.zones[zl.linkname] + # for zl in tf.getmembers() if zl.islnk() or zl.issym()} + links = dict((zl.name, self.zones[zl.linkname]) + for zl in tf.getmembers() if + zl.islnk() or zl.issym()) + self.zones.update(links) + try: + metadata_json = tf.extractfile(tf.getmember(METADATA_FN)) + metadata_str = metadata_json.read().decode('UTF-8') + self.metadata = json.loads(metadata_str) + except KeyError: + # no metadata in tar file + self.metadata = None + else: + self.zones = dict() + self.metadata = None + + def get(self, name, default=None): + """ + Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method + for retrieving zones from the zone dictionary. + + :param name: + The name of the zone to retrieve. (Generally IANA zone names) + + :param default: + The value to return in the event of a missing key. + + .. versionadded:: 2.6.0 + + """ + return self.zones.get(name, default) + + +# The current API has gettz as a module function, although in fact it taps into +# a stateful class. So as a workaround for now, without changing the API, we +# will create a new "global" class instance the first time a user requests a +# timezone. Ugly, but adheres to the api. +# +# TODO: Remove after deprecation period. +_CLASS_ZONE_INSTANCE = list() + +def get_zonefile_instance(new_instance=False): + """ + This is a convenience function which provides a :class:`ZoneInfoFile` + instance using the data provided by the ``dateutil`` package. By default, it + caches a single instance of the ZoneInfoFile object and returns that. + + :param new_instance: + If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and + used as the cached instance for the next call. Otherwise, new instances + are created only as necessary. + + :return: + Returns a :class:`ZoneInfoFile` object. + + .. versionadded:: 2.6 + """ + if new_instance: + zif = None + else: + zif = getattr(get_zonefile_instance, '_cached_instance', None) + + if zif is None: + zif = ZoneInfoFile(getzoneinfofile_stream()) + + get_zonefile_instance._cached_instance = zif + + return zif + +def gettz(name): + """ + This retrieves a time zone from the local zoneinfo tarball that is packaged + with dateutil. + + :param name: + An IANA-style time zone name, as found in the zoneinfo file. + + :return: + Returns a :class:`dateutil.tz.tzfile` time zone object. + + .. warning:: + It is generally inadvisable to use this function, and it is only + provided for API compatibility with earlier versions. This is *not* + equivalent to ``dateutil.tz.gettz()``, which selects an appropriate + time zone based on the inputs, favoring system zoneinfo. This is ONLY + for accessing the dateutil-specific zoneinfo (which may be out of + date compared to the system zoneinfo). + + .. deprecated:: 2.6 + If you need to use a specific zoneinfofile over the system zoneinfo, + instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call + :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead. + + Use :func:`get_zonefile_instance` to retrieve an instance of the + dateutil-provided zoneinfo. + """ + warnings.warn("zoneinfo.gettz() will be removed in future versions, " + "to use the dateutil-provided zoneinfo files, instantiate a " + "ZoneInfoFile object and use ZoneInfoFile.zones.get() " + "instead. 
See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].zones.get(name) + + +def gettz_db_metadata(): + """ Get the zonefile metadata + + See `zonefile_metadata`_ + + :returns: + A dictionary with the database metadata + + .. deprecated:: 2.6 + See deprecation warning in :func:`zoneinfo.gettz`. To get metadata, + query the attribute ``zoneinfo.ZoneInfoFile.metadata``. + """ + warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future " + "versions, to use the dateutil-provided zoneinfo files, " + "ZoneInfoFile object and query the 'metadata' attribute " + "instead. See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].metadata + + diff --git a/app/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/app/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz new file mode 100644 index 0000000..1d15597 Binary files /dev/null and b/app/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz differ diff --git a/app/lib/dateutil/zoneinfo/rebuild.py b/app/lib/dateutil/zoneinfo/rebuild.py new file mode 100644 index 0000000..a66c1d9 --- /dev/null +++ b/app/lib/dateutil/zoneinfo/rebuild.py @@ -0,0 +1,51 @@ +import logging +import os +import tempfile +import shutil +import json +from subprocess import check_call + +from dateutil.zoneinfo import tar_open, METADATA_FN, ZONEFILENAME + + +def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None): + """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar* + + filename is the timezone tarball from ftp.iana.org/tz. + + """ + tmpdir = tempfile.mkdtemp() + zonedir = os.path.join(tmpdir, "zoneinfo") + moduledir = os.path.dirname(__file__) + try: + with tar_open(filename) as tf: + for name in zonegroups: + tf.extract(name, tmpdir) + filepaths = [os.path.join(tmpdir, n) for n in zonegroups] + try: + check_call(["zic", "-d", zonedir] + filepaths) + except OSError as e: + _print_on_nosuchfile(e) + raise + # write metadata file + with open(os.path.join(zonedir, METADATA_FN), 'w') as f: + json.dump(metadata, f, indent=4, sort_keys=True) + target = os.path.join(moduledir, ZONEFILENAME) + with tar_open(target, "w:%s" % format) as tf: + for entry in os.listdir(zonedir): + entrypath = os.path.join(zonedir, entry) + tf.add(entrypath, entry) + finally: + shutil.rmtree(tmpdir) + +def _print_on_nosuchfile(e): + """Print helpful troubleshooting message + + e is an exception raised by subprocess.check_call() + + """ + if e.errno == 2: + logging.error( + "Could not find zic. Perhaps you need to install " + "libc-bin or some other package that provides it, " + "or it's not in your PATH?") diff --git a/app/lib/flask/__init__.py b/app/lib/flask/__init__.py new file mode 100644 index 0000000..2fcb356 --- /dev/null +++ b/app/lib/flask/__init__.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +""" + flask + ~~~~~ + + A microframework based on Werkzeug. It's extensively documented + and follows best practice patterns. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +__version__ = '0.12.1' + +# utilities we import from Werkzeug and Jinja2 that are unused +# in the module but are exported as public interface. 
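The comment above, and the imports that follow it, make a point worth a quick sketch: `abort`, `redirect`, `Markup`, and `escape` originate in Werkzeug and Jinja2 but are deliberately re-exported as Flask public API. A small illustrative example (route names are invented):

```python
# Sketch: abort() and redirect() come from Werkzeug, but Flask re-exports
# them, so application code imports them from flask directly.
from flask import Flask, abort, redirect, url_for

app = Flask(__name__)

@app.route('/')
def home():
    return 'home'

@app.route('/legacy')
def legacy():
    return redirect(url_for('home'))  # 302 to the new location

@app.route('/admin')
def admin():
    abort(403)  # raises werkzeug.exceptions.Forbidden
```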
+from werkzeug.exceptions import abort +from werkzeug.utils import redirect +from jinja2 import Markup, escape + +from .app import Flask, Request, Response +from .config import Config +from .helpers import url_for, flash, send_file, send_from_directory, \ + get_flashed_messages, get_template_attribute, make_response, safe_join, \ + stream_with_context +from .globals import current_app, g, request, session, _request_ctx_stack, \ + _app_ctx_stack +from .ctx import has_request_context, has_app_context, \ + after_this_request, copy_current_request_context +from .blueprints import Blueprint +from .templating import render_template, render_template_string + +# the signals +from .signals import signals_available, template_rendered, request_started, \ + request_finished, got_request_exception, request_tearing_down, \ + appcontext_tearing_down, appcontext_pushed, \ + appcontext_popped, message_flashed, before_render_template + +# We're not exposing the actual json module but a convenient wrapper around +# it. +from . import json + +# This was the only thing that Flask used to export at one point and it had +# a more generic name. +jsonify = json.jsonify + +# backwards compat, goes away in 1.0 +from .sessions import SecureCookieSession as Session +json_available = True diff --git a/app/lib/flask/__main__.py b/app/lib/flask/__main__.py new file mode 100644 index 0000000..cbefccd --- /dev/null +++ b/app/lib/flask/__main__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +""" + flask.__main__ + ~~~~~~~~~~~~~~ + + Alias for flask.run for the command line. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + + +if __name__ == '__main__': + from .cli import main + main(as_module=True) diff --git a/app/lib/flask/_compat.py b/app/lib/flask/_compat.py new file mode 100644 index 0000000..071628f --- /dev/null +++ b/app/lib/flask/_compat.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +""" + flask._compat + ~~~~~~~~~~~~~ + + Some py2/py3 compatibility support based on a stripped down + version of six so we don't have to depend on a specific version + of it. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +import sys + +PY2 = sys.version_info[0] == 2 +_identity = lambda x: x + + +if not PY2: + text_type = str + string_types = (str,) + integer_types = (int,) + + iterkeys = lambda d: iter(d.keys()) + itervalues = lambda d: iter(d.values()) + iteritems = lambda d: iter(d.items()) + + from io import StringIO + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + implements_to_string = _identity + +else: + text_type = unicode + string_types = (str, unicode) + integer_types = (int, long) + + iterkeys = lambda d: d.iterkeys() + itervalues = lambda d: d.itervalues() + iteritems = lambda d: d.iteritems() + + from cStringIO import StringIO + + exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') + + def implements_to_string(cls): + cls.__unicode__ = cls.__str__ + cls.__str__ = lambda x: x.__unicode__().encode('utf-8') + return cls + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a + # dummy metaclass for one level of class instantiation that replaces + # itself with the actual metaclass. 
+ class metaclass(type): + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +# Certain versions of pypy have a bug where clearing the exception stack +# breaks the __exit__ function in a very peculiar way. The second level of +# exception blocks is necessary because pypy seems to forget to check if an +# exception happened until the next bytecode instruction? +# +# Relevant PyPy bugfix commit: +# https://bitbucket.org/pypy/pypy/commits/77ecf91c635a287e88e60d8ddb0f4e9df4003301 +# According to ronan on #pypy IRC, it is released in PyPy2 2.3 and later +# versions. +# +# Ubuntu 14.04 has PyPy 2.2.1, which does exhibit this bug. +BROKEN_PYPY_CTXMGR_EXIT = False +if hasattr(sys, 'pypy_version_info'): + class _Mgr(object): + def __enter__(self): + return self + def __exit__(self, *args): + if hasattr(sys, 'exc_clear'): + # Python 3 (PyPy3) doesn't have exc_clear + sys.exc_clear() + try: + try: + with _Mgr(): + raise AssertionError() + except: + raise + except TypeError: + BROKEN_PYPY_CTXMGR_EXIT = True + except AssertionError: + pass diff --git a/src/lib/flask/app.py b/app/lib/flask/app.py similarity index 80% rename from src/lib/flask/app.py rename to app/lib/flask/app.py index 98ecb10..1404e17 100644 --- a/src/lib/flask/app.py +++ b/app/lib/flask/app.py @@ -5,10 +5,9 @@ This module implements the central WSGI application object. - :copyright: (c) 2011 by Armin Ronacher. + :copyright: (c) 2015 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ - import os import sys from threading import Lock @@ -19,17 +18,17 @@ from werkzeug.datastructures import ImmutableDict from werkzeug.routing import Map, Rule, RequestRedirect, BuildError from werkzeug.exceptions import HTTPException, InternalServerError, \ - MethodNotAllowed, BadRequest + MethodNotAllowed, BadRequest, default_exceptions from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \ - locked_cached_property, _endpoint_from_view_func, find_package -from . import json + locked_cached_property, _endpoint_from_view_func, find_package, \ + get_debug_flag +from . import json, cli from .wrappers import Request, Response from .config import ConfigAttribute, Config from .ctx import RequestContext, AppContext, _AppCtxGlobals from .globals import _request_ctx_stack, request, session, g from .sessions import SecureCookieSessionInterface -from .module import blueprint_is_module from .templating import DispatchingJinjaLoader, Environment, \ _default_template_ctx_processor from .signals import request_started, request_finished, got_request_exception, \ @@ -39,6 +38,9 @@ # a lock used for logger initialization _logger_lock = Lock() +# a singleton sentinel value for parameter defaults +_sentinel = object() + def _make_timedelta(value): if not isinstance(value, timedelta): @@ -72,21 +74,21 @@ class Flask(_PackageBoundObject): The name of the package is used to resolve resources from inside the package or the folder the module is contained in depending on if the package parameter resolves to an actual python package (a folder with - an `__init__.py` file inside) or a standard module (just a `.py` file). + an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file). For more information about resource loading, see :func:`open_resource`. 
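The paragraph above is the key to the `import_name` argument: resource loading is resolved relative to the package or module you name when constructing the application. A brief sketch, with `schema.sql` as a hypothetical file shipped next to the application module:

```python
# Sketch of import_name-relative resource loading via open_resource();
# 'schema.sql' is a hypothetical file next to this module.
from flask import Flask

app = Flask(__name__)

def load_schema():
    with app.open_resource('schema.sql') as f:  # opened in binary mode
        return f.read()
```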
Usually you create a :class:`Flask` instance in your main module or - in the `__init__.py` file of your package like this:: + in the :file:`__init__.py` file of your package like this:: from flask import Flask app = Flask(__name__) .. admonition:: About the First Parameter - The idea of the first parameter is to give Flask an idea what + The idea of the first parameter is to give Flask an idea of what belongs to your application. This name is used to find resources - on the file system, can be used by extensions to improve debugging + on the filesystem, can be used by extensions to improve debugging information and a lot more. So it's important what you provide there. If you are using a single @@ -94,7 +96,7 @@ class Flask(_PackageBoundObject): using a package, it's usually recommended to hardcode the name of your package there. - For example if your application is defined in `yourapplication/app.py` + For example if your application is defined in :file:`yourapplication/app.py` you should create it with one of the two versions below:: app = Flask('yourapplication') @@ -118,6 +120,9 @@ class Flask(_PackageBoundObject): The `instance_path` and `instance_relative_config` parameters were added. + .. versionadded:: 0.11 + The `root_path` parameter was added. + :param import_name: the name of the application package :param static_url_path: can be used to specify a different path for the static files on the web. Defaults to the name @@ -133,10 +138,15 @@ class Flask(_PackageBoundObject): By default the folder ``'instance'`` next to the package or module is assumed to be the instance path. - :param instance_relative_config: if set to `True` relative filenames + :param instance_relative_config: if set to ``True`` relative filenames for loading the config are assumed to be relative to the instance path instead of the application root. + :param root_path: Flask by default will automatically calculate the path + to the root of the application. In certain situations + this cannot be achieved (for instance if the package + is a Python 3 namespace package) and needs to be + manually defined. """ #: The class that is used for request objects. See :class:`~flask.Request` @@ -147,18 +157,23 @@ class Flask(_PackageBoundObject): #: :class:`~flask.Response` for more information. response_class = Response + #: The class that is used for the Jinja environment. + #: + #: .. versionadded:: 0.11 + jinja_environment = Environment + #: The class that is used for the :data:`~flask.g` instance. #: #: Example use cases for a custom class: #: #: 1. Store arbitrary attributes on flask.g. #: 2. Add a property for lazy per-request database connectors. - #: 3. Return None instead of AttributeError on expected attributes. + #: 3. Return None instead of AttributeError on unexpected attributes. #: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g. #: #: In Flask 0.9 this property was called `request_globals_class` but it #: was changed in 0.10 to :attr:`app_ctx_globals_class` because the - #: flask.g object is not application context scoped. + #: flask.g object is now application context scoped. #: #: .. versionadded:: 0.10 app_ctx_globals_class = _AppCtxGlobals @@ -175,16 +190,27 @@ def _set_request_globals_class(self, value): _set_request_globals_class) del _get_request_globals_class, _set_request_globals_class - #: The debug flag. Set this to `True` to enable debugging of the + #: The class that is used for the ``config`` attribute of this app. + #: Defaults to :class:`~flask.Config`. 
+ #: + #: Example use cases for a custom class: + #: + #: 1. Default values for certain config options. + #: 2. Access to config values through attributes in addition to keys. + #: + #: .. versionadded:: 0.11 + config_class = Config + + #: The debug flag. Set this to ``True`` to enable debugging of the #: application. In debug mode the debugger will kick in when an unhandled #: exception occurs and the integrated server will automatically reload #: the application if changes in the code are detected. #: - #: This attribute can also be configured from the config with the `DEBUG` - #: configuration key. Defaults to `False`. + #: This attribute can also be configured from the config with the ``DEBUG`` + #: configuration key. Defaults to ``False``. debug = ConfigAttribute('DEBUG') - #: The testing flag. Set this to `True` to enable the test mode of + #: The testing flag. Set this to ``True`` to enable the test mode of #: Flask extensions (and in the future probably also Flask itself). #: For example this might activate unittest helpers that have an #: additional runtime cost which should not be enabled by default. @@ -193,7 +219,7 @@ def _set_request_globals_class(self, value): #: default it's implicitly enabled. #: #: This attribute can also be configured from the config with the - #: `TESTING` configuration key. Defaults to `False`. + #: ``TESTING`` configuration key. Defaults to ``False``. testing = ConfigAttribute('TESTING') #: If a secret key is set, cryptographic components can use this to @@ -201,13 +227,13 @@ def _set_request_globals_class(self, value): #: when you want to use the secure cookie for instance. #: #: This attribute can also be configured from the config with the - #: `SECRET_KEY` configuration key. Defaults to `None`. + #: ``SECRET_KEY`` configuration key. Defaults to ``None``. secret_key = ConfigAttribute('SECRET_KEY') #: The secure cookie uses this for the name of the session cookie. #: #: This attribute can also be configured from the config with the - #: `SESSION_COOKIE_NAME` configuration key. Defaults to ``'session'`` + #: ``SESSION_COOKIE_NAME`` configuration key. Defaults to ``'session'`` session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME') #: A :class:`~datetime.timedelta` which is used to set the expiration @@ -215,11 +241,21 @@ def _set_request_globals_class(self, value): #: permanent session survive for roughly one month. #: #: This attribute can also be configured from the config with the - #: `PERMANENT_SESSION_LIFETIME` configuration key. Defaults to + #: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to #: ``timedelta(days=31)`` permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME', get_converter=_make_timedelta) + #: A :class:`~datetime.timedelta` which is used as default cache_timeout + #: for the :func:`send_file` functions. The default is 12 hours. + #: + #: This attribute can also be configured from the config with the + #: ``SEND_FILE_MAX_AGE_DEFAULT`` configuration key. This configuration + #: variable can also be set with an integer value used as seconds. + #: Defaults to ``timedelta(hours=12)`` + send_file_max_age_default = ConfigAttribute('SEND_FILE_MAX_AGE_DEFAULT', + get_converter=_make_timedelta) + #: Enable this if you want to use the X-Sendfile feature. Keep in #: mind that the server has to support this. This only affects files #: sent with the :func:`send_file` method. @@ -227,7 +263,7 @@ def _set_request_globals_class(self, value): #: .. 
versionadded:: 0.2 #: #: This attribute can also be configured from the config with the - #: `USE_X_SENDFILE` configuration key. Defaults to `False`. + #: ``USE_X_SENDFILE`` configuration key. Defaults to ``False``. use_x_sendfile = ConfigAttribute('USE_X_SENDFILE') #: The name of the logger to use. By default the logger name is the @@ -236,23 +272,6 @@ def _set_request_globals_class(self, value): #: .. versionadded:: 0.4 logger_name = ConfigAttribute('LOGGER_NAME') - #: Enable the deprecated module support? This is active by default - #: in 0.7 but will be changed to False in 0.8. With Flask 1.0 modules - #: will be removed in favor of Blueprints - enable_modules = True - - #: The logging format used for the debug logger. This is only used when - #: the application is in debug mode, otherwise the attached logging - #: handler does the formatting. - #: - #: .. versionadded:: 0.3 - debug_log_format = ( - '-' * 80 + '\n' + - '%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' + - '%(message)s\n' + - '-' * 80 - ) - #: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`. #: #: .. versionadded:: 0.10 @@ -270,7 +289,7 @@ def _set_request_globals_class(self, value): #: Default configuration parameters. default_config = ImmutableDict({ - 'DEBUG': False, + 'DEBUG': get_debug_flag(default=False), 'TESTING': False, 'PROPAGATE_EXCEPTIONS': None, 'PRESERVE_CONTEXT_ON_EXCEPTION': None, @@ -278,6 +297,7 @@ def _set_request_globals_class(self, value): 'PERMANENT_SESSION_LIFETIME': timedelta(days=31), 'USE_X_SENDFILE': False, 'LOGGER_NAME': None, + 'LOGGER_HANDLER_POLICY': 'always', 'SERVER_NAME': None, 'APPLICATION_ROOT': None, 'SESSION_COOKIE_NAME': 'session', @@ -285,14 +305,18 @@ def _set_request_globals_class(self, value): 'SESSION_COOKIE_PATH': None, 'SESSION_COOKIE_HTTPONLY': True, 'SESSION_COOKIE_SECURE': False, + 'SESSION_REFRESH_EACH_REQUEST': True, 'MAX_CONTENT_LENGTH': None, - 'SEND_FILE_MAX_AGE_DEFAULT': 12 * 60 * 60, # 12 hours + 'SEND_FILE_MAX_AGE_DEFAULT': timedelta(hours=12), 'TRAP_BAD_REQUEST_ERRORS': False, 'TRAP_HTTP_EXCEPTIONS': False, + 'EXPLAIN_TEMPLATE_LOADING': False, 'PREFERRED_URL_SCHEME': 'http', 'JSON_AS_ASCII': True, 'JSON_SORT_KEYS': True, 'JSONIFY_PRETTYPRINT_REGULAR': True, + 'JSONIFY_MIMETYPE': 'application/json', + 'TEMPLATES_AUTO_RELOAD': None, }) #: The rule object to use for URL rules created. This is used by @@ -314,9 +338,11 @@ def _set_request_globals_class(self, value): def __init__(self, import_name, static_path=None, static_url_path=None, static_folder='static', template_folder='templates', - instance_path=None, instance_relative_config=False): + instance_path=None, instance_relative_config=False, + root_path=None): _PackageBoundObject.__init__(self, import_name, - template_folder=template_folder) + template_folder=template_folder, + root_path=root_path) if static_path is not None: from warnings import warn warn(DeprecationWarning('static_path is now called ' @@ -357,11 +383,11 @@ def __init__(self, import_name, static_path=None, static_url_path=None, # :attr:`error_handler_spec` shall be used now. self._error_handlers = {} - #: A dictionary of all registered error handlers. The key is `None` + #: A dictionary of all registered error handlers. The key is ``None`` #: for error handlers active on the application, otherwise the key is #: the name of the blueprint. Each key points to another dictionary - #: where they key is the status code of the http exception. 
The - #: special key `None` points to a list of tuples where the first item + #: where the key is the status code of the http exception. The + #: special key ``None`` points to a list of tuples where the first item #: is the class for the instance check and the second the error handler #: function. #: @@ -372,7 +398,7 @@ def __init__(self, import_name, static_path=None, static_url_path=None, #: A list of functions that are called when :meth:`url_for` raises a #: :exc:`~werkzeug.routing.BuildError`. Each function registered here #: is called with `error`, `endpoint` and `values`. If a function - #: returns `None` or raises a `BuildError` the next function is + #: returns ``None`` or raises a :exc:`BuildError` the next function is #: tried. #: #: .. versionadded:: 0.9 @@ -380,7 +406,7 @@ def __init__(self, import_name, static_path=None, static_url_path=None, #: A dictionary with lists of functions that should be called at the #: beginning of the request. The key of the dictionary is the name of - #: the blueprint this function is active for, `None` for all requests. + #: the blueprint this function is active for, ``None`` for all requests. #: This can for example be used to open database connections or #: getting hold of the currently logged in user. To register a #: function here, use the :meth:`before_request` decorator. @@ -395,16 +421,15 @@ def __init__(self, import_name, static_path=None, static_url_path=None, #: A dictionary with lists of functions that should be called after #: each request. The key of the dictionary is the name of the blueprint - #: this function is active for, `None` for all requests. This can for - #: example be used to open database connections or getting hold of the - #: currently logged in user. To register a function here, use the - #: :meth:`after_request` decorator. + #: this function is active for, ``None`` for all requests. This can for + #: example be used to close database connections. To register a function + #: here, use the :meth:`after_request` decorator. self.after_request_funcs = {} #: A dictionary with lists of functions that are called after #: each request, even if an exception has occurred. The key of the #: dictionary is the name of the blueprint this function is active for, - #: `None` for all requests. These functions are not allowed to modify + #: ``None`` for all requests. These functions are not allowed to modify #: the request, and their return values are ignored. If an exception #: occurred while processing the request, it gets passed to each #: teardown_request function. To register a function here, use the @@ -424,7 +449,7 @@ def __init__(self, import_name, static_path=None, static_url_path=None, #: A dictionary with lists of functions that can be used as URL #: value processor functions. Whenever a URL is built these functions #: are called to modify the dictionary of values in place. The key - #: `None` here is used for application wide + #: ``None`` here is used for application wide #: callbacks, otherwise the key is the name of the blueprint. #: Each of these functions has the chance to modify the dictionary #: @@ -432,7 +457,7 @@ def __init__(self, import_name, static_path=None, static_url_path=None, self.url_value_preprocessors = {} #: A dictionary with lists of functions that can be used as URL value - #: preprocessors. The key `None` here is used for application wide + #: preprocessors. The key ``None`` here is used for application wide #: callbacks, otherwise the key is the name of the blueprint. 
#: Each of these functions has the chance to modify the dictionary #: of URL values before they are used as the keyword arguments of the @@ -445,7 +470,7 @@ def __init__(self, import_name, static_path=None, static_url_path=None, #: A dictionary with list of functions that are called without argument #: to populate the template context. The key of the dictionary is the - #: name of the blueprint this function is active for, `None` for all + #: name of the blueprint this function is active for, ``None`` for all #: requests. Each returns a dictionary that the template context is #: updated with. To register a function here, use the #: :meth:`context_processor` decorator. @@ -453,12 +478,19 @@ def __init__(self, import_name, static_path=None, static_url_path=None, None: [_default_template_ctx_processor] } + #: A list of shell context processor functions that should be run + #: when a shell context is created. + #: + #: .. versionadded:: 0.11 + self.shell_context_processors = [] + #: all the attached blueprints in a dictionary by name. Blueprints #: can be attached multiple times so this dictionary does not tell #: you how often they got attached. #: #: .. versionadded:: 0.7 self.blueprints = {} + self._blueprint_order = [] #: a place where extensions can store application specific state. For #: example this is where an extension could store database engines and @@ -469,8 +501,8 @@ def __init__(self, import_name, static_path=None, static_url_path=None, #: app.extensions = {} #: app.extensions['extensionname'] = SomeObject() #: - #: The key must match the name of the `flaskext` module. For example in - #: case of a "Flask-Foo" extension in `flaskext.foo`, the key would be + #: The key must match the name of the extension module. For example in + #: case of a "Flask-Foo" extension in `flask_foo`, the key would be #: ``'foo'``. #: #: .. versionadded:: 0.7 @@ -486,7 +518,7 @@ def __init__(self, import_name, static_path=None, static_url_path=None, #: def to_python(self, value): #: return value.split(',') #: def to_url(self, values): - #: return ','.join(BaseConverter.to_url(value) + #: return ','.join(super(ListConverter, self).to_url(value) #: for value in values) #: #: app = Flask(__name__) @@ -508,6 +540,14 @@ def __init__(self, import_name, static_path=None, static_url_path=None, endpoint='static', view_func=self.send_static_file) + #: The click command line context for this application. Commands + #: registered here show up in the :command:`flask` command once the + #: application has been discovered. The default commands are + #: provided by Flask itself and can be overridden. + #: + #: This is an instance of a :class:`click.Group` object. + self.cli = cli.AppGroup(self.name) + def _get_error_handlers(self): from warnings import warn warn(DeprecationWarning('error_handlers is deprecated, use the ' @@ -538,7 +578,7 @@ def name(self): @property def propagate_exceptions(self): - """Returns the value of the `PROPAGATE_EXCEPTIONS` configuration + """Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration value in case it's set, otherwise a sensible default is returned. .. versionadded:: 0.7 @@ -550,7 +590,7 @@ def propagate_exceptions(self): @property def preserve_context_on_exception(self): - """Returns the value of the `PRESERVE_CONTEXT_ON_EXCEPTION` + """Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration value in case it's set, otherwise a sensible default is returned. 
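The `ListConverter` comment block in the hunk above is close to runnable already; here is a wired-up version under illustrative names (`list` as the converter key, a `/tags/` route), assuming nothing beyond Flask and Werkzeug themselves:

```python
# Runnable version of the url_map.converters example shown in the hunk
# above: a converter that round-trips comma-separated path segments.
from flask import Flask
from werkzeug.routing import BaseConverter

class ListConverter(BaseConverter):
    def to_python(self, value):
        return value.split(',')

    def to_url(self, values):
        return ','.join(super(ListConverter, self).to_url(value)
                        for value in values)

app = Flask(__name__)
app.url_map.converters['list'] = ListConverter

@app.route('/tags/<list:tags>')
def show_tags(tags):
    return 'tags: ' + ', '.join(tags)

with app.test_client() as c:
    assert c.get('/tags/a,b').data == b'tags: a, b'
```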
@@ -590,7 +630,7 @@ def jinja_env(self): @property def got_first_request(self): - """This attribute is set to `True` if the application started + """This attribute is set to ``True`` if the application started handling the first request. .. versionadded:: 0.8 @@ -609,7 +649,7 @@ def make_config(self, instance_relative=False): root_path = self.root_path if instance_relative: root_path = self.instance_path - return Config(root_path, self.default_config) + return self.config_class(root_path, self.default_config) def auto_find_instance_path(self): """Tries to locate the instance path if it was not provided to the @@ -643,11 +683,19 @@ def create_jinja_environment(self): this function to customize the behavior. .. versionadded:: 0.5 + .. versionchanged:: 0.11 + ``Environment.auto_reload`` set in accordance with + ``TEMPLATES_AUTO_RELOAD`` configuration option. """ options = dict(self.jinja_options) if 'autoescape' not in options: options['autoescape'] = self.select_jinja_autoescape - rv = Environment(self, **options) + if 'auto_reload' not in options: + if self.config['TEMPLATES_AUTO_RELOAD'] is not None: + options['auto_reload'] = self.config['TEMPLATES_AUTO_RELOAD'] + else: + options['auto_reload'] = self.debug + rv = self.jinja_environment(self, **options) rv.globals.update( url_for=url_for, get_flashed_messages=get_flashed_messages, @@ -685,13 +733,13 @@ def init_jinja_globals(self): """ def select_jinja_autoescape(self, filename): - """Returns `True` if autoescaping should be active for the given - template name. + """Returns ``True`` if autoescaping should be active for the given + template name. If no template name is given, returns `True`. .. versionadded:: 0.5 """ if filename is None: - return False + return True return filename.endswith(('.html', '.htm', '.xml', '.xhtml')) def update_template_context(self, context): @@ -719,9 +767,26 @@ def update_template_context(self, context): # existing views. context.update(orig_ctx) + def make_shell_context(self): + """Returns the shell context for an interactive shell for this + application. This runs all the registered shell context + processors. + + .. versionadded:: 0.11 + """ + rv = {'app': self, 'g': g} + for processor in self.shell_context_processors: + rv.update(processor()) + return rv + def run(self, host=None, port=None, debug=None, **options): - """Runs the application on a local development server. If the - :attr:`debug` flag is set the server will automatically reload + """Runs the application on a local development server. + + Do not use ``run()`` in a production setting. It is not intended to + meet security and performance requirements for a production server. + Instead, see :ref:`deployment` for WSGI server recommendations. + + If the :attr:`debug` flag is set the server will automatically reload for code changes and show a debugger in case an exception happened. If you want to run the application in debug mode, but disable the @@ -729,13 +794,17 @@ def run(self, host=None, port=None, debug=None, **options): ``use_evalex=False`` as parameter. This will keep the debugger's traceback screen active, but disable code execution. + It is not recommended to use this function for development with + automatic reloading as this is badly supported. Instead you should + be using the :command:`flask` command line script's ``run`` support. + .. admonition:: Keep in Mind Flask will suppress any server error with a generic error page unless it is in debug mode. 
As such to enable just the interactive debugger without the code reloading, you have to invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``. - Setting ``use_debugger`` to `True` without being in debug mode + Setting ``use_debugger`` to ``True`` without being in debug mode won't catch any exceptions because there won't be any to catch. @@ -772,11 +841,11 @@ def run(self, host=None, port=None, debug=None, **options): run_simple(host, port, self, **options) finally: # reset the first request information if the development server - # resetted normally. This makes it possible to restart the server + # reset normally. This makes it possible to restart the server # without reloader and that stuff from an interactive shell. self._got_first_request = False - def test_client(self, use_cookies=True): + def test_client(self, use_cookies=True, **kwargs): """Creates a test client for this application. For information about unit testing head over to :ref:`testing`. @@ -791,28 +860,46 @@ def test_client(self, use_cookies=True): app.testing = True client = app.test_client() - The test client can be used in a `with` block to defer the closing down - of the context until the end of the `with` block. This is useful if + The test client can be used in a ``with`` block to defer the closing down + of the context until the end of the ``with`` block. This is useful if you want to access the context locals for testing:: with app.test_client() as c: rv = c.get('/?vodka=42') assert request.args['vodka'] == '42' + Additionally, you may pass optional keyword arguments that will then + be passed to the application's :attr:`test_client_class` constructor. + For example:: + + from flask.testing import FlaskClient + + class CustomClient(FlaskClient): + def __init__(self, *args, **kwargs): + self._authentication = kwargs.pop("authentication") + super(CustomClient,self).__init__( *args, **kwargs) + + app.test_client_class = CustomClient + client = app.test_client(authentication='Basic ....') + See :class:`~flask.testing.FlaskClient` for more information. .. versionchanged:: 0.4 - added support for `with` block usage for the client. + added support for ``with`` block usage for the client. .. versionadded:: 0.7 The `use_cookies` parameter was added as well as the ability to override the client to be used by setting the :attr:`test_client_class` attribute. + + .. versionchanged:: 0.11 + Added `**kwargs` to support passing additional keyword arguments to + the constructor of :attr:`test_client_class`. """ cls = self.test_client_class if cls is None: from flask.testing import FlaskClient as cls - return cls(self, self.response_class, use_cookies=use_cookies) + return cls(self, self.response_class, use_cookies=use_cookies, **kwargs) def open_session(self, request): """Creates or opens a new session. Default implementation stores all @@ -844,32 +931,6 @@ def make_null_session(self): """ return self.session_interface.make_null_session(self) - def register_module(self, module, **options): - """Registers a module with this application. The keyword argument - of this function are the same as the ones for the constructor of the - :class:`Module` class and will override the values of the module if - provided. - - .. versionchanged:: 0.7 - The module system was deprecated in favor for the blueprint - system. - """ - assert blueprint_is_module(module), 'register_module requires ' \ - 'actual module objects. Please upgrade to blueprints though.' 
- if not self.enable_modules: - raise RuntimeError('Module support was disabled but code ' - 'attempted to register a module named %r' % module) - else: - from warnings import warn - warn(DeprecationWarning('Modules are deprecated. Upgrade to ' - 'using blueprints. Have a look into the documentation for ' - 'more information. If this module was registered by a ' - 'Flask-Extension upgrade the extension or contact the author ' - 'of that extension instead. (Registered %r)' % module), - stacklevel=2) - - self.register_blueprint(module, **options) - @setupmethod def register_blueprint(self, blueprint, **options): """Registers a blueprint on the application. @@ -885,9 +946,17 @@ def register_blueprint(self, blueprint, **options): (blueprint, self.blueprints[blueprint.name], blueprint.name) else: self.blueprints[blueprint.name] = blueprint + self._blueprint_order.append(blueprint) first_registration = True blueprint.register(self, options, first_registration) + def iter_blueprints(self): + """Iterates over all blueprints by the order they were registered. + + .. versionadded:: 0.11 + """ + return iter(self._blueprint_order) + @setupmethod def add_url_rule(self, rule, endpoint=None, view_func=None, **options): """Connects a URL rule. Works exactly like the :meth:`route` @@ -921,7 +990,7 @@ def index(): `view_func` parameter added. .. versionchanged:: 0.6 - `OPTIONS` is added automatically as method. + ``OPTIONS`` is added automatically as method. :param rule: the URL rule as string :param endpoint: the endpoint for the registered URL rule. Flask @@ -933,9 +1002,9 @@ def index(): :class:`~werkzeug.routing.Rule` object. A change to Werkzeug is handling of method options. methods is a list of methods this rule should be limited - to (`GET`, `POST` etc.). By default a rule - just listens for `GET` (and implicitly `HEAD`). - Starting with Flask 0.6, `OPTIONS` is implicitly + to (``GET``, ``POST`` etc.). By default a rule + just listens for ``GET`` (and implicitly ``HEAD``). + Starting with Flask 0.6, ``OPTIONS`` is implicitly added and handled by the standard request handling. """ if endpoint is None: @@ -945,10 +1014,13 @@ def index(): # if the methods are not given and the view_func object knows its # methods we can use that instead. If neither exists, we go with - # a tuple of only `GET` as default. + # a tuple of only ``GET`` as default. if methods is None: methods = getattr(view_func, 'methods', None) or ('GET',) - methods = set(methods) + if isinstance(methods, string_types): + raise TypeError('Allowed methods have to be iterables of strings, ' + 'for example: @app.route(..., methods=["POST"])') + methods = set(item.upper() for item in methods) # Methods that should always be added required_methods = set(getattr(view_func, 'required_methods', ())) @@ -968,18 +1040,13 @@ def index(): # Add the required methods now. methods |= required_methods - # due to a werkzeug bug we need to make sure that the defaults are - # None if they are an empty dictionary. 
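Two behaviors in this hunk deserve a concrete sketch: `add_url_rule` is the primitive underneath `@app.route`, and `methods` must now be an iterable of strings (a bare string raises `TypeError` instead of being silently iterated character by character). Names here are illustrative.

```python
# Sketch: explicit add_url_rule() registration, equivalent to decorating
# submit() with @app.route('/submit', methods=['POST']).
from flask import Flask

app = Flask(__name__)

def submit():
    return 'ok'

app.add_url_rule('/submit', 'submit', submit, methods=['POST'])

# Note: methods='POST' (a bare string) would now raise TypeError.
with app.test_client() as c:
    assert c.post('/submit').data == b'ok'
    assert c.get('/submit').status_code == 405  # GET not in allowed methods
```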
This should not be necessary - # with Werkzeug 0.7 - options['defaults'] = options.get('defaults') or None - rule = self.url_rule_class(rule, methods=methods, **options) rule.provide_automatic_options = provide_automatic_options self.url_map.add(rule) if view_func is not None: old_func = self.view_functions.get(endpoint) - if old_func is not None and old_func is not view_func: + if old_func is not None and old_func != view_func: raise AssertionError('View function mapping is overwriting an ' 'existing endpoint function: %s' % endpoint) self.view_functions[endpoint] = view_func @@ -1003,9 +1070,9 @@ def index(): :class:`~werkzeug.routing.Rule` object. A change to Werkzeug is handling of method options. methods is a list of methods this rule should be limited - to (`GET`, `POST` etc.). By default a rule - just listens for `GET` (and implicitly `HEAD`). - Starting with Flask 0.6, `OPTIONS` is implicitly + to (``GET``, ``POST`` etc.). By default a rule + just listens for ``GET`` (and implicitly ``HEAD``). + Starting with Flask 0.6, ``OPTIONS`` is implicitly added and handled by the standard request handling. """ def decorator(f): @@ -1030,9 +1097,24 @@ def decorator(f): return f return decorator + @staticmethod + def _get_exc_class_and_code(exc_class_or_code): + """Ensure that we register only exceptions as handler keys""" + if isinstance(exc_class_or_code, integer_types): + exc_class = default_exceptions[exc_class_or_code] + else: + exc_class = exc_class_or_code + + assert issubclass(exc_class, Exception) + + if issubclass(exc_class, HTTPException): + return exc_class, exc_class.code + else: + return exc_class, None + @setupmethod def errorhandler(self, code_or_exception): - """A decorator that is used to register a function give a given + """A decorator that is used to register a function given an error code. Example:: @app.errorhandler(404) @@ -1057,15 +1139,21 @@ def page_not_found(error): however is discouraged as it requires fiddling with nested dictionaries and the special case for arbitrary exception types. - The first `None` refers to the active blueprint. If the error - handler should be application wide `None` shall be used. + The first ``None`` refers to the active blueprint. If the error + handler should be application wide ``None`` shall be used. + + .. versionadded:: 0.7 + Use :meth:`register_error_handler` instead of modifying + :attr:`error_handler_spec` directly, for application wide error + handlers. .. versionadded:: 0.7 One can now additionally also register custom exception types that do not necessarily have to be a subclass of the :class:`~werkzeug.exceptions.HTTPException` class. - :param code: the code as integer for the handler + :param code_or_exception: the code as integer for the handler, or + an arbitrary exception """ def decorator(f): self._register_error_handler(None, code_or_exception, f) @@ -1083,16 +1171,21 @@ def register_error_handler(self, code_or_exception, f): @setupmethod def _register_error_handler(self, key, code_or_exception, f): - if isinstance(code_or_exception, HTTPException): - code_or_exception = code_or_exception.code - if isinstance(code_or_exception, integer_types): - assert code_or_exception != 500 or key is None, \ - 'It is currently not possible to register a 500 internal ' \ - 'server error on a per-blueprint level.' 
- self.error_handler_spec.setdefault(key, {})[code_or_exception] = f - else: - self.error_handler_spec.setdefault(key, {}).setdefault(None, []) \ - .append((code_or_exception, f)) + """ + :type key: None|str + :type code_or_exception: int|T<=Exception + :type f: callable + """ + if isinstance(code_or_exception, HTTPException): # old broken behavior + raise ValueError( + 'Tried to register a handler for an exception instance {0!r}. ' + 'Handlers can only be registered for exception classes or HTTP error codes.' + .format(code_or_exception)) + + exc_class, code = self._get_exc_class_and_code(code_or_exception) + + handlers = self.error_handler_spec.setdefault(key, {}).setdefault(code, {}) + handlers[exc_class] = f @setupmethod def template_filter(self, name=None): @@ -1159,7 +1252,6 @@ def add_template_test(self, f, name=None): """ self.jinja_env.tests[name or f.__name__] = f - @setupmethod def template_global(self, name=None): """A decorator that is used to register a custom template global function. @@ -1194,7 +1286,13 @@ def add_template_global(self, f, name=None): @setupmethod def before_request(self, f): - """Registers a function to run before each request.""" + """Registers a function to run before each request. + + The function will be called without any arguments. + If the function returns a non-None value, it's handled as + if it was the return value from the view and further + request handling is stopped. + """ self.before_request_funcs.setdefault(None, []).append(f) return f @@ -1203,15 +1301,21 @@ def before_first_request(self, f): """Registers a function to be run before the first request to this instance of the application. + The function will be called without any arguments and its return + value is ignored. + .. versionadded:: 0.8 """ self.before_first_request_funcs.append(f) + return f @setupmethod def after_request(self, f): - """Register a function to be run after each request. Your function - must take one parameter, a :attr:`response_class` object and return - a new response object or the same (see :meth:`process_response`). + """Register a function to be run after each request. + + Your function must take one parameter, an instance of + :attr:`response_class` and return a new response object or the + same (see :meth:`process_response`). As of Flask 0.7 this function might not be executed at the end of the request in case an unhandled exception occurred. @@ -1246,10 +1350,12 @@ def teardown_request(self, f): When a teardown function was called because of a exception it will be passed an error object. + The return values of teardown functions are ignored. + .. admonition:: Debug Note In debug mode Flask will not tear down a request on an exception - immediately. Instead if will keep it alive so that the interactive + immediately. Instead it will keep it alive so that the interactive debugger can still access it. This behavior can be controlled by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable. """ @@ -1280,6 +1386,8 @@ def teardown_appcontext(self, f): When a teardown function was called because of an exception it will be passed an error object. + The return values of teardown functions are ignored. + .. versionadded:: 0.9 """ self.teardown_appcontext_funcs.append(f) @@ -1291,6 +1399,15 @@ def context_processor(self, f): self.template_context_processors[None].append(f) return f + @setupmethod + def shell_context_processor(self, f): + """Registers a shell context processor function. + + .. 
versionadded:: 0.11 + """ + self.shell_context_processors.append(f) + return f + @setupmethod def url_value_preprocessor(self, f): """Registers a function as URL value preprocessor for all view @@ -1309,6 +1426,33 @@ def url_defaults(self, f): self.url_default_functions.setdefault(None, []).append(f) return f + def _find_error_handler(self, e): + """Finds a registered error handler for the request’s blueprint. + Otherwise falls back to the app, returns None if not a suitable + handler is found. + """ + exc_class, code = self._get_exc_class_and_code(type(e)) + + def find_handler(handler_map): + if not handler_map: + return + for cls in exc_class.__mro__: + handler = handler_map.get(cls) + if handler is not None: + # cache for next time exc_class is raised + handler_map[exc_class] = handler + return handler + + # try blueprint handlers + handler = find_handler(self.error_handler_spec + .get(request.blueprint, {}) + .get(code)) + if handler is not None: + return handler + + # fall back to app handlers + return find_handler(self.error_handler_spec[None].get(code)) + def handle_http_exception(self, e): """Handles an HTTP exception. By default this will invoke the registered error handlers and fall back to returning the @@ -1316,27 +1460,24 @@ def handle_http_exception(self, e): .. versionadded:: 0.3 """ - handlers = self.error_handler_spec.get(request.blueprint) # Proxy exceptions don't have error codes. We want to always return # those unchanged as errors if e.code is None: return e - if handlers and e.code in handlers: - handler = handlers[e.code] - else: - handler = self.error_handler_spec[None].get(e.code) + + handler = self._find_error_handler(e) if handler is None: return e return handler(e) def trap_http_exception(self, e): """Checks if an HTTP exception should be trapped or not. By default - this will return `False` for all exceptions except for a bad request - key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to `True`. It - also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set to `True`. + this will return ``False`` for all exceptions except for a bad request + key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It + also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``. This is called for all HTTP exceptions raised by a view function. - If it returns `True` for any exception the error handler for this + If it returns ``True`` for any exception the error handler for this exception is not called and it shows up as regular exception in the traceback. This is helpful for debugging implicitly raised HTTP exceptions. @@ -1366,19 +1507,15 @@ def handle_user_exception(self, e): # wants the traceback preserved in handle_http_exception. Of course # we cannot prevent users from trashing it themselves in a custom # trap_http_exception method so that's their fault then. 
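The `_find_error_handler` helper above walks the exception type's MRO, so registering a handler for a base exception class now also catches its subclasses. A sketch with invented exception names:

```python
# Sketch of the MRO-based lookup in _find_error_handler(): DatabaseError
# has no handler of its own, so the AppError handler is used.
from flask import Flask

app = Flask(__name__)

class AppError(Exception):
    pass

class DatabaseError(AppError):
    pass

@app.errorhandler(AppError)
def handle_app_error(e):
    return 'app error: %s' % e, 500

@app.route('/boom')
def boom():
    raise DatabaseError('lost connection')

with app.test_client() as c:
    rv = c.get('/boom')
    assert rv.status_code == 500
    assert b'lost connection' in rv.data
```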
+ if isinstance(e, HTTPException) and not self.trap_http_exception(e): return self.handle_http_exception(e) - blueprint_handlers = () - handlers = self.error_handler_spec.get(request.blueprint) - if handlers is not None: - blueprint_handlers = handlers.get(None, ()) - app_handlers = self.error_handler_spec[None].get(None, ()) - for typecheck, handler in chain(blueprint_handlers, app_handlers): - if isinstance(e, typecheck): - return handler(e) + handler = self._find_error_handler(e) - reraise(exc_type, exc_value, tb) + if handler is None: + reraise(exc_type, exc_value, tb) + return handler(e) def handle_exception(self, e): """Default exception handling that kicks in when an exception @@ -1392,7 +1529,7 @@ def handle_exception(self, e): exc_type, exc_value, tb = sys.exc_info() got_request_exception.send(self, exception=e) - handler = self.error_handler_spec[None].get(500) + handler = self._find_error_handler(InternalServerError()) if self.propagate_exceptions: # if we want to repropagate the exception, we can attempt to @@ -1407,7 +1544,7 @@ def handle_exception(self, e): self.log_exception((exc_type, exc_value, tb)) if handler is None: return InternalServerError() - return handler(e) + return self.finalize_request(handler(e), from_error_handler=True) def log_exception(self, exc_info): """Logs an exception. This is called by :meth:`handle_exception` @@ -1475,9 +1612,30 @@ def full_dispatch_request(self): rv = self.dispatch_request() except Exception as e: rv = self.handle_user_exception(e) + return self.finalize_request(rv) + + def finalize_request(self, rv, from_error_handler=False): + """Given the return value from a view function this finalizes + the request by converting it into a response and invoking the + postprocessing functions. This is invoked for both normal + request dispatching as well as error handlers. + + Because this means that it might be called as a result of a + failure a special safe mode is available which can be enabled + with the `from_error_handler` flag. If enabled, failures in + response processing will be logged and otherwise ignored. + + :internal: + """ response = self.make_response(rv) - response = self.process_response(response) - request_finished.send(self, response=response) + try: + response = self.process_response(response) + request_finished.send(self, response=response) + except Exception: + if not from_error_handler: + raise + self.logger.exception('Request finalizing failed with an ' + 'error while handling an error') return response def try_trigger_before_first_request_functions(self): @@ -1492,14 +1650,14 @@ def try_trigger_before_first_request_functions(self): with self._before_request_lock: if self._got_first_request: return - self._got_first_request = True for func in self.before_first_request_funcs: func() + self._got_first_request = True def make_default_options_response(self): - """This method is called to create the default `OPTIONS` response. + """This method is called to create the default ``OPTIONS`` response. This can be changed through subclassing to change the default - behavior of `OPTIONS` responses. + behavior of ``OPTIONS`` responses. .. versionadded:: 0.7 """ @@ -1522,7 +1680,7 @@ def make_default_options_response(self): def should_ignore_error(self, error): """This is called to figure out if an error should be ignored or not as far as the teardown system is concerned. If this - function returns `True` then the teardown handlers will not be + function returns ``True`` then the teardown handlers will not be passed the error. .. 
versionadded:: 0.10 @@ -1546,9 +1704,10 @@ def make_response(self, rv): a WSGI function the function is called as WSGI application and buffered as response object :class:`tuple` A tuple in the form ``(response, status, - headers)`` where `response` is any of the + headers)`` or ``(response, headers)`` + where `response` is any of the types defined here, `status` is a string - or an integer and `headers` is a list of + or an integer and `headers` is a list or a dictionary with header values. ======================= =========================================== @@ -1558,29 +1717,33 @@ def make_response(self, rv): Previously a tuple was interpreted as the arguments for the response object. """ - status = headers = None + status_or_headers = headers = None if isinstance(rv, tuple): - rv, status, headers = rv + (None,) * (3 - len(rv)) + rv, status_or_headers, headers = rv + (None,) * (3 - len(rv)) if rv is None: raise ValueError('View function did not return a response') + if isinstance(status_or_headers, (dict, list)): + headers, status_or_headers = status_or_headers, None + if not isinstance(rv, self.response_class): # When we create a response object directly, we let the constructor # set the headers and status. We do this because there can be # some extra logic involved when creating these objects with # specific values (like default content type selection). if isinstance(rv, (text_type, bytes, bytearray)): - rv = self.response_class(rv, headers=headers, status=status) - headers = status = None + rv = self.response_class(rv, headers=headers, + status=status_or_headers) + headers = status_or_headers = None else: rv = self.response_class.force_type(rv, request.environ) - if status is not None: - if isinstance(status, string_types): - rv.status = status + if status_or_headers is not None: + if isinstance(status_or_headers, string_types): + rv.status = status_or_headers else: - rv.status_code = status + rv.status_code = status_or_headers if headers: rv.headers.extend(headers) @@ -1631,8 +1794,9 @@ def handle_url_build_error(self, error, endpoint, values): rv = handler(error, endpoint, values) if rv is not None: return rv - except BuildError as error: - pass + except BuildError as e: + # make error available outside except block (py3) + error = e # At this point we want to reraise the exception. If the error is # still the same one we can reraise it with the original traceback, @@ -1643,12 +1807,13 @@ def handle_url_build_error(self, error, endpoint, values): def preprocess_request(self): """Called before the actual request dispatching and will - call every as :meth:`before_request` decorated function. - If any of these function returns a value it's handled as + call each :meth:`before_request` decorated function, passing no + arguments. + If any of these functions returns a value, it's handled as if it was the return value from the view and further request handling is stopped. - This also triggers the :meth:`url_value_processor` functions before + This also triggers the :meth:`url_value_preprocessor` functions before the actual :meth:`before_request` functions are called. """ bp = _request_ctx_stack.top.request.blueprint @@ -1693,7 +1858,7 @@ def process_response(self, response): self.save_session(ctx.session, response) return response - def do_teardown_request(self, exc=None): + def do_teardown_request(self, exc=_sentinel): """Called after the actual request dispatching and will call every as :meth:`teardown_request` decorated function. 
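The ``make_response`` rewrite in this hunk accepts a two-tuple ``(response, headers)`` in addition to the classic three-tuple, by treating a ``dict`` or ``list`` in the second slot as headers rather than a status. A hedged sketch of the return shapes a view may now use (route names invented):

.. code:: python

    from flask import Flask

    app = Flask(__name__)

    @app.route('/created')
    def created():
        # three-tuple (body, status, headers); headers may be a list or a dict
        return 'created', 201, {'X-Trace-Id': 'abc123'}

    @app.route('/tagged')
    def tagged():
        # two-tuple (body, headers), newly supported by this change
        return 'ok', {'X-Trace-Id': 'abc123'}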
This is not actually called by the :class:`Flask` object itself but is always @@ -1704,24 +1869,24 @@ def do_teardown_request(self, exc=None): Added the `exc` argument. Previously this was always using the current exception information. """ - if exc is None: + if exc is _sentinel: exc = sys.exc_info()[1] funcs = reversed(self.teardown_request_funcs.get(None, ())) bp = _request_ctx_stack.top.request.blueprint if bp is not None and bp in self.teardown_request_funcs: funcs = chain(funcs, reversed(self.teardown_request_funcs[bp])) for func in funcs: - rv = func(exc) + func(exc) request_tearing_down.send(self, exc=exc) - def do_teardown_appcontext(self, exc=None): + def do_teardown_appcontext(self, exc=_sentinel): """Called when an application context is popped. This works pretty much the same as :meth:`do_teardown_request` but for the application context. .. versionadded:: 0.9 """ - if exc is None: + if exc is _sentinel: exc = sys.exc_info()[1] for func in reversed(self.teardown_appcontext_funcs): func(exc) @@ -1745,15 +1910,15 @@ def app_context(self): def request_context(self, environ): """Creates a :class:`~flask.ctx.RequestContext` from the given environment and binds it to the current context. This must be used in - combination with the `with` statement because the request is only bound - to the current context for the duration of the `with` block. + combination with the ``with`` statement because the request is only bound + to the current context for the duration of the ``with`` block. Example usage:: with app.request_context(environ): do_something_with(request) - The object returned can also be used without the `with` statement + The object returned can also be used without the ``with`` statement which is useful for working in the shell. The example above is doing exactly the same as this code:: @@ -1765,7 +1930,7 @@ def request_context(self, environ): ctx.pop() .. versionchanged:: 0.3 - Added support for non-with statement usage and `with` statement + Added support for non-with statement usage and ``with`` statement is now passed the ctx object. :param environ: a WSGI environment @@ -1774,7 +1939,7 @@ def request_context(self, environ): def test_request_context(self, *args, **kwargs): """Creates a WSGI environment from the given values (see - :func:`werkzeug.test.EnvironBuilder` for more information, this + :class:`werkzeug.test.EnvironBuilder` for more information, this function accepts the same arguments). 
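The switch from ``exc=None`` to ``exc=_sentinel`` in the teardown hooks above lets callers pass an explicit ``None`` to mean "no error", while omitting the argument still falls back to the in-flight exception. A standalone sketch of the idiom, not Flask's actual code:

.. code:: python

    import sys

    _sentinel = object()  # module-level singleton, as in the hunk above

    def do_teardown(exc=_sentinel):
        # Omitted argument -> use the current exception, if any.
        # Explicit exc=None -> genuinely "no error", a case the old
        # exc=None default could not distinguish.
        if exc is _sentinel:
            exc = sys.exc_info()[1]
        return exc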
""" from flask.testing import make_test_environ_builder @@ -1817,20 +1982,16 @@ def wsgi_app(self, environ, start_response): response = self.full_dispatch_request() except Exception as e: error = e - response = self.make_response(self.handle_exception(e)) + response = self.handle_exception(e) + except: + error = sys.exc_info()[1] + raise return response(environ, start_response) finally: if self.should_ignore_error(error): error = None ctx.auto_pop(error) - @property - def modules(self): - from warnings import warn - warn(DeprecationWarning('Flask.modules is deprecated, use ' - 'Flask.blueprints instead'), stacklevel=2) - return self.blueprints - def __call__(self, environ, start_response): """Shortcut for :attr:`wsgi_app`.""" return self.wsgi_app(environ, start_response) diff --git a/app/lib/flask/blueprints.py b/app/lib/flask/blueprints.py new file mode 100644 index 0000000..586a1b0 --- /dev/null +++ b/app/lib/flask/blueprints.py @@ -0,0 +1,413 @@ +# -*- coding: utf-8 -*- +""" + flask.blueprints + ~~~~~~~~~~~~~~~~ + + Blueprints are the recommended way to implement larger or more + pluggable applications in Flask 0.7 and later. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +from functools import update_wrapper + +from .helpers import _PackageBoundObject, _endpoint_from_view_func + + +class BlueprintSetupState(object): + """Temporary holder object for registering a blueprint with the + application. An instance of this class is created by the + :meth:`~flask.Blueprint.make_setup_state` method and later passed + to all register callback functions. + """ + + def __init__(self, blueprint, app, options, first_registration): + #: a reference to the current application + self.app = app + + #: a reference to the blueprint that created this setup state. + self.blueprint = blueprint + + #: a dictionary with all options that were passed to the + #: :meth:`~flask.Flask.register_blueprint` method. + self.options = options + + #: as blueprints can be registered multiple times with the + #: application and not everything wants to be registered + #: multiple times on it, this attribute can be used to figure + #: out if the blueprint was registered in the past already. + self.first_registration = first_registration + + subdomain = self.options.get('subdomain') + if subdomain is None: + subdomain = self.blueprint.subdomain + + #: The subdomain that the blueprint should be active for, ``None`` + #: otherwise. + self.subdomain = subdomain + + url_prefix = self.options.get('url_prefix') + if url_prefix is None: + url_prefix = self.blueprint.url_prefix + + #: The prefix that should be used for all URLs defined on the + #: blueprint. + self.url_prefix = url_prefix + + #: A dictionary with URL defaults that is added to each and every + #: URL that was defined with the blueprint. + self.url_defaults = dict(self.blueprint.url_values_defaults) + self.url_defaults.update(self.options.get('url_defaults', ())) + + def add_url_rule(self, rule, endpoint=None, view_func=None, **options): + """A helper method to register a rule (and optionally a view function) + to the application. The endpoint is automatically prefixed with the + blueprint's name. 
+ """ + if self.url_prefix: + rule = self.url_prefix + rule + options.setdefault('subdomain', self.subdomain) + if endpoint is None: + endpoint = _endpoint_from_view_func(view_func) + defaults = self.url_defaults + if 'defaults' in options: + defaults = dict(defaults, **options.pop('defaults')) + self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint), + view_func, defaults=defaults, **options) + + +class Blueprint(_PackageBoundObject): + """Represents a blueprint. A blueprint is an object that records + functions that will be called with the + :class:`~flask.blueprints.BlueprintSetupState` later to register functions + or other things on the main application. See :ref:`blueprints` for more + information. + + .. versionadded:: 0.7 + """ + + warn_on_modifications = False + _got_registered_once = False + + def __init__(self, name, import_name, static_folder=None, + static_url_path=None, template_folder=None, + url_prefix=None, subdomain=None, url_defaults=None, + root_path=None): + _PackageBoundObject.__init__(self, import_name, template_folder, + root_path=root_path) + self.name = name + self.url_prefix = url_prefix + self.subdomain = subdomain + self.static_folder = static_folder + self.static_url_path = static_url_path + self.deferred_functions = [] + if url_defaults is None: + url_defaults = {} + self.url_values_defaults = url_defaults + + def record(self, func): + """Registers a function that is called when the blueprint is + registered on the application. This function is called with the + state as argument as returned by the :meth:`make_setup_state` + method. + """ + if self._got_registered_once and self.warn_on_modifications: + from warnings import warn + warn(Warning('The blueprint was already registered once ' + 'but is getting modified now. These changes ' + 'will not show up.')) + self.deferred_functions.append(func) + + def record_once(self, func): + """Works like :meth:`record` but wraps the function in another + function that will ensure the function is only called once. If the + blueprint is registered a second time on the application, the + function passed is not called. + """ + def wrapper(state): + if state.first_registration: + func(state) + return self.record(update_wrapper(wrapper, func)) + + def make_setup_state(self, app, options, first_registration=False): + """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState` + object that is later passed to the register callback functions. + Subclasses can override this to return a subclass of the setup state. + """ + return BlueprintSetupState(self, app, options, first_registration) + + def register(self, app, options, first_registration=False): + """Called by :meth:`Flask.register_blueprint` to register a blueprint + on the application. This can be overridden to customize the register + behavior. Keyword arguments from + :func:`~flask.Flask.register_blueprint` are directly forwarded to this + method in the `options` dictionary. + """ + self._got_registered_once = True + state = self.make_setup_state(app, options, first_registration) + if self.has_static_folder: + state.add_url_rule(self.static_url_path + '/', + view_func=self.send_static_file, + endpoint='static') + + for deferred in self.deferred_functions: + deferred(state) + + def route(self, rule, **options): + """Like :meth:`Flask.route` but for a blueprint. The endpoint for the + :func:`url_for` function is prefixed with the name of the blueprint. 
+ """ + def decorator(f): + endpoint = options.pop("endpoint", f.__name__) + self.add_url_rule(rule, endpoint, f, **options) + return f + return decorator + + def add_url_rule(self, rule, endpoint=None, view_func=None, **options): + """Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for + the :func:`url_for` function is prefixed with the name of the blueprint. + """ + if endpoint: + assert '.' not in endpoint, "Blueprint endpoints should not contain dots" + self.record(lambda s: + s.add_url_rule(rule, endpoint, view_func, **options)) + + def endpoint(self, endpoint): + """Like :meth:`Flask.endpoint` but for a blueprint. This does not + prefix the endpoint with the blueprint name, this has to be done + explicitly by the user of this method. If the endpoint is prefixed + with a `.` it will be registered to the current blueprint, otherwise + it's an application independent endpoint. + """ + def decorator(f): + def register_endpoint(state): + state.app.view_functions[endpoint] = f + self.record_once(register_endpoint) + return f + return decorator + + def app_template_filter(self, name=None): + """Register a custom template filter, available application wide. Like + :meth:`Flask.template_filter` but for a blueprint. + + :param name: the optional name of the filter, otherwise the + function name will be used. + """ + def decorator(f): + self.add_app_template_filter(f, name=name) + return f + return decorator + + def add_app_template_filter(self, f, name=None): + """Register a custom template filter, available application wide. Like + :meth:`Flask.add_template_filter` but for a blueprint. Works exactly + like the :meth:`app_template_filter` decorator. + + :param name: the optional name of the filter, otherwise the + function name will be used. + """ + def register_template(state): + state.app.jinja_env.filters[name or f.__name__] = f + self.record_once(register_template) + + def app_template_test(self, name=None): + """Register a custom template test, available application wide. Like + :meth:`Flask.template_test` but for a blueprint. + + .. versionadded:: 0.10 + + :param name: the optional name of the test, otherwise the + function name will be used. + """ + def decorator(f): + self.add_app_template_test(f, name=name) + return f + return decorator + + def add_app_template_test(self, f, name=None): + """Register a custom template test, available application wide. Like + :meth:`Flask.add_template_test` but for a blueprint. Works exactly + like the :meth:`app_template_test` decorator. + + .. versionadded:: 0.10 + + :param name: the optional name of the test, otherwise the + function name will be used. + """ + def register_template(state): + state.app.jinja_env.tests[name or f.__name__] = f + self.record_once(register_template) + + def app_template_global(self, name=None): + """Register a custom template global, available application wide. Like + :meth:`Flask.template_global` but for a blueprint. + + .. versionadded:: 0.10 + + :param name: the optional name of the global, otherwise the + function name will be used. + """ + def decorator(f): + self.add_app_template_global(f, name=name) + return f + return decorator + + def add_app_template_global(self, f, name=None): + """Register a custom template global, available application wide. Like + :meth:`Flask.add_template_global` but for a blueprint. Works exactly + like the :meth:`app_template_global` decorator. + + .. versionadded:: 0.10 + + :param name: the optional name of the global, otherwise the + function name will be used. 
+ """ + def register_template(state): + state.app.jinja_env.globals[name or f.__name__] = f + self.record_once(register_template) + + def before_request(self, f): + """Like :meth:`Flask.before_request` but for a blueprint. This function + is only executed before each request that is handled by a function of + that blueprint. + """ + self.record_once(lambda s: s.app.before_request_funcs + .setdefault(self.name, []).append(f)) + return f + + def before_app_request(self, f): + """Like :meth:`Flask.before_request`. Such a function is executed + before each request, even if outside of a blueprint. + """ + self.record_once(lambda s: s.app.before_request_funcs + .setdefault(None, []).append(f)) + return f + + def before_app_first_request(self, f): + """Like :meth:`Flask.before_first_request`. Such a function is + executed before the first request to the application. + """ + self.record_once(lambda s: s.app.before_first_request_funcs.append(f)) + return f + + def after_request(self, f): + """Like :meth:`Flask.after_request` but for a blueprint. This function + is only executed after each request that is handled by a function of + that blueprint. + """ + self.record_once(lambda s: s.app.after_request_funcs + .setdefault(self.name, []).append(f)) + return f + + def after_app_request(self, f): + """Like :meth:`Flask.after_request` but for a blueprint. Such a function + is executed after each request, even if outside of the blueprint. + """ + self.record_once(lambda s: s.app.after_request_funcs + .setdefault(None, []).append(f)) + return f + + def teardown_request(self, f): + """Like :meth:`Flask.teardown_request` but for a blueprint. This + function is only executed when tearing down requests handled by a + function of that blueprint. Teardown request functions are executed + when the request context is popped, even when no actual request was + performed. + """ + self.record_once(lambda s: s.app.teardown_request_funcs + .setdefault(self.name, []).append(f)) + return f + + def teardown_app_request(self, f): + """Like :meth:`Flask.teardown_request` but for a blueprint. Such a + function is executed when tearing down each request, even if outside of + the blueprint. + """ + self.record_once(lambda s: s.app.teardown_request_funcs + .setdefault(None, []).append(f)) + return f + + def context_processor(self, f): + """Like :meth:`Flask.context_processor` but for a blueprint. This + function is only executed for requests handled by a blueprint. + """ + self.record_once(lambda s: s.app.template_context_processors + .setdefault(self.name, []).append(f)) + return f + + def app_context_processor(self, f): + """Like :meth:`Flask.context_processor` but for a blueprint. Such a + function is executed each request, even if outside of the blueprint. + """ + self.record_once(lambda s: s.app.template_context_processors + .setdefault(None, []).append(f)) + return f + + def app_errorhandler(self, code): + """Like :meth:`Flask.errorhandler` but for a blueprint. This + handler is used for all requests, even if outside of the blueprint. + """ + def decorator(f): + self.record_once(lambda s: s.app.errorhandler(code)(f)) + return f + return decorator + + def url_value_preprocessor(self, f): + """Registers a function as URL value preprocessor for this + blueprint. It's called before the view functions are called and + can modify the url values provided. 
+ """ + self.record_once(lambda s: s.app.url_value_preprocessors + .setdefault(self.name, []).append(f)) + return f + + def url_defaults(self, f): + """Callback function for URL defaults for this blueprint. It's called + with the endpoint and values and should update the values passed + in place. + """ + self.record_once(lambda s: s.app.url_default_functions + .setdefault(self.name, []).append(f)) + return f + + def app_url_value_preprocessor(self, f): + """Same as :meth:`url_value_preprocessor` but application wide. + """ + self.record_once(lambda s: s.app.url_value_preprocessors + .setdefault(None, []).append(f)) + return f + + def app_url_defaults(self, f): + """Same as :meth:`url_defaults` but application wide. + """ + self.record_once(lambda s: s.app.url_default_functions + .setdefault(None, []).append(f)) + return f + + def errorhandler(self, code_or_exception): + """Registers an error handler that becomes active for this blueprint + only. Please be aware that routing does not happen local to a + blueprint so an error handler for 404 usually is not handled by + a blueprint unless it is caused inside a view function. Another + special case is the 500 internal server error which is always looked + up from the application. + + Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator + of the :class:`~flask.Flask` object. + """ + def decorator(f): + self.record_once(lambda s: s.app._register_error_handler( + self.name, code_or_exception, f)) + return f + return decorator + + def register_error_handler(self, code_or_exception, f): + """Non-decorator version of the :meth:`errorhandler` error attach + function, akin to the :meth:`~flask.Flask.register_error_handler` + application-wide function of the :class:`~flask.Flask` object but + for error handlers limited to this blueprint. + + .. versionadded:: 0.11 + """ + self.record_once(lambda s: s.app._register_error_handler( + self.name, code_or_exception, f)) diff --git a/app/lib/flask/cli.py b/app/lib/flask/cli.py new file mode 100644 index 0000000..074ee76 --- /dev/null +++ b/app/lib/flask/cli.py @@ -0,0 +1,517 @@ +# -*- coding: utf-8 -*- +""" + flask.cli + ~~~~~~~~~ + + A simple command line application to run flask apps. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +import os +import sys +from threading import Lock, Thread +from functools import update_wrapper + +import click + +from ._compat import iteritems, reraise +from .helpers import get_debug_flag +from . import __version__ + +class NoAppException(click.UsageError): + """Raised if an application cannot be found or loaded.""" + + +def find_best_app(module): + """Given a module instance this tries to find the best possible + application in the module or raises an exception. + """ + from . import Flask + + # Search for the most common names first. + for attr_name in 'app', 'application': + app = getattr(module, attr_name, None) + if app is not None and isinstance(app, Flask): + return app + + # Otherwise find the only object that is a Flask instance. + matches = [v for k, v in iteritems(module.__dict__) + if isinstance(v, Flask)] + + if len(matches) == 1: + return matches[0] + raise NoAppException('Failed to find application in module "%s". Are ' + 'you sure it contains a Flask application? Maybe ' + 'you wrapped it in a WSGI middleware or you are ' + 'using a factory function.' 
% module.__name__) + + +def prepare_exec_for_file(filename): + """Given a filename this will try to calculate the python path, add it + to the search path and return the actual module name that is expected. + """ + module = [] + + # Chop off file extensions or package markers + if os.path.split(filename)[1] == '__init__.py': + filename = os.path.dirname(filename) + elif filename.endswith('.py'): + filename = filename[:-3] + else: + raise NoAppException('The file provided (%s) does exist but is not a ' + 'valid Python file. This means that it cannot ' + 'be used as application. Please change the ' + 'extension to .py' % filename) + filename = os.path.realpath(filename) + + dirpath = filename + while 1: + dirpath, extra = os.path.split(dirpath) + module.append(extra) + if not os.path.isfile(os.path.join(dirpath, '__init__.py')): + break + + sys.path.insert(0, dirpath) + return '.'.join(module[::-1]) + + +def locate_app(app_id): + """Attempts to locate the application.""" + __traceback_hide__ = True + if ':' in app_id: + module, app_obj = app_id.split(':', 1) + else: + module = app_id + app_obj = None + + try: + __import__(module) + except ImportError: + # Reraise the ImportError if it occurred within the imported module. + # Determine this by checking whether the trace has a depth > 1. + if sys.exc_info()[-1].tb_next: + raise + else: + raise NoAppException('The file/path provided (%s) does not appear' + ' to exist. Please verify the path is ' + 'correct. If app is not on PYTHONPATH, ' + 'ensure the extension is .py' % module) + + mod = sys.modules[module] + if app_obj is None: + app = find_best_app(mod) + else: + app = getattr(mod, app_obj, None) + if app is None: + raise RuntimeError('Failed to find application in module "%s"' + % module) + + return app + + +def find_default_import_path(): + app = os.environ.get('FLASK_APP') + if app is None: + return + if os.path.isfile(app): + return prepare_exec_for_file(app) + return app + + +def get_version(ctx, param, value): + if not value or ctx.resilient_parsing: + return + message = 'Flask %(version)s\nPython %(python_version)s' + click.echo(message % { + 'version': __version__, + 'python_version': sys.version, + }, color=ctx.color) + ctx.exit() + +version_option = click.Option(['--version'], + help='Show the flask version', + expose_value=False, + callback=get_version, + is_flag=True, is_eager=True) + +class DispatchingApp(object): + """Special application that dispatches to a Flask application which + is imported by name in a background thread. If an error happens + it is recorded and shown as part of the WSGI handling which in case + of the Werkzeug debugger means that it shows up in the browser. 
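``locate_app`` above parses an optional ``module:attribute`` app id before importing. A standalone sketch of that split, not the Flask function itself:

.. code:: python

    def split_app_id(app_id):
        # Mirrors the 'module:attribute' convention locate_app parses above.
        if ':' in app_id:
            module, app_obj = app_id.split(':', 1)
        else:
            module, app_obj = app_id, None
        return module, app_obj

    assert split_app_id('wiki.web:app') == ('wiki.web', 'app')
    assert split_app_id('wiki.web') == ('wiki.web', None)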
+ """ + + def __init__(self, loader, use_eager_loading=False): + self.loader = loader + self._app = None + self._lock = Lock() + self._bg_loading_exc_info = None + if use_eager_loading: + self._load_unlocked() + else: + self._load_in_background() + + def _load_in_background(self): + def _load_app(): + __traceback_hide__ = True + with self._lock: + try: + self._load_unlocked() + except Exception: + self._bg_loading_exc_info = sys.exc_info() + t = Thread(target=_load_app, args=()) + t.start() + + def _flush_bg_loading_exception(self): + __traceback_hide__ = True + exc_info = self._bg_loading_exc_info + if exc_info is not None: + self._bg_loading_exc_info = None + reraise(*exc_info) + + def _load_unlocked(self): + __traceback_hide__ = True + self._app = rv = self.loader() + self._bg_loading_exc_info = None + return rv + + def __call__(self, environ, start_response): + __traceback_hide__ = True + if self._app is not None: + return self._app(environ, start_response) + self._flush_bg_loading_exception() + with self._lock: + if self._app is not None: + rv = self._app + else: + rv = self._load_unlocked() + return rv(environ, start_response) + + +class ScriptInfo(object): + """Help object to deal with Flask applications. This is usually not + necessary to interface with as it's used internally in the dispatching + to click. In future versions of Flask this object will most likely play + a bigger role. Typically it's created automatically by the + :class:`FlaskGroup` but you can also manually create it and pass it + onwards as click object. + """ + + def __init__(self, app_import_path=None, create_app=None): + if create_app is None: + if app_import_path is None: + app_import_path = find_default_import_path() + self.app_import_path = app_import_path + else: + app_import_path = None + + #: Optionally the import path for the Flask application. + self.app_import_path = app_import_path + #: Optionally a function that is passed the script info to create + #: the instance of the application. + self.create_app = create_app + #: A dictionary with arbitrary data that can be associated with + #: this script info. + self.data = {} + self._loaded_app = None + + def load_app(self): + """Loads the Flask app (if not yet loaded) and returns it. Calling + this multiple times will just result in the already loaded app to + be returned. + """ + __traceback_hide__ = True + if self._loaded_app is not None: + return self._loaded_app + if self.create_app is not None: + rv = self.create_app(self) + else: + if not self.app_import_path: + raise NoAppException( + 'Could not locate Flask application. You did not provide ' + 'the FLASK_APP environment variable.\n\nFor more ' + 'information see ' + 'http://flask.pocoo.org/docs/latest/quickstart/') + rv = locate_app(self.app_import_path) + debug = get_debug_flag() + if debug is not None: + rv.debug = debug + self._loaded_app = rv + return rv + + +pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True) + + +def with_appcontext(f): + """Wraps a callback so that it's guaranteed to be executed with the + script's application context. If callbacks are registered directly + to the ``app.cli`` object then they are wrapped with this function + by default unless it's disabled. 
+ """ + @click.pass_context + def decorator(__ctx, *args, **kwargs): + with __ctx.ensure_object(ScriptInfo).load_app().app_context(): + return __ctx.invoke(f, *args, **kwargs) + return update_wrapper(decorator, f) + + +class AppGroup(click.Group): + """This works similar to a regular click :class:`~click.Group` but it + changes the behavior of the :meth:`command` decorator so that it + automatically wraps the functions in :func:`with_appcontext`. + + Not to be confused with :class:`FlaskGroup`. + """ + + def command(self, *args, **kwargs): + """This works exactly like the method of the same name on a regular + :class:`click.Group` but it wraps callbacks in :func:`with_appcontext` + unless it's disabled by passing ``with_appcontext=False``. + """ + wrap_for_ctx = kwargs.pop('with_appcontext', True) + def decorator(f): + if wrap_for_ctx: + f = with_appcontext(f) + return click.Group.command(self, *args, **kwargs)(f) + return decorator + + def group(self, *args, **kwargs): + """This works exactly like the method of the same name on a regular + :class:`click.Group` but it defaults the group class to + :class:`AppGroup`. + """ + kwargs.setdefault('cls', AppGroup) + return click.Group.group(self, *args, **kwargs) + + +class FlaskGroup(AppGroup): + """Special subclass of the :class:`AppGroup` group that supports + loading more commands from the configured Flask app. Normally a + developer does not have to interface with this class but there are + some very advanced use cases for which it makes sense to create an + instance of this. + + For information as of why this is useful see :ref:`custom-scripts`. + + :param add_default_commands: if this is True then the default run and + shell commands wil be added. + :param add_version_option: adds the ``--version`` option. + :param create_app: an optional callback that is passed the script info + and returns the loaded app. + """ + + def __init__(self, add_default_commands=True, create_app=None, + add_version_option=True, **extra): + params = list(extra.pop('params', None) or ()) + + if add_version_option: + params.append(version_option) + + AppGroup.__init__(self, params=params, **extra) + self.create_app = create_app + + if add_default_commands: + self.add_command(run_command) + self.add_command(shell_command) + + self._loaded_plugin_commands = False + + def _load_plugin_commands(self): + if self._loaded_plugin_commands: + return + try: + import pkg_resources + except ImportError: + self._loaded_plugin_commands = True + return + + for ep in pkg_resources.iter_entry_points('flask.commands'): + self.add_command(ep.load(), ep.name) + self._loaded_plugin_commands = True + + def get_command(self, ctx, name): + self._load_plugin_commands() + + # We load built-in commands first as these should always be the + # same no matter what the app does. If the app does want to + # override this it needs to make a custom instance of this group + # and not attach the default commands. + # + # This also means that the script stays functional in case the + # application completely fails. + rv = AppGroup.get_command(self, ctx, name) + if rv is not None: + return rv + + info = ctx.ensure_object(ScriptInfo) + try: + rv = info.load_app().cli.get_command(ctx, name) + if rv is not None: + return rv + except NoAppException: + pass + + def list_commands(self, ctx): + self._load_plugin_commands() + + # The commands available is the list of both the application (if + # available) plus the builtin commands. 
+ rv = set(click.Group.list_commands(self, ctx)) + info = ctx.ensure_object(ScriptInfo) + try: + rv.update(info.load_app().cli.list_commands(ctx)) + except Exception: + # Here we intentionally swallow all exceptions as we don't + # want the help page to break if the app does not exist. + # If someone attempts to use the command we try to create + # the app again and this will give us the error. + pass + return sorted(rv) + + def main(self, *args, **kwargs): + obj = kwargs.get('obj') + if obj is None: + obj = ScriptInfo(create_app=self.create_app) + kwargs['obj'] = obj + kwargs.setdefault('auto_envvar_prefix', 'FLASK') + return AppGroup.main(self, *args, **kwargs) + + +@click.command('run', short_help='Runs a development server.') +@click.option('--host', '-h', default='127.0.0.1', + help='The interface to bind to.') +@click.option('--port', '-p', default=5000, + help='The port to bind to.') +@click.option('--reload/--no-reload', default=None, + help='Enable or disable the reloader. By default the reloader ' + 'is active if debug is enabled.') +@click.option('--debugger/--no-debugger', default=None, + help='Enable or disable the debugger. By default the debugger ' + 'is active if debug is enabled.') +@click.option('--eager-loading/--lazy-loader', default=None, + help='Enable or disable eager loading. By default eager ' + 'loading is enabled if the reloader is disabled.') +@click.option('--with-threads/--without-threads', default=False, + help='Enable or disable multithreading.') +@pass_script_info +def run_command(info, host, port, reload, debugger, eager_loading, + with_threads): + """Runs a local development server for the Flask application. + + This local server is recommended for development purposes only but it + can also be used for simple intranet deployments. By default it will + not support any sort of concurrency at all to simplify debugging. This + can be changed with the --with-threads option which will enable basic + multithreading. + + The reloader and debugger are by default enabled if the debug flag of + Flask is enabled and disabled otherwise. + """ + from werkzeug.serving import run_simple + + debug = get_debug_flag() + if reload is None: + reload = bool(debug) + if debugger is None: + debugger = bool(debug) + if eager_loading is None: + eager_loading = not reload + + app = DispatchingApp(info.load_app, use_eager_loading=eager_loading) + + # Extra startup messages. This depends a bit on Werkzeug internals to + # not double execute when the reloader kicks in. + if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': + # If we have an import path we can print it out now which can help + # people understand what's being served. If we do not have an + # import path because the app was loaded through a callback then + # we won't print anything. + if info.app_import_path is not None: + print(' * Serving Flask app "%s"' % info.app_import_path) + if debug is not None: + print(' * Forcing debug mode %s' % (debug and 'on' or 'off')) + + run_simple(host, port, app, use_reloader=reload, + use_debugger=debugger, threaded=with_threads) + + +@click.command('shell', short_help='Runs a shell in the app context.') +@with_appcontext +def shell_command(): + """Runs an interactive Python shell in the context of a given + Flask application. The application will populate the default + namespace of this shell according to it's configuration. + + This is useful for executing small snippets of management code + without having to manually configuring the application. 
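The shell's default namespace comes from ``app.make_shell_context()``, which is fed by the ``shell_context_processor`` decorator added earlier in this diff. A sketch with a placeholder object:

.. code:: python

    from flask import Flask

    app = Flask(__name__)

    @app.shell_context_processor
    def make_shell_context():
        return {'answer': 42}   # hypothetical objects to pre-import

    # Then:  FLASK_APP=hello.py flask shell  ->  `answer` is available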
+ """ + import code + from flask.globals import _app_ctx_stack + app = _app_ctx_stack.top.app + banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % ( + sys.version, + sys.platform, + app.import_name, + app.debug and ' [debug]' or '', + app.instance_path, + ) + ctx = {} + + # Support the regular Python interpreter startup script if someone + # is using it. + startup = os.environ.get('PYTHONSTARTUP') + if startup and os.path.isfile(startup): + with open(startup, 'r') as f: + eval(compile(f.read(), startup, 'exec'), ctx) + + ctx.update(app.make_shell_context()) + + code.interact(banner=banner, local=ctx) + + +cli = FlaskGroup(help="""\ +This shell command acts as general utility script for Flask applications. + +It loads the application configured (through the FLASK_APP environment +variable) and then provides commands either provided by the application or +Flask itself. + +The most useful commands are the "run" and "shell" command. + +Example usage: + +\b + %(prefix)s%(cmd)s FLASK_APP=hello.py + %(prefix)s%(cmd)s FLASK_DEBUG=1 + %(prefix)sflask run +""" % { + 'cmd': os.name == 'posix' and 'export' or 'set', + 'prefix': os.name == 'posix' and '$ ' or '', +}) + + +def main(as_module=False): + this_module = __package__ + '.cli' + args = sys.argv[1:] + + if as_module: + if sys.version_info >= (2, 7): + name = 'python -m ' + this_module.rsplit('.', 1)[0] + else: + name = 'python -m ' + this_module + + # This module is always executed as "python -m flask.run" and as such + # we need to ensure that we restore the actual command line so that + # the reloader can properly operate. + sys.argv = ['-m', this_module] + sys.argv[1:] + else: + name = None + + cli.main(args=args, prog_name=name) + + +if __name__ == '__main__': + main(as_module=True) diff --git a/app/lib/flask/config.py b/app/lib/flask/config.py new file mode 100644 index 0000000..697add7 --- /dev/null +++ b/app/lib/flask/config.py @@ -0,0 +1,263 @@ +# -*- coding: utf-8 -*- +""" + flask.config + ~~~~~~~~~~~~ + + Implements the configuration related objects. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +import os +import types +import errno + +from werkzeug.utils import import_string +from ._compat import string_types, iteritems +from . import json + + +class ConfigAttribute(object): + """Makes an attribute forward to the config""" + + def __init__(self, name, get_converter=None): + self.__name__ = name + self.get_converter = get_converter + + def __get__(self, obj, type=None): + if obj is None: + return self + rv = obj.config[self.__name__] + if self.get_converter is not None: + rv = self.get_converter(rv) + return rv + + def __set__(self, obj, value): + obj.config[self.__name__] = value + + +class Config(dict): + """Works exactly like a dict but provides ways to fill it from files + or special dictionaries. There are two common patterns to populate the + config. + + Either you can fill the config from a config file:: + + app.config.from_pyfile('yourconfig.cfg') + + Or alternatively you can define the configuration options in the + module that calls :meth:`from_object` or provide an import path to + a module that should be loaded. It is also possible to tell it to + use the same module and with that provide the configuration values + just before the call:: + + DEBUG = True + SECRET_KEY = 'development key' + app.config.from_object(__name__) + + In both cases (loading from any Python file or loading from modules), + only uppercase keys are added to the config. 
This makes it possible to use + lowercase values in the config file for temporary values that are not added + to the config or to define the config keys in the same file that implements + the application. + + Probably the most interesting way to load configurations is from an + environment variable pointing to a file:: + + app.config.from_envvar('YOURAPPLICATION_SETTINGS') + + In this case before launching the application you have to set this + environment variable to the file you want to use. On Linux and OS X + use the export statement:: + + export YOURAPPLICATION_SETTINGS='/path/to/config/file' + + On windows use `set` instead. + + :param root_path: path to which files are read relative from. When the + config object is created by the application, this is + the application's :attr:`~flask.Flask.root_path`. + :param defaults: an optional dictionary of default values + """ + + def __init__(self, root_path, defaults=None): + dict.__init__(self, defaults or {}) + self.root_path = root_path + + def from_envvar(self, variable_name, silent=False): + """Loads a configuration from an environment variable pointing to + a configuration file. This is basically just a shortcut with nicer + error messages for this line of code:: + + app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS']) + + :param variable_name: name of the environment variable + :param silent: set to ``True`` if you want silent failure for missing + files. + :return: bool. ``True`` if able to load config, ``False`` otherwise. + """ + rv = os.environ.get(variable_name) + if not rv: + if silent: + return False + raise RuntimeError('The environment variable %r is not set ' + 'and as such configuration could not be ' + 'loaded. Set this variable and make it ' + 'point to a configuration file' % + variable_name) + return self.from_pyfile(rv, silent=silent) + + def from_pyfile(self, filename, silent=False): + """Updates the values in the config from a Python file. This function + behaves as if the file was imported as module with the + :meth:`from_object` function. + + :param filename: the filename of the config. This can either be an + absolute filename or a filename relative to the + root path. + :param silent: set to ``True`` if you want silent failure for missing + files. + + .. versionadded:: 0.7 + `silent` parameter. + """ + filename = os.path.join(self.root_path, filename) + d = types.ModuleType('config') + d.__file__ = filename + try: + with open(filename, mode='rb') as config_file: + exec(compile(config_file.read(), filename, 'exec'), d.__dict__) + except IOError as e: + if silent and e.errno in (errno.ENOENT, errno.EISDIR): + return False + e.strerror = 'Unable to load configuration file (%s)' % e.strerror + raise + self.from_object(d) + return True + + def from_object(self, obj): + """Updates the values from the given object. An object can be of one + of the following two types: + + - a string: in this case the object with that name will be imported + - an actual object reference: that object is used directly + + Objects are usually either modules or classes. :meth:`from_object` + loads only the uppercase attributes of the module/class. A ``dict`` + object will not work with :meth:`from_object` because the keys of a + ``dict`` are not attributes of the ``dict`` class. 
+ + Example of module-based configuration:: + + app.config.from_object('yourapplication.default_config') + from yourapplication import default_config + app.config.from_object(default_config) + + You should not use this function to load the actual configuration but + rather configuration defaults. The actual config should be loaded + with :meth:`from_pyfile` and ideally from a location not within the + package because the package might be installed system wide. + + See :ref:`config-dev-prod` for an example of class-based configuration + using :meth:`from_object`. + + :param obj: an import name or object + """ + if isinstance(obj, string_types): + obj = import_string(obj) + for key in dir(obj): + if key.isupper(): + self[key] = getattr(obj, key) + + def from_json(self, filename, silent=False): + """Updates the values in the config from a JSON file. This function + behaves as if the JSON object was a dictionary and passed to the + :meth:`from_mapping` function. + + :param filename: the filename of the JSON file. This can either be an + absolute filename or a filename relative to the + root path. + :param silent: set to ``True`` if you want silent failure for missing + files. + + .. versionadded:: 0.11 + """ + filename = os.path.join(self.root_path, filename) + + try: + with open(filename) as json_file: + obj = json.loads(json_file.read()) + except IOError as e: + if silent and e.errno in (errno.ENOENT, errno.EISDIR): + return False + e.strerror = 'Unable to load configuration file (%s)' % e.strerror + raise + return self.from_mapping(obj) + + def from_mapping(self, *mapping, **kwargs): + """Updates the config like :meth:`update` ignoring items with non-upper + keys. + + .. versionadded:: 0.11 + """ + mappings = [] + if len(mapping) == 1: + if hasattr(mapping[0], 'items'): + mappings.append(mapping[0].items()) + else: + mappings.append(mapping[0]) + elif len(mapping) > 1: + raise TypeError( + 'expected at most 1 positional argument, got %d' % len(mapping) + ) + mappings.append(kwargs.items()) + for mapping in mappings: + for (key, value) in mapping: + if key.isupper(): + self[key] = value + return True + + def get_namespace(self, namespace, lowercase=True, trim_namespace=True): + """Returns a dictionary containing a subset of configuration options + that match the specified namespace/prefix. Example usage:: + + app.config['IMAGE_STORE_TYPE'] = 'fs' + app.config['IMAGE_STORE_PATH'] = '/var/app/images' + app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com' + image_store_config = app.config.get_namespace('IMAGE_STORE_') + + The resulting dictionary `image_store_config` would look like:: + + { + 'type': 'fs', + 'path': '/var/app/images', + 'base_url': 'http://img.website.com' + } + + This is often useful when configuration options map directly to + keyword arguments in functions or class constructors. + + :param namespace: a configuration namespace + :param lowercase: a flag indicating if the keys of the resulting + dictionary should be lowercase + :param trim_namespace: a flag indicating if the keys of the resulting + dictionary should not include the namespace + + .. 
versionadded:: 0.11 + """ + rv = {} + for k, v in iteritems(self): + if not k.startswith(namespace): + continue + if trim_namespace: + key = k[len(namespace):] + else: + key = k + if lowercase: + key = key.lower() + rv[key] = v + return rv + + def __repr__(self): + return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self)) diff --git a/src/lib/flask/ctx.py b/app/lib/flask/ctx.py similarity index 81% rename from src/lib/flask/ctx.py rename to app/lib/flask/ctx.py index f134237..480d9c5 100644 --- a/src/lib/flask/ctx.py +++ b/app/lib/flask/ctx.py @@ -5,20 +5,22 @@ Implements the objects required to keep the context. - :copyright: (c) 2011 by Armin Ronacher. + :copyright: (c) 2015 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ -from __future__ import with_statement - import sys from functools import update_wrapper from werkzeug.exceptions import HTTPException from .globals import _request_ctx_stack, _app_ctx_stack -from .module import blueprint_is_module from .signals import appcontext_pushed, appcontext_popped +from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise + + +# a singleton sentinel value for parameter defaults +_sentinel = object() class _AppCtxGlobals(object): @@ -27,6 +29,15 @@ class _AppCtxGlobals(object): def get(self, name, default=None): return self.__dict__.get(name, default) + def pop(self, name, default=_sentinel): + if default is _sentinel: + return self.__dict__.pop(name) + else: + return self.__dict__.pop(name, default) + + def setdefault(self, name, default=None): + return self.__dict__.setdefault(name, default) + def __contains__(self, item): return item in self.__dict__ @@ -163,17 +174,21 @@ def __init__(self, app): def push(self): """Binds the app context to the current context.""" self._refcnt += 1 + if hasattr(sys, 'exc_clear'): + sys.exc_clear() _app_ctx_stack.push(self) appcontext_pushed.send(self.app) - def pop(self, exc=None): + def pop(self, exc=_sentinel): """Pops the app context.""" - self._refcnt -= 1 - if self._refcnt <= 0: - if exc is None: - exc = sys.exc_info()[1] - self.app.do_teardown_appcontext(exc) - rv = _app_ctx_stack.pop() + try: + self._refcnt -= 1 + if self._refcnt <= 0: + if exc is _sentinel: + exc = sys.exc_info()[1] + self.app.do_teardown_appcontext(exc) + finally: + rv = _app_ctx_stack.pop() assert rv is self, 'Popped wrong app context. (%r instead of %r)' \ % (rv, self) appcontext_popped.send(self.app) @@ -185,6 +200,9 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, tb): self.pop(exc_value) + if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None: + reraise(exc_type, exc_value, tb) + class RequestContext(object): """The request context contains all request relevant information. It is @@ -204,8 +222,8 @@ class RequestContext(object): for you. In debug mode the request context is kept around if exceptions happen so that interactive debuggers have a chance to introspect the data. With 0.4 this can also be forced for requests - that did not fail and outside of `DEBUG` mode. By setting - ``'flask._preserve_context'`` to `True` on the WSGI environment the + that did not fail and outside of ``DEBUG`` mode. By setting + ``'flask._preserve_context'`` to ``True`` on the WSGI environment the context will not pop itself at the end of the request. This is used by the :meth:`~flask.Flask.test_client` for example to implement the deferred cleanup functionality. @@ -246,16 +264,6 @@ def __init__(self, app, environ, request=None): self.match_request() - # XXX: Support for deprecated functionality. 
This is going away with - # Flask 1.0 - blueprint = self.request.blueprint - if blueprint is not None: - # better safe than sorry, we don't want to break code that - # already worked - bp = app.blueprints.get(blueprint) - if bp is not None and blueprint_is_module(bp): - self.request._is_old_module = True - def _get_g(self): return _app_ctx_stack.top.g def _set_g(self, value): @@ -296,7 +304,7 @@ def push(self): # information under debug situations. However if someone forgets to # pop that context again we want to make sure that on the next push # it's invalidated, otherwise we run at risk that something leaks - # memory. This is usually only a problem in testsuite since this + # memory. This is usually only a problem in test suite since this # functionality is not active in production environments. top = _request_ctx_stack.top if top is not None and top.preserved: @@ -312,6 +320,9 @@ def push(self): else: self._implicit_app_ctx_stack.append(None) + if hasattr(sys, 'exc_clear'): + sys.exc_clear() + _request_ctx_stack.push(self) # Open the session at the moment that the request context is @@ -322,7 +333,7 @@ def push(self): if self.session is None: self.session = self.app.make_null_session() - def pop(self, exc=None): + def pop(self, exc=_sentinel): """Pops the request context and unbinds it by doing that. This will also trigger the execution of functions registered by the :meth:`~flask.Flask.teardown_request` decorator. @@ -332,38 +343,40 @@ def pop(self, exc=None): """ app_ctx = self._implicit_app_ctx_stack.pop() - clear_request = False - if not self._implicit_app_ctx_stack: - self.preserved = False - self._preserved_exc = None - if exc is None: - exc = sys.exc_info()[1] - self.app.do_teardown_request(exc) - - # If this interpreter supports clearing the exception information - # we do that now. This will only go into effect on Python 2.x, - # on 3.x it disappears automatically at the end of the exception - # stack. - if hasattr(sys, 'exc_clear'): - sys.exc_clear() - - request_close = getattr(self.request, 'close', None) - if request_close is not None: - request_close() - clear_request = True - - rv = _request_ctx_stack.pop() - assert rv is self, 'Popped wrong request context. (%r instead of %r)' \ - % (rv, self) - - # get rid of circular dependencies at the end of the request - # so that we don't require the GC to be active. - if clear_request: - rv.request.environ['werkzeug.request'] = None - - # Get rid of the app as well if necessary. - if app_ctx is not None: - app_ctx.pop(exc) + try: + clear_request = False + if not self._implicit_app_ctx_stack: + self.preserved = False + self._preserved_exc = None + if exc is _sentinel: + exc = sys.exc_info()[1] + self.app.do_teardown_request(exc) + + # If this interpreter supports clearing the exception information + # we do that now. This will only go into effect on Python 2.x, + # on 3.x it disappears automatically at the end of the exception + # stack. + if hasattr(sys, 'exc_clear'): + sys.exc_clear() + + request_close = getattr(self.request, 'close', None) + if request_close is not None: + request_close() + clear_request = True + finally: + rv = _request_ctx_stack.pop() + + # get rid of circular dependencies at the end of the request + # so that we don't require the GC to be active. + if clear_request: + rv.request.environ['werkzeug.request'] = None + + # Get rid of the app as well if necessary. + if app_ctx is not None: + app_ctx.pop(exc) + + assert rv is self, 'Popped wrong request context. 
' \ + '(%r instead of %r)' % (rv, self) def auto_pop(self, exc): if self.request.environ.get('flask._preserve_context') or \ @@ -385,6 +398,9 @@ def __exit__(self, exc_type, exc_value, tb): # See flask.testing for how this works. self.auto_pop(exc_value) + if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None: + reraise(exc_type, exc_value, tb) + def __repr__(self): return '<%s \'%s\' [%s] of %s>' % ( self.__class__.__name__, diff --git a/app/lib/flask/debughelpers.py b/app/lib/flask/debughelpers.py new file mode 100644 index 0000000..90710dd --- /dev/null +++ b/app/lib/flask/debughelpers.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +""" + flask.debughelpers + ~~~~~~~~~~~~~~~~~~ + + Various helpers to make the development experience better. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +from ._compat import implements_to_string, text_type +from .app import Flask +from .blueprints import Blueprint +from .globals import _request_ctx_stack + + +class UnexpectedUnicodeError(AssertionError, UnicodeError): + """Raised in places where we want some better error reporting for + unexpected unicode or binary data. + """ + + +@implements_to_string +class DebugFilesKeyError(KeyError, AssertionError): + """Raised from request.files during debugging. The idea is that it can + provide a better error message than just a generic KeyError/BadRequest. + """ + + def __init__(self, request, key): + form_matches = request.form.getlist(key) + buf = ['You tried to access the file "%s" in the request.files ' + 'dictionary but it does not exist. The mimetype for the request ' + 'is "%s" instead of "multipart/form-data" which means that no ' + 'file contents were transmitted. To fix this error you should ' + 'provide enctype="multipart/form-data" in your form.' % + (key, request.mimetype)] + if form_matches: + buf.append('\n\nThe browser instead transmitted some file names. ' + 'This was submitted: %s' % ', '.join('"%s"' % x + for x in form_matches)) + self.msg = ''.join(buf) + + def __str__(self): + return self.msg + + +class FormDataRoutingRedirect(AssertionError): + """This exception is raised by Flask in debug mode if it detects a + redirect caused by the routing system when the request method is not + GET, HEAD or OPTIONS. Reasoning: form data will be dropped. + """ + + def __init__(self, request): + exc = request.routing_exception + buf = ['A request was sent to this URL (%s) but a redirect was ' + 'issued automatically by the routing system to "%s".' + % (request.url, exc.new_url)] + + # In case just a slash was appended we can be extra helpful + if request.base_url + '/' == exc.new_url.split('?')[0]: + buf.append(' The URL was defined with a trailing slash so ' + 'Flask will automatically redirect to the URL ' + 'with the trailing slash if it was accessed ' + 'without one.') + + buf.append(' Make sure to directly send your %s-request to this URL ' + 'since we can\'t make browsers or HTTP clients redirect ' + 'with form data reliably or without user interaction.' % + request.method) + buf.append('\n\nNote: this exception is only raised in debug mode') + AssertionError.__init__(self, ''.join(buf).encode('utf-8')) + + +def attach_enctype_error_multidict(request): + """Since Flask 0.8 we're monkeypatching the files object in case a + request is detected that does not use multipart form data but the files + object is accessed. 
+ """ + oldcls = request.files.__class__ + class newcls(oldcls): + def __getitem__(self, key): + try: + return oldcls.__getitem__(self, key) + except KeyError: + if key not in request.form: + raise + raise DebugFilesKeyError(request, key) + newcls.__name__ = oldcls.__name__ + newcls.__module__ = oldcls.__module__ + request.files.__class__ = newcls + + +def _dump_loader_info(loader): + yield 'class: %s.%s' % (type(loader).__module__, type(loader).__name__) + for key, value in sorted(loader.__dict__.items()): + if key.startswith('_'): + continue + if isinstance(value, (tuple, list)): + if not all(isinstance(x, (str, text_type)) for x in value): + continue + yield '%s:' % key + for item in value: + yield ' - %s' % item + continue + elif not isinstance(value, (str, text_type, int, float, bool)): + continue + yield '%s: %r' % (key, value) + + +def explain_template_loading_attempts(app, template, attempts): + """This should help developers understand what failed""" + info = ['Locating template "%s":' % template] + total_found = 0 + blueprint = None + reqctx = _request_ctx_stack.top + if reqctx is not None and reqctx.request.blueprint is not None: + blueprint = reqctx.request.blueprint + + for idx, (loader, srcobj, triple) in enumerate(attempts): + if isinstance(srcobj, Flask): + src_info = 'application "%s"' % srcobj.import_name + elif isinstance(srcobj, Blueprint): + src_info = 'blueprint "%s" (%s)' % (srcobj.name, + srcobj.import_name) + else: + src_info = repr(srcobj) + + info.append('% 5d: trying loader of %s' % ( + idx + 1, src_info)) + + for line in _dump_loader_info(loader): + info.append(' %s' % line) + + if triple is None: + detail = 'no match' + else: + detail = 'found (%r)' % (triple[1] or '') + total_found += 1 + info.append(' -> %s' % detail) + + seems_fishy = False + if total_found == 0: + info.append('Error: the template could not be found.') + seems_fishy = True + elif total_found > 1: + info.append('Warning: multiple loaders returned a match for the template.') + seems_fishy = True + + if blueprint is not None and seems_fishy: + info.append(' The template was looked up from an endpoint that ' + 'belongs to the blueprint "%s".' % blueprint) + info.append(' Maybe you did not place a template in the right folder?') + info.append(' See http://flask.pocoo.org/docs/blueprints/#templates') + + app.logger.info('\n'.join(info)) diff --git a/app/lib/flask/ext/__init__.py b/app/lib/flask/ext/__init__.py new file mode 100644 index 0000000..051f44a --- /dev/null +++ b/app/lib/flask/ext/__init__.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +""" + flask.ext + ~~~~~~~~~ + + Redirect imports for extensions. This module basically makes it possible + for us to transition from flaskext.foo to flask_foo without having to + force all extensions to upgrade at the same time. + + When a user does ``from flask.ext.foo import bar`` it will attempt to + import ``from flask_foo import bar`` first and when that fails it will + try to import ``from flaskext.foo import bar``. + + We're switching from namespace packages because it was just too painful for + everybody involved. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. 
+""" + + +def setup(): + from ..exthook import ExtensionImporter + importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], __name__) + importer.install() + + +setup() +del setup diff --git a/src/lib/flask/exthook.py b/app/lib/flask/exthook.py similarity index 85% rename from src/lib/flask/exthook.py rename to app/lib/flask/exthook.py index d0d814c..d884280 100644 --- a/src/lib/flask/exthook.py +++ b/app/lib/flask/exthook.py @@ -16,14 +16,21 @@ This is used by `flask.ext`. - :copyright: (c) 2011 by Armin Ronacher. + :copyright: (c) 2015 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import sys import os +import warnings from ._compat import reraise +class ExtDeprecationWarning(DeprecationWarning): + pass + +warnings.simplefilter('always', ExtDeprecationWarning) + + class ExtensionImporter(object): """This importer redirects imports from this submodule to other locations. This makes it possible to transition from the old flaskext.name to the @@ -49,13 +56,21 @@ def install(self): sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self] def find_module(self, fullname, path=None): - if fullname.startswith(self.prefix): + if fullname.startswith(self.prefix) and \ + fullname != 'flask.ext.ExtDeprecationWarning': return self def load_module(self, fullname): if fullname in sys.modules: return sys.modules[fullname] + modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff] + + warnings.warn( + "Importing flask.ext.{x} is deprecated, use flask_{x} instead." + .format(x=modname), ExtDeprecationWarning, stacklevel=2 + ) + for path in self.module_choices: realname = path % modname try: @@ -83,6 +98,14 @@ def load_module(self, fullname): module = sys.modules[fullname] = sys.modules[realname] if '.' not in modname: setattr(sys.modules[self.wrapper_module], modname, module) + + if realname.startswith('flaskext.'): + warnings.warn( + "Detected extension named flaskext.{x}, please rename it " + "to flask_{x}. The old form is deprecated." + .format(x=modname), ExtDeprecationWarning + ) + return module raise ImportError('No module named %s' % fullname) @@ -111,7 +134,7 @@ def is_important_frame(self, important_module, tb): if module_name == important_module: return True - # Some python versions will will clean up modules so early that the + # Some python versions will clean up modules so early that the # module name at that point is no longer set. Try guessing from # the filename then. filename = os.path.abspath(tb.tb_frame.f_code.co_filename) diff --git a/app/lib/flask/globals.py b/app/lib/flask/globals.py new file mode 100644 index 0000000..0b70a3e --- /dev/null +++ b/app/lib/flask/globals.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +""" + flask.globals + ~~~~~~~~~~~~~ + + Defines all the global objects that are proxies to the current + active context. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +from functools import partial +from werkzeug.local import LocalStack, LocalProxy + + +_request_ctx_err_msg = '''\ +Working outside of request context. + +This typically means that you attempted to use functionality that needed +an active HTTP request. Consult the documentation on testing for +information about how to avoid this problem.\ +''' +_app_ctx_err_msg = '''\ +Working outside of application context. + +This typically means that you attempted to use functionality that needed +to interface with the current application object in a way. To solve +this set up an application context with app.app_context(). 
See the +documentation for more information.\ +''' + + +def _lookup_req_object(name): + top = _request_ctx_stack.top + if top is None: + raise RuntimeError(_request_ctx_err_msg) + return getattr(top, name) + + +def _lookup_app_object(name): + top = _app_ctx_stack.top + if top is None: + raise RuntimeError(_app_ctx_err_msg) + return getattr(top, name) + + +def _find_app(): + top = _app_ctx_stack.top + if top is None: + raise RuntimeError(_app_ctx_err_msg) + return top.app + + +# context locals +_request_ctx_stack = LocalStack() +_app_ctx_stack = LocalStack() +current_app = LocalProxy(_find_app) +request = LocalProxy(partial(_lookup_req_object, 'request')) +session = LocalProxy(partial(_lookup_req_object, 'session')) +g = LocalProxy(partial(_lookup_app_object, 'g')) diff --git a/app/lib/flask/helpers.py b/app/lib/flask/helpers.py new file mode 100644 index 0000000..c6c2cdd --- /dev/null +++ b/app/lib/flask/helpers.py @@ -0,0 +1,960 @@ +# -*- coding: utf-8 -*- +""" + flask.helpers + ~~~~~~~~~~~~~ + + Implements various helpers. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +import os +import sys +import pkgutil +import posixpath +import mimetypes +from time import time +from zlib import adler32 +from threading import RLock +from werkzeug.routing import BuildError +from functools import update_wrapper + +try: + from werkzeug.urls import url_quote +except ImportError: + from urlparse import quote as url_quote + +from werkzeug.datastructures import Headers, Range +from werkzeug.exceptions import BadRequest, NotFound, \ + RequestedRangeNotSatisfiable + +# this was moved in 0.7 +try: + from werkzeug.wsgi import wrap_file +except ImportError: + from werkzeug.utils import wrap_file + +from jinja2 import FileSystemLoader + +from .signals import message_flashed +from .globals import session, _request_ctx_stack, _app_ctx_stack, \ + current_app, request +from ._compat import string_types, text_type + + +# sentinel +_missing = object() + + +# what separators does this operating system provide that are not a slash? +# this is used by the send_from_directory function to ensure that nobody is +# able to access files from outside the filesystem. +_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep] + if sep not in (None, '/')) + + +def get_debug_flag(default=None): + val = os.environ.get('FLASK_DEBUG') + if not val: + return default + return val not in ('0', 'false', 'no') + + +def _endpoint_from_view_func(view_func): + """Internal helper that returns the default endpoint for a given + function. This always is the function name. + """ + assert view_func is not None, 'expected view func if endpoint ' \ + 'is not provided.' + return view_func.__name__ + + +def stream_with_context(generator_or_function): + """Request contexts disappear when the response is started on the server. + This is done for efficiency reasons and to make it less likely to encounter + memory leaks with badly written WSGI middlewares. The downside is that if + you are using streamed responses, the generator cannot access request bound + information any more. + + This function however can help you keep the context around for longer:: + + from flask import stream_with_context, request, Response + + @app.route('/stream') + def streamed_response(): + @stream_with_context + def generate(): + yield 'Hello ' + yield request.args['name'] + yield '!' 
+ return Response(generate()) + + Alternatively it can also be used around a specific generator:: + + from flask import stream_with_context, request, Response + + @app.route('/stream') + def streamed_response(): + def generate(): + yield 'Hello ' + yield request.args['name'] + yield '!' + return Response(stream_with_context(generate())) + + .. versionadded:: 0.9 + """ + try: + gen = iter(generator_or_function) + except TypeError: + def decorator(*args, **kwargs): + gen = generator_or_function(*args, **kwargs) + return stream_with_context(gen) + return update_wrapper(decorator, generator_or_function) + + def generator(): + ctx = _request_ctx_stack.top + if ctx is None: + raise RuntimeError('Attempted to stream with context but ' + 'there was no context in the first place to keep around.') + with ctx: + # Dummy sentinel. Has to be inside the context block or we're + # not actually keeping the context around. + yield None + + # The try/finally is here so that if someone passes a WSGI level + # iterator in we're still running the cleanup logic. Generators + # don't need that because they are closed on their destruction + # automatically. + try: + for item in gen: + yield item + finally: + if hasattr(gen, 'close'): + gen.close() + + # The trick is to start the generator. Then the code execution runs until + # the first dummy None is yielded at which point the context was already + # pushed. This item is discarded. Then when the iteration continues the + # real generator is executed. + wrapped_g = generator() + next(wrapped_g) + return wrapped_g + + +def make_response(*args): + """Sometimes it is necessary to set additional headers in a view. Because + views do not have to return response objects but can return a value that + is converted into a response object by Flask itself, it becomes tricky to + add headers to it. This function can be called instead of using a return + and you will get a response object which you can use to attach headers. + + If view looked like this and you want to add a new header:: + + def index(): + return render_template('index.html', foo=42) + + You can now do something like this:: + + def index(): + response = make_response(render_template('index.html', foo=42)) + response.headers['X-Parachutes'] = 'parachutes are cool' + return response + + This function accepts the very same arguments you can return from a + view function. This for example creates a response with a 404 error + code:: + + response = make_response(render_template('not_found.html'), 404) + + The other use case of this function is to force the return value of a + view function into a response which is helpful with view + decorators:: + + response = make_response(view_function()) + response.headers['X-Parachutes'] = 'parachutes are cool' + + Internally this function does the following things: + + - if no arguments are passed, it creates a new response argument + - if one argument is passed, :meth:`flask.Flask.make_response` + is invoked with it. + - if more than one argument is passed, the arguments are passed + to the :meth:`flask.Flask.make_response` function as tuple. + + .. versionadded:: 0.6 + """ + if not args: + return current_app.response_class() + if len(args) == 1: + args = args[0] + return current_app.make_response(args) + + +def url_for(endpoint, **values): + """Generates a URL to the given endpoint with the method provided. + + Variable arguments that are unknown to the target endpoint are appended + to the generated URL as query arguments. 
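+    For example, with a route ``/index`` bound to the endpoint ``index``
+    (an illustrative sketch, not a route defined here)::
+
+        url_for('index', page=2)   # -> '/index?page=2'
+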
If the value of a query argument + is ``None``, the whole pair is skipped. In case blueprints are active + you can shortcut references to the same blueprint by prefixing the + local endpoint with a dot (``.``). + + This will reference the index function local to the current blueprint:: + + url_for('.index') + + For more information, head over to the :ref:`Quickstart `. + + To integrate applications, :class:`Flask` has a hook to intercept URL build + errors through :attr:`Flask.url_build_error_handlers`. The `url_for` + function results in a :exc:`~werkzeug.routing.BuildError` when the current + app does not have a URL for the given endpoint and values. When it does, the + :data:`~flask.current_app` calls its :attr:`~Flask.url_build_error_handlers` if + it is not ``None``, which can return a string to use as the result of + `url_for` (instead of `url_for`'s default to raise the + :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception. + An example:: + + def external_url_handler(error, endpoint, values): + "Looks up an external URL when `url_for` cannot build a URL." + # This is an example of hooking the build_error_handler. + # Here, lookup_url is some utility function you've built + # which looks up the endpoint in some external URL registry. + url = lookup_url(endpoint, **values) + if url is None: + # External lookup did not have a URL. + # Re-raise the BuildError, in context of original traceback. + exc_type, exc_value, tb = sys.exc_info() + if exc_value is error: + raise exc_type, exc_value, tb + else: + raise error + # url_for will use this result, instead of raising BuildError. + return url + + app.url_build_error_handlers.append(external_url_handler) + + Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and + `endpoint` and `values` are the arguments passed into `url_for`. Note + that this is for building URLs outside the current application, and not for + handling 404 NotFound errors. + + .. versionadded:: 0.10 + The `_scheme` parameter was added. + + .. versionadded:: 0.9 + The `_anchor` and `_method` parameters were added. + + .. versionadded:: 0.9 + Calls :meth:`Flask.handle_build_error` on + :exc:`~werkzeug.routing.BuildError`. + + :param endpoint: the endpoint of the URL (name of the function) + :param values: the variable arguments of the URL rule + :param _external: if set to ``True``, an absolute URL is generated. Server + address can be changed via ``SERVER_NAME`` configuration variable which + defaults to `localhost`. + :param _scheme: a string specifying the desired URL scheme. The `_external` + parameter must be set to ``True`` or a :exc:`ValueError` is raised. The default + behavior uses the same scheme as the current request, or + ``PREFERRED_URL_SCHEME`` from the :ref:`app configuration ` if no + request context is available. As of Werkzeug 0.10, this also can be set + to an empty string to build protocol-relative URLs. + :param _anchor: if provided this is added as anchor to the URL. + :param _method: if provided this explicitly specifies an HTTP method. + """ + appctx = _app_ctx_stack.top + reqctx = _request_ctx_stack.top + if appctx is None: + raise RuntimeError('Attempted to generate a URL without the ' + 'application context being pushed. This has to be ' + 'executed when application context is available.') + + # If request specific information is available we have some extra + # features that support "relative" URLs. 
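+    # E.g. while handling a request for a blueprint named 'admin' (a
+    # hypothetical name), url_for('.index') resolves to the endpoint
+    # 'admin.index' before the URL is built below.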
+ if reqctx is not None: + url_adapter = reqctx.url_adapter + blueprint_name = request.blueprint + if not reqctx.request._is_old_module: + if endpoint[:1] == '.': + if blueprint_name is not None: + endpoint = blueprint_name + endpoint + else: + endpoint = endpoint[1:] + else: + # TODO: get rid of this deprecated functionality in 1.0 + if '.' not in endpoint: + if blueprint_name is not None: + endpoint = blueprint_name + '.' + endpoint + elif endpoint.startswith('.'): + endpoint = endpoint[1:] + external = values.pop('_external', False) + + # Otherwise go with the url adapter from the appctx and make + # the URLs external by default. + else: + url_adapter = appctx.url_adapter + if url_adapter is None: + raise RuntimeError('Application was not able to create a URL ' + 'adapter for request independent URL generation. ' + 'You might be able to fix this by setting ' + 'the SERVER_NAME config variable.') + external = values.pop('_external', True) + + anchor = values.pop('_anchor', None) + method = values.pop('_method', None) + scheme = values.pop('_scheme', None) + appctx.app.inject_url_defaults(endpoint, values) + + # This is not the best way to deal with this but currently the + # underlying Werkzeug router does not support overriding the scheme on + # a per build call basis. + old_scheme = None + if scheme is not None: + if not external: + raise ValueError('When specifying _scheme, _external must be True') + old_scheme = url_adapter.url_scheme + url_adapter.url_scheme = scheme + + try: + try: + rv = url_adapter.build(endpoint, values, method=method, + force_external=external) + finally: + if old_scheme is not None: + url_adapter.url_scheme = old_scheme + except BuildError as error: + # We need to inject the values again so that the app callback can + # deal with that sort of stuff. + values['_external'] = external + values['_anchor'] = anchor + values['_method'] = method + return appctx.app.handle_url_build_error(error, endpoint, values) + + if anchor is not None: + rv += '#' + url_quote(anchor) + return rv + + +def get_template_attribute(template_name, attribute): + """Loads a macro (or variable) a template exports. This can be used to + invoke a macro from within Python code. If you for example have a + template named :file:`_cider.html` with the following contents: + + .. sourcecode:: html+jinja + + {% macro hello(name) %}Hello {{ name }}!{% endmacro %} + + You can access this from Python code like this:: + + hello = get_template_attribute('_cider.html', 'hello') + return hello('World') + + .. versionadded:: 0.2 + + :param template_name: the name of the template + :param attribute: the name of the variable of macro to access + """ + return getattr(current_app.jinja_env.get_template(template_name).module, + attribute) + + +def flash(message, category='message'): + """Flashes a message to the next request. In order to remove the + flashed message from the session and to display it to the user, + the template has to call :func:`get_flashed_messages`. + + .. versionchanged:: 0.3 + `category` parameter added. + + :param message: the message to be flashed. + :param category: the category for the message. The following values + are recommended: ``'message'`` for any kind of message, + ``'error'`` for errors, ``'info'`` for information + messages and ``'warning'`` for warnings. However any + kind of string can be used as category. 
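+
+    A minimal usage sketch::
+
+        flash('Settings saved', 'info')
+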
+ """ + # Original implementation: + # + # session.setdefault('_flashes', []).append((category, message)) + # + # This assumed that changes made to mutable structures in the session are + # are always in sync with the session object, which is not true for session + # implementations that use external storage for keeping their keys/values. + flashes = session.get('_flashes', []) + flashes.append((category, message)) + session['_flashes'] = flashes + message_flashed.send(current_app._get_current_object(), + message=message, category=category) + + +def get_flashed_messages(with_categories=False, category_filter=[]): + """Pulls all flashed messages from the session and returns them. + Further calls in the same request to the function will return + the same messages. By default just the messages are returned, + but when `with_categories` is set to ``True``, the return value will + be a list of tuples in the form ``(category, message)`` instead. + + Filter the flashed messages to one or more categories by providing those + categories in `category_filter`. This allows rendering categories in + separate html blocks. The `with_categories` and `category_filter` + arguments are distinct: + + * `with_categories` controls whether categories are returned with message + text (``True`` gives a tuple, where ``False`` gives just the message text). + * `category_filter` filters the messages down to only those matching the + provided categories. + + See :ref:`message-flashing-pattern` for examples. + + .. versionchanged:: 0.3 + `with_categories` parameter added. + + .. versionchanged:: 0.9 + `category_filter` parameter added. + + :param with_categories: set to ``True`` to also receive categories. + :param category_filter: whitelist of categories to limit return values + """ + flashes = _request_ctx_stack.top.flashes + if flashes is None: + _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \ + if '_flashes' in session else [] + if category_filter: + flashes = list(filter(lambda f: f[0] in category_filter, flashes)) + if not with_categories: + return [x[1] for x in flashes] + return flashes + + +def send_file(filename_or_fp, mimetype=None, as_attachment=False, + attachment_filename=None, add_etags=True, + cache_timeout=None, conditional=False, last_modified=None): + """Sends the contents of a file to the client. This will use the + most efficient method available and configured. By default it will + try to use the WSGI server's file_wrapper support. Alternatively + you can set the application's :attr:`~Flask.use_x_sendfile` attribute + to ``True`` to directly emit an ``X-Sendfile`` header. This however + requires support of the underlying webserver for ``X-Sendfile``. + + By default it will try to guess the mimetype for you, but you can + also explicitly provide one. For extra security you probably want + to send certain files as attachment (HTML for instance). The mimetype + guessing requires a `filename` or an `attachment_filename` to be + provided. + + ETags will also be attached automatically if a `filename` is provided. You + can turn this off by setting `add_etags=False`. + + If `conditional=True` and `filename` is provided, this method will try to + upgrade the response stream to support range requests. This will allow + the request to be answered with partial content response. + + Please never pass filenames to this function from user sources; + you should use :func:`send_from_directory` instead. + + .. versionadded:: 0.2 + + .. 
versionadded:: 0.5 + The `add_etags`, `cache_timeout` and `conditional` parameters were + added. The default behavior is now to attach etags. + + .. versionchanged:: 0.7 + mimetype guessing and etag support for file objects was + deprecated because it was unreliable. Pass a filename if you are + able to, otherwise attach an etag yourself. This functionality + will be removed in Flask 1.0 + + .. versionchanged:: 0.9 + cache_timeout pulls its default from application config, when None. + + .. versionchanged:: 0.12 + The filename is no longer automatically inferred from file objects. If + you want to use automatic mimetype and etag support, pass a filepath via + `filename_or_fp` or `attachment_filename`. + + .. versionchanged:: 0.12 + The `attachment_filename` is preferred over `filename` for MIME-type + detection. + + :param filename_or_fp: the filename of the file to send in `latin-1`. + This is relative to the :attr:`~Flask.root_path` + if a relative path is specified. + Alternatively a file object might be provided in + which case ``X-Sendfile`` might not work and fall + back to the traditional method. Make sure that the + file pointer is positioned at the start of data to + send before calling :func:`send_file`. + :param mimetype: the mimetype of the file if provided. If a file path is + given, auto detection happens as fallback, otherwise an + error will be raised. + :param as_attachment: set to ``True`` if you want to send this file with + a ``Content-Disposition: attachment`` header. + :param attachment_filename: the filename for the attachment if it + differs from the file's filename. + :param add_etags: set to ``False`` to disable attaching of etags. + :param conditional: set to ``True`` to enable conditional responses. + + :param cache_timeout: the timeout in seconds for the headers. When ``None`` + (default), this value is set by + :meth:`~Flask.get_send_file_max_age` of + :data:`~flask.current_app`. + :param last_modified: set the ``Last-Modified`` header to this value, + a :class:`~datetime.datetime` or timestamp. + If a file was passed, this overrides its mtime. + """ + mtime = None + fsize = None + if isinstance(filename_or_fp, string_types): + filename = filename_or_fp + if not os.path.isabs(filename): + filename = os.path.join(current_app.root_path, filename) + file = None + if attachment_filename is None: + attachment_filename = os.path.basename(filename) + else: + file = filename_or_fp + filename = None + + if mimetype is None: + if attachment_filename is not None: + mimetype = mimetypes.guess_type(attachment_filename)[0] \ + or 'application/octet-stream' + + if mimetype is None: + raise ValueError( + 'Unable to infer MIME-type because no filename is available. ' + 'Please set either `attachment_filename`, pass a filepath to ' + '`filename_or_fp` or set your own MIME-type via `mimetype`.' 
+ ) + + headers = Headers() + if as_attachment: + if attachment_filename is None: + raise TypeError('filename unavailable, required for ' + 'sending as attachment') + headers.add('Content-Disposition', 'attachment', + filename=attachment_filename) + + if current_app.use_x_sendfile and filename: + if file is not None: + file.close() + headers['X-Sendfile'] = filename + fsize = os.path.getsize(filename) + headers['Content-Length'] = fsize + data = None + else: + if file is None: + file = open(filename, 'rb') + mtime = os.path.getmtime(filename) + fsize = os.path.getsize(filename) + headers['Content-Length'] = fsize + data = wrap_file(request.environ, file) + + rv = current_app.response_class(data, mimetype=mimetype, headers=headers, + direct_passthrough=True) + + if last_modified is not None: + rv.last_modified = last_modified + elif mtime is not None: + rv.last_modified = mtime + + rv.cache_control.public = True + if cache_timeout is None: + cache_timeout = current_app.get_send_file_max_age(filename) + if cache_timeout is not None: + rv.cache_control.max_age = cache_timeout + rv.expires = int(time() + cache_timeout) + + if add_etags and filename is not None: + from warnings import warn + + try: + rv.set_etag('%s-%s-%s' % ( + os.path.getmtime(filename), + os.path.getsize(filename), + adler32( + filename.encode('utf-8') if isinstance(filename, text_type) + else filename + ) & 0xffffffff + )) + except OSError: + warn('Access %s failed, maybe it does not exist, so ignore etags in ' + 'headers' % filename, stacklevel=2) + + if conditional: + if callable(getattr(Range, 'to_content_range_header', None)): + # Werkzeug supports Range Requests + # Remove this test when support for Werkzeug <0.12 is dropped + try: + rv = rv.make_conditional(request, accept_ranges=True, + complete_length=fsize) + except RequestedRangeNotSatisfiable: + file.close() + raise + else: + rv = rv.make_conditional(request) + # make sure we don't send x-sendfile for servers that + # ignore the 304 status code for x-sendfile. + if rv.status_code == 304: + rv.headers.pop('x-sendfile', None) + return rv + + +def safe_join(directory, *pathnames): + """Safely join `directory` and zero or more untrusted `pathnames` + components. + + Example usage:: + + @app.route('/wiki/') + def wiki_page(filename): + filename = safe_join(app.config['WIKI_FOLDER'], filename) + with open(filename, 'rb') as fd: + content = fd.read() # Read and process the file content... + + :param directory: the trusted base directory. + :param pathnames: the untrusted pathnames relative to that directory. + :raises: :class:`~werkzeug.exceptions.NotFound` if one or more passed + paths fall out of its boundaries. + """ + for filename in pathnames: + if filename != '': + filename = posixpath.normpath(filename) + for sep in _os_alt_seps: + if sep in filename: + raise NotFound() + if os.path.isabs(filename) or \ + filename == '..' or \ + filename.startswith('../'): + raise NotFound() + directory = os.path.join(directory, filename) + return directory + + +def send_from_directory(directory, filename, **options): + """Send a file from a given directory with :func:`send_file`. This + is a secure way to quickly expose static files from an upload folder + or something similar. + + Example usage:: + + @app.route('/uploads/') + def download_file(filename): + return send_from_directory(app.config['UPLOAD_FOLDER'], + filename, as_attachment=True) + + .. 
admonition:: Sending files and Performance + + It is strongly recommended to activate either ``X-Sendfile`` support in + your webserver or (if no authentication happens) to tell the webserver + to serve files for the given path on its own without calling into the + web application for improved performance. + + .. versionadded:: 0.5 + + :param directory: the directory where all the files are stored. + :param filename: the filename relative to that directory to + download. + :param options: optional keyword arguments that are directly + forwarded to :func:`send_file`. + """ + filename = safe_join(directory, filename) + if not os.path.isabs(filename): + filename = os.path.join(current_app.root_path, filename) + try: + if not os.path.isfile(filename): + raise NotFound() + except (TypeError, ValueError): + raise BadRequest() + options.setdefault('conditional', True) + return send_file(filename, **options) + + +def get_root_path(import_name): + """Returns the path to a package or cwd if that cannot be found. This + returns the path of a package or the folder that contains a module. + + Not to be confused with the package path returned by :func:`find_package`. + """ + # Module already imported and has a file attribute. Use that first. + mod = sys.modules.get(import_name) + if mod is not None and hasattr(mod, '__file__'): + return os.path.dirname(os.path.abspath(mod.__file__)) + + # Next attempt: check the loader. + loader = pkgutil.get_loader(import_name) + + # Loader does not exist or we're referring to an unloaded main module + # or a main module without path (interactive sessions), go with the + # current working directory. + if loader is None or import_name == '__main__': + return os.getcwd() + + # For .egg, zipimporter does not have get_filename until Python 2.7. + # Some other loaders might exhibit the same behavior. + if hasattr(loader, 'get_filename'): + filepath = loader.get_filename(import_name) + else: + # Fall back to imports. + __import__(import_name) + mod = sys.modules[import_name] + filepath = getattr(mod, '__file__', None) + + # If we don't have a filepath it might be because we are a + # namespace package. In this case we pick the root path from the + # first module that is contained in our package. + if filepath is None: + raise RuntimeError('No root path can be found for the provided ' + 'module "%s". This can happen because the ' + 'module came from an import hook that does ' + 'not provide file name information or because ' + 'it\'s a namespace package. In this case ' + 'the root path needs to be explicitly ' + 'provided.' % import_name) + + # filepath is import_name.py for a module, or __init__.py for a package. + return os.path.dirname(os.path.abspath(filepath)) + + +def _matching_loader_thinks_module_is_package(loader, mod_name): + """Given the loader that loaded a module and the module this function + attempts to figure out if the given module is actually a package. + """ + # If the loader can tell us if something is a package, we can + # directly ask the loader. + if hasattr(loader, 'is_package'): + return loader.is_package(mod_name) + # importlib's namespace loaders do not have this functionality but + # all the modules it loads are packages, so we can take advantage of + # this information. + elif (loader.__class__.__module__ == '_frozen_importlib' and + loader.__class__.__name__ == 'NamespaceLoader'): + return True + # Otherwise we need to fail with an error that explains what went + # wrong. 
+ raise AttributeError( + ('%s.is_package() method is missing but is required by Flask of ' + 'PEP 302 import hooks. If you do not use import hooks and ' + 'you encounter this error please file a bug against Flask.') % + loader.__class__.__name__) + + +def find_package(import_name): + """Finds a package and returns the prefix (or None if the package is + not installed) as well as the folder that contains the package or + module as a tuple. The package path returned is the module that would + have to be added to the pythonpath in order to make it possible to + import the module. The prefix is the path below which a UNIX like + folder structure exists (lib, share etc.). + """ + root_mod_name = import_name.split('.')[0] + loader = pkgutil.get_loader(root_mod_name) + if loader is None or import_name == '__main__': + # import name is not found, or interactive/main module + package_path = os.getcwd() + else: + # For .egg, zipimporter does not have get_filename until Python 2.7. + if hasattr(loader, 'get_filename'): + filename = loader.get_filename(root_mod_name) + elif hasattr(loader, 'archive'): + # zipimporter's loader.archive points to the .egg or .zip + # archive filename is dropped in call to dirname below. + filename = loader.archive + else: + # At least one loader is missing both get_filename and archive: + # Google App Engine's HardenedModulesHook + # + # Fall back to imports. + __import__(import_name) + filename = sys.modules[import_name].__file__ + package_path = os.path.abspath(os.path.dirname(filename)) + + # In case the root module is a package we need to chop of the + # rightmost part. This needs to go through a helper function + # because of python 3.3 namespace packages. + if _matching_loader_thinks_module_is_package( + loader, root_mod_name): + package_path = os.path.dirname(package_path) + + site_parent, site_folder = os.path.split(package_path) + py_prefix = os.path.abspath(sys.prefix) + if package_path.startswith(py_prefix): + return py_prefix, package_path + elif site_folder.lower() == 'site-packages': + parent, folder = os.path.split(site_parent) + # Windows like installations + if folder.lower() == 'lib': + base_dir = parent + # UNIX like installations + elif os.path.basename(parent).lower() == 'lib': + base_dir = os.path.dirname(parent) + else: + base_dir = site_parent + return base_dir, package_path + return None, package_path + + +class locked_cached_property(object): + """A decorator that converts a function into a lazy property. The + function wrapped is called the first time to retrieve the result + and then that calculated result is used the next time you access + the value. Works like the one in Werkzeug but has a lock for + thread safety. + """ + + def __init__(self, func, name=None, doc=None): + self.__name__ = name or func.__name__ + self.__module__ = func.__module__ + self.__doc__ = doc or func.__doc__ + self.func = func + self.lock = RLock() + + def __get__(self, obj, type=None): + if obj is None: + return self + with self.lock: + value = obj.__dict__.get(self.__name__, _missing) + if value is _missing: + value = self.func(obj) + obj.__dict__[self.__name__] = value + return value + + +class _PackageBoundObject(object): + + def __init__(self, import_name, template_folder=None, root_path=None): + #: The name of the package or module. Do not change this once + #: it was set by the constructor. + self.import_name = import_name + + #: location of the templates. ``None`` if templates should not be + #: exposed. 
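+        #: (The :class:`Flask` constructor itself passes ``'templates'``
+        #: here by default.)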
+ self.template_folder = template_folder + + if root_path is None: + root_path = get_root_path(self.import_name) + + #: Where is the app root located? + self.root_path = root_path + + self._static_folder = None + self._static_url_path = None + + def _get_static_folder(self): + if self._static_folder is not None: + return os.path.join(self.root_path, self._static_folder) + def _set_static_folder(self, value): + self._static_folder = value + static_folder = property(_get_static_folder, _set_static_folder, doc=''' + The absolute path to the configured static folder. + ''') + del _get_static_folder, _set_static_folder + + def _get_static_url_path(self): + if self._static_url_path is not None: + return self._static_url_path + if self.static_folder is not None: + return '/' + os.path.basename(self.static_folder) + def _set_static_url_path(self, value): + self._static_url_path = value + static_url_path = property(_get_static_url_path, _set_static_url_path) + del _get_static_url_path, _set_static_url_path + + @property + def has_static_folder(self): + """This is ``True`` if the package bound object's container has a + folder for static files. + + .. versionadded:: 0.5 + """ + return self.static_folder is not None + + @locked_cached_property + def jinja_loader(self): + """The Jinja loader for this package bound object. + + .. versionadded:: 0.5 + """ + if self.template_folder is not None: + return FileSystemLoader(os.path.join(self.root_path, + self.template_folder)) + + def get_send_file_max_age(self, filename): + """Provides default cache_timeout for the :func:`send_file` functions. + + By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from + the configuration of :data:`~flask.current_app`. + + Static file functions such as :func:`send_from_directory` use this + function, and :func:`send_file` calls this function on + :data:`~flask.current_app` when the given cache_timeout is ``None``. If a + cache_timeout is given in :func:`send_file`, that timeout is used; + otherwise, this method is called. + + This allows subclasses to change the behavior when sending files based + on the filename. For example, to set the cache timeout for .js files + to 60 seconds:: + + class MyFlask(flask.Flask): + def get_send_file_max_age(self, name): + if name.lower().endswith('.js'): + return 60 + return flask.Flask.get_send_file_max_age(self, name) + + .. versionadded:: 0.9 + """ + return total_seconds(current_app.send_file_max_age_default) + + def send_static_file(self, filename): + """Function used internally to send static files from the static + folder to the browser. + + .. versionadded:: 0.5 + """ + if not self.has_static_folder: + raise RuntimeError('No static folder for this object') + # Ensure get_send_file_max_age is called in all cases. + # Here, we ensure get_send_file_max_age is called for Blueprints. + cache_timeout = self.get_send_file_max_age(filename) + return send_from_directory(self.static_folder, filename, + cache_timeout=cache_timeout) + + def open_resource(self, resource, mode='rb'): + """Opens a resource from the application's resource folder. To see + how this works, consider the following folder structure:: + + /myapplication.py + /schema.sql + /static + /style.css + /templates + /layout.html + /index.html + + If you want to open the :file:`schema.sql` file you would do the + following:: + + with app.open_resource('schema.sql') as f: + contents = f.read() + do_something_with(contents) + + :param resource: the name of the resource. 
To access resources within + subfolders use forward slashes as separator. + :param mode: resource file opening mode, default is 'rb'. + """ + if mode not in ('r', 'rb'): + raise ValueError('Resources can only be opened for reading') + return open(os.path.join(self.root_path, resource), mode) + + +def total_seconds(td): + """Returns the total seconds from a timedelta object. + + :param timedelta td: the timedelta to be converted in seconds + + :returns: number of seconds + :rtype: int + """ + return td.days * 60 * 60 * 24 + td.seconds diff --git a/app/lib/flask/json.py b/app/lib/flask/json.py new file mode 100644 index 0000000..16e0c29 --- /dev/null +++ b/app/lib/flask/json.py @@ -0,0 +1,269 @@ +# -*- coding: utf-8 -*- +""" + flask.jsonimpl + ~~~~~~~~~~~~~~ + + Implementation helpers for the JSON support in Flask. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +import io +import uuid +from datetime import date +from .globals import current_app, request +from ._compat import text_type, PY2 + +from werkzeug.http import http_date +from jinja2 import Markup + +# Use the same json implementation as itsdangerous on which we +# depend anyways. +from itsdangerous import json as _json + + +# Figure out if simplejson escapes slashes. This behavior was changed +# from one version to another without reason. +_slash_escape = '\\/' not in _json.dumps('/') + + +__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump', + 'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder', + 'jsonify'] + + +def _wrap_reader_for_text(fp, encoding): + if isinstance(fp.read(0), bytes): + fp = io.TextIOWrapper(io.BufferedReader(fp), encoding) + return fp + + +def _wrap_writer_for_text(fp, encoding): + try: + fp.write('') + except TypeError: + fp = io.TextIOWrapper(fp, encoding) + return fp + + +class JSONEncoder(_json.JSONEncoder): + """The default Flask JSON encoder. This one extends the default simplejson + encoder by also supporting ``datetime`` objects, ``UUID`` as well as + ``Markup`` objects which are serialized as RFC 822 datetime strings (same + as the HTTP date format). In order to support more data types override the + :meth:`default` method. + """ + + def default(self, o): + """Implement this method in a subclass such that it returns a + serializable object for ``o``, or calls the base implementation (to + raise a :exc:`TypeError`). + + For example, to support arbitrary iterators, you could implement + default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + return JSONEncoder.default(self, o) + """ + if isinstance(o, date): + return http_date(o.timetuple()) + if isinstance(o, uuid.UUID): + return str(o) + if hasattr(o, '__html__'): + return text_type(o.__html__()) + return _json.JSONEncoder.default(self, o) + + +class JSONDecoder(_json.JSONDecoder): + """The default JSON decoder. This one does not change the behavior from + the default simplejson decoder. Consult the :mod:`json` documentation + for more information. This decoder is not only used for the load + functions of this module but also :attr:`~flask.Request`. 
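+
+    To customize decoding, subclass it and assign the subclass to
+    :attr:`flask.Flask.json_decoder` (sketch; ``MyDecoder`` and ``app``
+    are placeholders)::
+
+        class MyDecoder(JSONDecoder):
+            pass
+
+        app.json_decoder = MyDecoder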
+ """ + + +def _dump_arg_defaults(kwargs): + """Inject default arguments for dump functions.""" + if current_app: + kwargs.setdefault('cls', current_app.json_encoder) + if not current_app.config['JSON_AS_ASCII']: + kwargs.setdefault('ensure_ascii', False) + kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS']) + else: + kwargs.setdefault('sort_keys', True) + kwargs.setdefault('cls', JSONEncoder) + + +def _load_arg_defaults(kwargs): + """Inject default arguments for load functions.""" + if current_app: + kwargs.setdefault('cls', current_app.json_decoder) + else: + kwargs.setdefault('cls', JSONDecoder) + + +def dumps(obj, **kwargs): + """Serialize ``obj`` to a JSON formatted ``str`` by using the application's + configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an + application on the stack. + + This function can return ``unicode`` strings or ascii-only bytestrings by + default which coerce into unicode strings automatically. That behavior by + default is controlled by the ``JSON_AS_ASCII`` configuration variable + and can be overridden by the simplejson ``ensure_ascii`` parameter. + """ + _dump_arg_defaults(kwargs) + encoding = kwargs.pop('encoding', None) + rv = _json.dumps(obj, **kwargs) + if encoding is not None and isinstance(rv, text_type): + rv = rv.encode(encoding) + return rv + + +def dump(obj, fp, **kwargs): + """Like :func:`dumps` but writes into a file object.""" + _dump_arg_defaults(kwargs) + encoding = kwargs.pop('encoding', None) + if encoding is not None: + fp = _wrap_writer_for_text(fp, encoding) + _json.dump(obj, fp, **kwargs) + + +def loads(s, **kwargs): + """Unserialize a JSON object from a string ``s`` by using the application's + configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an + application on the stack. + """ + _load_arg_defaults(kwargs) + if isinstance(s, bytes): + s = s.decode(kwargs.pop('encoding', None) or 'utf-8') + return _json.loads(s, **kwargs) + + +def load(fp, **kwargs): + """Like :func:`loads` but reads from a file object. 
+ """ + _load_arg_defaults(kwargs) + if not PY2: + fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8') + return _json.load(fp, **kwargs) + + +def htmlsafe_dumps(obj, **kwargs): + """Works exactly like :func:`dumps` but is safe for use in ``' + self.assertEqual( + r'"\u003c/script\u003e\u003cscript\u003e' + r'alert(\"gotcha\")\u003c/script\u003e"', + self.encoder.encode(bad_string)) + self.assertEqual( + bad_string, self.decoder.decode( + self.encoder.encode(bad_string))) diff --git a/app/lib/simplejson/tests/test_errors.py b/app/lib/simplejson/tests/test_errors.py new file mode 100644 index 0000000..8dede38 --- /dev/null +++ b/app/lib/simplejson/tests/test_errors.py @@ -0,0 +1,51 @@ +import sys, pickle +from unittest import TestCase + +import simplejson as json +from simplejson.compat import u, b + +class TestErrors(TestCase): + def test_string_keys_error(self): + data = [{'a': 'A', 'b': (2, 4), 'c': 3.0, ('d',): 'D tuple'}] + self.assertRaises(TypeError, json.dumps, data) + + def test_decode_error(self): + err = None + try: + json.loads('{}\na\nb') + except json.JSONDecodeError: + err = sys.exc_info()[1] + else: + self.fail('Expected JSONDecodeError') + self.assertEqual(err.lineno, 2) + self.assertEqual(err.colno, 1) + self.assertEqual(err.endlineno, 3) + self.assertEqual(err.endcolno, 2) + + def test_scan_error(self): + err = None + for t in (u, b): + try: + json.loads(t('{"asdf": "')) + except json.JSONDecodeError: + err = sys.exc_info()[1] + else: + self.fail('Expected JSONDecodeError') + self.assertEqual(err.lineno, 1) + self.assertEqual(err.colno, 10) + + def test_error_is_pickable(self): + err = None + try: + json.loads('{}\na\nb') + except json.JSONDecodeError: + err = sys.exc_info()[1] + else: + self.fail('Expected JSONDecodeError') + s = pickle.dumps(err) + e = pickle.loads(s) + + self.assertEqual(err.msg, e.msg) + self.assertEqual(err.doc, e.doc) + self.assertEqual(err.pos, e.pos) + self.assertEqual(err.end, e.end) diff --git a/app/lib/simplejson/tests/test_fail.py b/app/lib/simplejson/tests/test_fail.py new file mode 100644 index 0000000..788f3a5 --- /dev/null +++ b/app/lib/simplejson/tests/test_fail.py @@ -0,0 +1,176 @@ +import sys +from unittest import TestCase + +import simplejson as json + +# 2007-10-05 +JSONDOCS = [ + # http://json.org/JSON_checker/test/fail1.json + '"A JSON payload should be an object or array, not a string."', + # http://json.org/JSON_checker/test/fail2.json + '["Unclosed array"', + # http://json.org/JSON_checker/test/fail3.json + '{unquoted_key: "keys must be quoted"}', + # http://json.org/JSON_checker/test/fail4.json + '["extra comma",]', + # http://json.org/JSON_checker/test/fail5.json + '["double extra comma",,]', + # http://json.org/JSON_checker/test/fail6.json + '[ , "<-- missing value"]', + # http://json.org/JSON_checker/test/fail7.json + '["Comma after the close"],', + # http://json.org/JSON_checker/test/fail8.json + '["Extra close"]]', + # http://json.org/JSON_checker/test/fail9.json + '{"Extra comma": true,}', + # http://json.org/JSON_checker/test/fail10.json + '{"Extra value after close": true} "misplaced quoted value"', + # http://json.org/JSON_checker/test/fail11.json + '{"Illegal expression": 1 + 2}', + # http://json.org/JSON_checker/test/fail12.json + '{"Illegal invocation": alert()}', + # http://json.org/JSON_checker/test/fail13.json + '{"Numbers cannot have leading zeroes": 013}', + # http://json.org/JSON_checker/test/fail14.json + '{"Numbers cannot be hex": 0x14}', + # http://json.org/JSON_checker/test/fail15.json + 
'["Illegal backslash escape: \\x15"]', + # http://json.org/JSON_checker/test/fail16.json + '[\\naked]', + # http://json.org/JSON_checker/test/fail17.json + '["Illegal backslash escape: \\017"]', + # http://json.org/JSON_checker/test/fail18.json + '[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', + # http://json.org/JSON_checker/test/fail19.json + '{"Missing colon" null}', + # http://json.org/JSON_checker/test/fail20.json + '{"Double colon":: null}', + # http://json.org/JSON_checker/test/fail21.json + '{"Comma instead of colon", null}', + # http://json.org/JSON_checker/test/fail22.json + '["Colon instead of comma": false]', + # http://json.org/JSON_checker/test/fail23.json + '["Bad value", truth]', + # http://json.org/JSON_checker/test/fail24.json + "['single quote']", + # http://json.org/JSON_checker/test/fail25.json + '["\ttab\tcharacter\tin\tstring\t"]', + # http://json.org/JSON_checker/test/fail26.json + '["tab\\ character\\ in\\ string\\ "]', + # http://json.org/JSON_checker/test/fail27.json + '["line\nbreak"]', + # http://json.org/JSON_checker/test/fail28.json + '["line\\\nbreak"]', + # http://json.org/JSON_checker/test/fail29.json + '[0e]', + # http://json.org/JSON_checker/test/fail30.json + '[0e+]', + # http://json.org/JSON_checker/test/fail31.json + '[0e+-1]', + # http://json.org/JSON_checker/test/fail32.json + '{"Comma instead if closing brace": true,', + # http://json.org/JSON_checker/test/fail33.json + '["mismatch"}', + # http://code.google.com/p/simplejson/issues/detail?id=3 + u'["A\u001FZ control characters in string"]', + # misc based on coverage + '{', + '{]', + '{"foo": "bar"]', + '{"foo": "bar"', + 'nul', + 'nulx', + '-', + '-x', + '-e', + '-e0', + '-Infinite', + '-Inf', + 'Infinit', + 'Infinite', + 'NaM', + 'NuN', + 'falsy', + 'fal', + 'trug', + 'tru', + '1e', + '1ex', + '1e-', + '1e-x', +] + +SKIPS = { + 1: "why not have a string payload?", + 18: "spec doesn't specify any nesting limitations", +} + +class TestFail(TestCase): + def test_failures(self): + for idx, doc in enumerate(JSONDOCS): + idx = idx + 1 + if idx in SKIPS: + json.loads(doc) + continue + try: + json.loads(doc) + except json.JSONDecodeError: + pass + else: + self.fail("Expected failure for fail%d.json: %r" % (idx, doc)) + + def test_array_decoder_issue46(self): + # http://code.google.com/p/simplejson/issues/detail?id=46 + for doc in [u'[,]', '[,]']: + try: + json.loads(doc) + except json.JSONDecodeError: + e = sys.exc_info()[1] + self.assertEqual(e.pos, 1) + self.assertEqual(e.lineno, 1) + self.assertEqual(e.colno, 2) + except Exception: + e = sys.exc_info()[1] + self.fail("Unexpected exception raised %r %s" % (e, e)) + else: + self.fail("Unexpected success parsing '[,]'") + + def test_truncated_input(self): + test_cases = [ + ('', 'Expecting value', 0), + ('[', "Expecting value or ']'", 1), + ('[42', "Expecting ',' delimiter", 3), + ('[42,', 'Expecting value', 4), + ('["', 'Unterminated string starting at', 1), + ('["spam', 'Unterminated string starting at', 1), + ('["spam"', "Expecting ',' delimiter", 7), + ('["spam",', 'Expecting value', 8), + ('{', 'Expecting property name enclosed in double quotes', 1), + ('{"', 'Unterminated string starting at', 1), + ('{"spam', 'Unterminated string starting at', 1), + ('{"spam"', "Expecting ':' delimiter", 7), + ('{"spam":', 'Expecting value', 8), + ('{"spam":42', "Expecting ',' delimiter", 10), + ('{"spam":42,', 'Expecting property name enclosed in double quotes', + 11), + ('"', 'Unterminated string starting at', 0), + ('"spam', 'Unterminated string starting 
at', 0), + ('[,', "Expecting value", 1), + ] + for data, msg, idx in test_cases: + try: + json.loads(data) + except json.JSONDecodeError: + e = sys.exc_info()[1] + self.assertEqual( + e.msg[:len(msg)], + msg, + "%r doesn't start with %r for %r" % (e.msg, msg, data)) + self.assertEqual( + e.pos, idx, + "pos %r != %r for %r" % (e.pos, idx, data)) + except Exception: + e = sys.exc_info()[1] + self.fail("Unexpected exception raised %r %s" % (e, e)) + else: + self.fail("Unexpected success parsing '%r'" % (data,)) diff --git a/app/lib/simplejson/tests/test_float.py b/app/lib/simplejson/tests/test_float.py new file mode 100644 index 0000000..e382ec2 --- /dev/null +++ b/app/lib/simplejson/tests/test_float.py @@ -0,0 +1,35 @@ +import math +from unittest import TestCase +from simplejson.compat import long_type, text_type +import simplejson as json +from simplejson.decoder import NaN, PosInf, NegInf + +class TestFloat(TestCase): + def test_degenerates_allow(self): + for inf in (PosInf, NegInf): + self.assertEqual(json.loads(json.dumps(inf)), inf) + # Python 2.5 doesn't have math.isnan + nan = json.loads(json.dumps(NaN)) + self.assertTrue((0 + nan) != nan) + + def test_degenerates_ignore(self): + for f in (PosInf, NegInf, NaN): + self.assertEqual(json.loads(json.dumps(f, ignore_nan=True)), None) + + def test_degenerates_deny(self): + for f in (PosInf, NegInf, NaN): + self.assertRaises(ValueError, json.dumps, f, allow_nan=False) + + def test_floats(self): + for num in [1617161771.7650001, math.pi, math.pi**100, + math.pi**-100, 3.1]: + self.assertEqual(float(json.dumps(num)), num) + self.assertEqual(json.loads(json.dumps(num)), num) + self.assertEqual(json.loads(text_type(json.dumps(num))), num) + + def test_ints(self): + for num in [1, long_type(1), 1<<32, 1<<64]: + self.assertEqual(json.dumps(num), str(num)) + self.assertEqual(int(json.dumps(num)), num) + self.assertEqual(json.loads(json.dumps(num)), num) + self.assertEqual(json.loads(text_type(json.dumps(num))), num) diff --git a/app/lib/simplejson/tests/test_for_json.py b/app/lib/simplejson/tests/test_for_json.py new file mode 100644 index 0000000..b791b88 --- /dev/null +++ b/app/lib/simplejson/tests/test_for_json.py @@ -0,0 +1,97 @@ +import unittest +import simplejson as json + + +class ForJson(object): + def for_json(self): + return {'for_json': 1} + + +class NestedForJson(object): + def for_json(self): + return {'nested': ForJson()} + + +class ForJsonList(object): + def for_json(self): + return ['list'] + + +class DictForJson(dict): + def for_json(self): + return {'alpha': 1} + + +class ListForJson(list): + def for_json(self): + return ['list'] + + +class TestForJson(unittest.TestCase): + def assertRoundTrip(self, obj, other, for_json=True): + if for_json is None: + # None will use the default + s = json.dumps(obj) + else: + s = json.dumps(obj, for_json=for_json) + self.assertEqual( + json.loads(s), + other) + + def test_for_json_encodes_stand_alone_object(self): + self.assertRoundTrip( + ForJson(), + ForJson().for_json()) + + def test_for_json_encodes_object_nested_in_dict(self): + self.assertRoundTrip( + {'hooray': ForJson()}, + {'hooray': ForJson().for_json()}) + + def test_for_json_encodes_object_nested_in_list_within_dict(self): + self.assertRoundTrip( + {'list': [0, ForJson(), 2, 3]}, + {'list': [0, ForJson().for_json(), 2, 3]}) + + def test_for_json_encodes_object_nested_within_object(self): + self.assertRoundTrip( + NestedForJson(), + {'nested': {'for_json': 1}}) + + def test_for_json_encodes_list(self): + self.assertRoundTrip( + 
ForJsonList(), + ForJsonList().for_json()) + + def test_for_json_encodes_list_within_object(self): + self.assertRoundTrip( + {'nested': ForJsonList()}, + {'nested': ForJsonList().for_json()}) + + def test_for_json_encodes_dict_subclass(self): + self.assertRoundTrip( + DictForJson(a=1), + DictForJson(a=1).for_json()) + + def test_for_json_encodes_list_subclass(self): + self.assertRoundTrip( + ListForJson(['l']), + ListForJson(['l']).for_json()) + + def test_for_json_ignored_if_not_true_with_dict_subclass(self): + for for_json in (None, False): + self.assertRoundTrip( + DictForJson(a=1), + {'a': 1}, + for_json=for_json) + + def test_for_json_ignored_if_not_true_with_list_subclass(self): + for for_json in (None, False): + self.assertRoundTrip( + ListForJson(['l']), + ['l'], + for_json=for_json) + + def test_raises_typeerror_if_for_json_not_true_with_object(self): + self.assertRaises(TypeError, json.dumps, ForJson()) + self.assertRaises(TypeError, json.dumps, ForJson(), for_json=False) diff --git a/app/lib/simplejson/tests/test_indent.py b/app/lib/simplejson/tests/test_indent.py new file mode 100644 index 0000000..cea25a5 --- /dev/null +++ b/app/lib/simplejson/tests/test_indent.py @@ -0,0 +1,86 @@ +from unittest import TestCase +import textwrap + +import simplejson as json +from simplejson.compat import StringIO + +class TestIndent(TestCase): + def test_indent(self): + h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', + 'i-vhbjkhnth', + {'nifty': 87}, {'field': 'yes', 'morefield': False} ] + + expect = textwrap.dedent("""\ + [ + \t[ + \t\t"blorpie" + \t], + \t[ + \t\t"whoops" + \t], + \t[], + \t"d-shtaeou", + \t"d-nthiouh", + \t"i-vhbjkhnth", + \t{ + \t\t"nifty": 87 + \t}, + \t{ + \t\t"field": "yes", + \t\t"morefield": false + \t} + ]""") + + + d1 = json.dumps(h) + d2 = json.dumps(h, indent='\t', sort_keys=True, separators=(',', ': ')) + d3 = json.dumps(h, indent=' ', sort_keys=True, separators=(',', ': ')) + d4 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': ')) + + h1 = json.loads(d1) + h2 = json.loads(d2) + h3 = json.loads(d3) + h4 = json.loads(d4) + + self.assertEqual(h1, h) + self.assertEqual(h2, h) + self.assertEqual(h3, h) + self.assertEqual(h4, h) + self.assertEqual(d3, expect.replace('\t', ' ')) + self.assertEqual(d4, expect.replace('\t', ' ')) + # NOTE: Python 2.4 textwrap.dedent converts tabs to spaces, + # so the following is expected to fail. Python 2.4 is not a + # supported platform in simplejson 2.1.0+. 
+        self.assertEqual(d2, expect)
+
+    def test_indent0(self):
+        h = {3: 1}
+        def check(indent, expected):
+            d1 = json.dumps(h, indent=indent)
+            self.assertEqual(d1, expected)
+
+            sio = StringIO()
+            json.dump(h, sio, indent=indent)
+            self.assertEqual(sio.getvalue(), expected)
+
+        # indent=0 should emit newlines
+        check(0, '{\n"3": 1\n}')
+        # indent=None is more compact
+        check(None, '{"3": 1}')
+
+    def test_separators(self):
+        lst = [1,2,3,4]
+        expect = '[\n1,\n2,\n3,\n4\n]'
+        expect_spaces = '[\n1, \n2, \n3, \n4\n]'
+        # Ensure that separators still works
+        self.assertEqual(
+            expect_spaces,
+            json.dumps(lst, indent=0, separators=(', ', ': ')))
+        # Force the new defaults
+        self.assertEqual(
+            expect,
+            json.dumps(lst, indent=0, separators=(',', ': ')))
+        # Added in 2.1.4
+        self.assertEqual(
+            expect,
+            json.dumps(lst, indent=0))
diff --git a/app/lib/simplejson/tests/test_item_sort_key.py b/app/lib/simplejson/tests/test_item_sort_key.py
new file mode 100644
index 0000000..b05bfc8
--- /dev/null
+++ b/app/lib/simplejson/tests/test_item_sort_key.py
@@ -0,0 +1,20 @@
+from unittest import TestCase
+
+import simplejson as json
+from operator import itemgetter
+
+class TestItemSortKey(TestCase):
+    def test_simple_first(self):
+        a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
+        self.assertEqual(
+            '{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}',
+            json.dumps(a, item_sort_key=json.simple_first))
+
+    def test_case(self):
+        a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
+        self.assertEqual(
+            '{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
+            json.dumps(a, item_sort_key=itemgetter(0)))
+        self.assertEqual(
+            '{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
+            json.dumps(a, item_sort_key=lambda kv: kv[0].lower()))
diff --git a/app/lib/simplejson/tests/test_iterable.py b/app/lib/simplejson/tests/test_iterable.py
new file mode 100644
index 0000000..35d3e75
--- /dev/null
+++ b/app/lib/simplejson/tests/test_iterable.py
@@ -0,0 +1,31 @@
+import unittest
+from simplejson.compat import StringIO
+
+import simplejson as json
+
+def iter_dumps(obj, **kw):
+    return ''.join(json.JSONEncoder(**kw).iterencode(obj))
+
+def sio_dump(obj, **kw):
+    sio = StringIO()
+    json.dump(obj, sio, **kw)
+    return sio.getvalue()
+
+class TestIterable(unittest.TestCase):
+    def test_iterable(self):
+        for l in ([], [1], [1, 2], [1, 2, 3]):
+            for opts in [{}, {'indent': 2}]:
+                for dumps in (json.dumps, iter_dumps, sio_dump):
+                    expect = dumps(l, **opts)
+                    default_expect = dumps(sum(l), **opts)
+                    # Default is False
+                    self.assertRaises(TypeError, dumps, iter(l), **opts)
+                    self.assertRaises(TypeError, dumps, iter(l), iterable_as_array=False, **opts)
+                    self.assertEqual(expect, dumps(iter(l), iterable_as_array=True, **opts))
+                    # Ensure that the "default" gets called
+                    self.assertEqual(default_expect, dumps(iter(l), default=sum, **opts))
+                    self.assertEqual(default_expect, dumps(iter(l), iterable_as_array=False, default=sum, **opts))
+                    # Ensure that the "default" does not get called
+                    self.assertEqual(
+                        expect,
+                        dumps(iter(l), iterable_as_array=True, default=sum, **opts))
diff --git a/app/lib/simplejson/tests/test_namedtuple.py
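
# ---- editor's sketch: not part of the vendored diff -----------------------
# test_iterable.py above pins down the iterable_as_array flag: generators
# raise TypeError by default, stream out as a JSON array when the flag is on,
# and fall through to a user-supplied `default` when it is off. Assuming the
# vendored simplejson imports:
import simplejson as json

def numbers():
    yield 1
    yield 2
    yield 3

print(json.dumps(numbers(), iterable_as_array=True))  # prints: [1, 2, 3]
print(json.dumps(numbers(), default=sum))             # prints: 6
# ----------------------------------------------------------------------------
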
b/app/lib/simplejson/tests/test_namedtuple.py new file mode 100644 index 0000000..4387894 --- /dev/null +++ b/app/lib/simplejson/tests/test_namedtuple.py @@ -0,0 +1,122 @@ +from __future__ import absolute_import +import unittest +import simplejson as json +from simplejson.compat import StringIO + +try: + from collections import namedtuple +except ImportError: + class Value(tuple): + def __new__(cls, *args): + return tuple.__new__(cls, args) + + def _asdict(self): + return {'value': self[0]} + class Point(tuple): + def __new__(cls, *args): + return tuple.__new__(cls, args) + + def _asdict(self): + return {'x': self[0], 'y': self[1]} +else: + Value = namedtuple('Value', ['value']) + Point = namedtuple('Point', ['x', 'y']) + +class DuckValue(object): + def __init__(self, *args): + self.value = Value(*args) + + def _asdict(self): + return self.value._asdict() + +class DuckPoint(object): + def __init__(self, *args): + self.point = Point(*args) + + def _asdict(self): + return self.point._asdict() + +class DeadDuck(object): + _asdict = None + +class DeadDict(dict): + _asdict = None + +CONSTRUCTORS = [ + lambda v: v, + lambda v: [v], + lambda v: [{'key': v}], +] + +class TestNamedTuple(unittest.TestCase): + def test_namedtuple_dumps(self): + for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]: + d = v._asdict() + self.assertEqual(d, json.loads(json.dumps(v))) + self.assertEqual( + d, + json.loads(json.dumps(v, namedtuple_as_object=True))) + self.assertEqual(d, json.loads(json.dumps(v, tuple_as_array=False))) + self.assertEqual( + d, + json.loads(json.dumps(v, namedtuple_as_object=True, + tuple_as_array=False))) + + def test_namedtuple_dumps_false(self): + for v in [Value(1), Point(1, 2)]: + l = list(v) + self.assertEqual( + l, + json.loads(json.dumps(v, namedtuple_as_object=False))) + self.assertRaises(TypeError, json.dumps, v, + tuple_as_array=False, namedtuple_as_object=False) + + def test_namedtuple_dump(self): + for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]: + d = v._asdict() + sio = StringIO() + json.dump(v, sio) + self.assertEqual(d, json.loads(sio.getvalue())) + sio = StringIO() + json.dump(v, sio, namedtuple_as_object=True) + self.assertEqual( + d, + json.loads(sio.getvalue())) + sio = StringIO() + json.dump(v, sio, tuple_as_array=False) + self.assertEqual(d, json.loads(sio.getvalue())) + sio = StringIO() + json.dump(v, sio, namedtuple_as_object=True, + tuple_as_array=False) + self.assertEqual( + d, + json.loads(sio.getvalue())) + + def test_namedtuple_dump_false(self): + for v in [Value(1), Point(1, 2)]: + l = list(v) + sio = StringIO() + json.dump(v, sio, namedtuple_as_object=False) + self.assertEqual( + l, + json.loads(sio.getvalue())) + self.assertRaises(TypeError, json.dump, v, StringIO(), + tuple_as_array=False, namedtuple_as_object=False) + + def test_asdict_not_callable_dump(self): + for f in CONSTRUCTORS: + self.assertRaises(TypeError, + json.dump, f(DeadDuck()), StringIO(), namedtuple_as_object=True) + sio = StringIO() + json.dump(f(DeadDict()), sio, namedtuple_as_object=True) + self.assertEqual( + json.dumps(f({})), + sio.getvalue()) + + def test_asdict_not_callable_dumps(self): + for f in CONSTRUCTORS: + self.assertRaises(TypeError, + json.dumps, f(DeadDuck()), namedtuple_as_object=True) + self.assertEqual( + json.dumps(f({})), + json.dumps(f(DeadDict()), namedtuple_as_object=True)) diff --git a/app/lib/simplejson/tests/test_pass1.py b/app/lib/simplejson/tests/test_pass1.py new file mode 100644 index 0000000..f0b5b10 --- /dev/null +++ 
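
# ---- editor's sketch: not part of the vendored diff -----------------------
# As test_namedtuple.py above establishes, anything with a callable _asdict()
# is encoded as a JSON object by default (namedtuple_as_object=True) and only
# falls back to array form when that flag is turned off. Assuming the
# vendored simplejson imports:
from collections import namedtuple

import simplejson as json

Point = namedtuple('Point', ['x', 'y'])

print(json.dumps(Point(1, 2)))                              # {"x": 1, "y": 2}
print(json.dumps(Point(1, 2), namedtuple_as_object=False))  # [1, 2]
# ----------------------------------------------------------------------------
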
b/app/lib/simplejson/tests/test_pass1.py
@@ -0,0 +1,71 @@
+from unittest import TestCase
+
+import simplejson as json
+
+# from http://json.org/JSON_checker/test/pass1.json
+JSON = r'''
+[
+    "JSON Test Pattern pass1",
+    {"object with 1 member":["array with 1 element"]},
+    {},
+    [],
+    -42,
+    true,
+    false,
+    null,
+    {
+        "integer": 1234567890,
+        "real": -9876.543210,
+        "e": 0.123456789e-12,
+        "E": 1.234567890E+34,
+        "": 23456789012E66,
+        "zero": 0,
+        "one": 1,
+        "space": " ",
+        "quote": "\"",
+        "backslash": "\\",
+        "controls": "\b\f\n\r\t",
+        "slash": "/ & \/",
+        "alpha": "abcdefghijklmnopqrstuvwyz",
+        "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
+        "digit": "0123456789",
+        "special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
+        "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
+        "true": true,
+        "false": false,
+        "null": null,
+        "array":[ ],
+        "object":{ },
+        "address": "50 St. James Street",
+        "url": "http://www.JSON.org/",
+        "comment": "// /* <!-- --",
+        "# -- --> */": " ",
+        " s p a c e d " :[1,2 , 3
+
+,
+
+4 , 5 , 6 ,7 ],"compact": [1,2,3,4,5,6,7],
+        "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
+        "quotes": "&#34; \u0022 %22 0x22 034 &#x22;",
+        "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
+: "A key can be any string"
+    },
+    0.5 ,98.6
+,
+99.44
+,
+
+1066,
+1e1,
+0.1e1,
+1e-1,
+1e00,2e+00,2e-00
+,"rosebud"]
+'''
+
+class TestPass1(TestCase):
+    def test_parse(self):
+        # test in/out equivalence and parsing
+        res = json.loads(JSON)
+        out = json.dumps(res)
+        self.assertEqual(res, json.loads(out))
diff --git a/app/lib/simplejson/tests/test_pass2.py b/app/lib/simplejson/tests/test_pass2.py
new file mode 100644
index 0000000..5d812b3
--- /dev/null
+++ b/app/lib/simplejson/tests/test_pass2.py
@@ -0,0 +1,14 @@
+from unittest import TestCase
+import simplejson as json
+
+# from http://json.org/JSON_checker/test/pass2.json
+JSON = r'''
+[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]
+'''
+
+class TestPass2(TestCase):
+    def test_parse(self):
+        # test in/out equivalence and parsing
+        res = json.loads(JSON)
+        out = json.dumps(res)
+        self.assertEqual(res, json.loads(out))
diff --git a/app/lib/simplejson/tests/test_pass3.py b/app/lib/simplejson/tests/test_pass3.py
new file mode 100644
index 0000000..821d60b
--- /dev/null
+++ b/app/lib/simplejson/tests/test_pass3.py
@@ -0,0 +1,20 @@
+from unittest import TestCase
+
+import simplejson as json
+
+# from http://json.org/JSON_checker/test/pass3.json
+JSON = r'''
+{
+    "JSON Test Pattern pass3": {
+        "The outermost value": "must be an object or array.",
+        "In this test": "It is an object."
+ } +} +''' + +class TestPass3(TestCase): + def test_parse(self): + # test in/out equivalence and parsing + res = json.loads(JSON) + out = json.dumps(res) + self.assertEqual(res, json.loads(out)) diff --git a/app/lib/simplejson/tests/test_raw_json.py b/app/lib/simplejson/tests/test_raw_json.py new file mode 100644 index 0000000..1dfcc2c --- /dev/null +++ b/app/lib/simplejson/tests/test_raw_json.py @@ -0,0 +1,47 @@ +import unittest +import simplejson as json + +dct1 = { + 'key1': 'value1' +} + +dct2 = { + 'key2': 'value2', + 'd1': dct1 +} + +dct3 = { + 'key2': 'value2', + 'd1': json.dumps(dct1) +} + +dct4 = { + 'key2': 'value2', + 'd1': json.RawJSON(json.dumps(dct1)) +} + + +class TestRawJson(unittest.TestCase): + + def test_normal_str(self): + self.assertNotEqual(json.dumps(dct2), json.dumps(dct3)) + + def test_raw_json_str(self): + self.assertEqual(json.dumps(dct2), json.dumps(dct4)) + self.assertEqual(dct2, json.loads(json.dumps(dct4))) + + def test_list(self): + self.assertEqual( + json.dumps([dct2]), + json.dumps([json.RawJSON(json.dumps(dct2))])) + self.assertEqual( + [dct2], + json.loads(json.dumps([json.RawJSON(json.dumps(dct2))]))) + + def test_direct(self): + self.assertEqual( + json.dumps(dct2), + json.dumps(json.RawJSON(json.dumps(dct2)))) + self.assertEqual( + dct2, + json.loads(json.dumps(json.RawJSON(json.dumps(dct2))))) diff --git a/app/lib/simplejson/tests/test_recursion.py b/app/lib/simplejson/tests/test_recursion.py new file mode 100644 index 0000000..662eb66 --- /dev/null +++ b/app/lib/simplejson/tests/test_recursion.py @@ -0,0 +1,67 @@ +from unittest import TestCase + +import simplejson as json + +class JSONTestObject: + pass + + +class RecursiveJSONEncoder(json.JSONEncoder): + recurse = False + def default(self, o): + if o is JSONTestObject: + if self.recurse: + return [JSONTestObject] + else: + return 'JSONTestObject' + return json.JSONEncoder.default(o) + + +class TestRecursion(TestCase): + def test_listrecursion(self): + x = [] + x.append(x) + try: + json.dumps(x) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on list recursion") + x = [] + y = [x] + x.append(y) + try: + json.dumps(x) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on alternating list recursion") + y = [] + x = [y, y] + # ensure that the marker is cleared + json.dumps(x) + + def test_dictrecursion(self): + x = {} + x["test"] = x + try: + json.dumps(x) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on dict recursion") + x = {} + y = {"a": x, "b": x} + # ensure that the marker is cleared + json.dumps(y) + + def test_defaultrecursion(self): + enc = RecursiveJSONEncoder() + self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"') + enc.recurse = True + try: + enc.encode(JSONTestObject) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on default recursion") diff --git a/app/lib/simplejson/tests/test_scanstring.py b/app/lib/simplejson/tests/test_scanstring.py new file mode 100644 index 0000000..3d98f0d --- /dev/null +++ b/app/lib/simplejson/tests/test_scanstring.py @@ -0,0 +1,194 @@ +import sys +from unittest import TestCase + +import simplejson as json +import simplejson.decoder +from simplejson.compat import b, PY3 + +class TestScanString(TestCase): + # The bytes type is intentionally not used in most of these tests + # under Python 3 because the decoder immediately coerces to str before + # calling scanstring. In Python 2 we are testing the code paths + # for both unicode and str. 
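
# ---- editor's sketch: not part of the vendored diff -----------------------
# The dct3/dct4 contrast in test_raw_json.py above is the whole feature: a
# plain pre-encoded string gets quoted again, while json.RawJSON splices the
# fragment into the output verbatim. Assuming the vendored simplejson imports:
import simplejson as json

fragment = json.dumps({'key1': 'value1'})
doc = json.dumps({'key2': 'value2', 'd1': json.RawJSON(fragment)})
assert json.loads(doc) == {'key2': 'value2', 'd1': {'key1': 'value1'}}
# ----------------------------------------------------------------------------
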
+ # + # The reason this is done is because Python 3 would require + # entirely different code paths for parsing bytes and str. + # + def test_py_scanstring(self): + self._test_scanstring(simplejson.decoder.py_scanstring) + + def test_c_scanstring(self): + if not simplejson.decoder.c_scanstring: + return + self._test_scanstring(simplejson.decoder.c_scanstring) + + def _test_scanstring(self, scanstring): + if sys.maxunicode == 65535: + self.assertEqual( + scanstring(u'"z\U0001d120x"', 1, None, True), + (u'z\U0001d120x', 6)) + else: + self.assertEqual( + scanstring(u'"z\U0001d120x"', 1, None, True), + (u'z\U0001d120x', 5)) + + self.assertEqual( + scanstring('"\\u007b"', 1, None, True), + (u'{', 8)) + + self.assertEqual( + scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True), + (u'A JSON payload should be an object or array, not a string.', 60)) + + self.assertEqual( + scanstring('["Unclosed array"', 2, None, True), + (u'Unclosed array', 17)) + + self.assertEqual( + scanstring('["extra comma",]', 2, None, True), + (u'extra comma', 14)) + + self.assertEqual( + scanstring('["double extra comma",,]', 2, None, True), + (u'double extra comma', 21)) + + self.assertEqual( + scanstring('["Comma after the close"],', 2, None, True), + (u'Comma after the close', 24)) + + self.assertEqual( + scanstring('["Extra close"]]', 2, None, True), + (u'Extra close', 14)) + + self.assertEqual( + scanstring('{"Extra comma": true,}', 2, None, True), + (u'Extra comma', 14)) + + self.assertEqual( + scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True), + (u'Extra value after close', 26)) + + self.assertEqual( + scanstring('{"Illegal expression": 1 + 2}', 2, None, True), + (u'Illegal expression', 21)) + + self.assertEqual( + scanstring('{"Illegal invocation": alert()}', 2, None, True), + (u'Illegal invocation', 21)) + + self.assertEqual( + scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True), + (u'Numbers cannot have leading zeroes', 37)) + + self.assertEqual( + scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True), + (u'Numbers cannot be hex', 24)) + + self.assertEqual( + scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True), + (u'Too deep', 30)) + + self.assertEqual( + scanstring('{"Missing colon" null}', 2, None, True), + (u'Missing colon', 16)) + + self.assertEqual( + scanstring('{"Double colon":: null}', 2, None, True), + (u'Double colon', 15)) + + self.assertEqual( + scanstring('{"Comma instead of colon", null}', 2, None, True), + (u'Comma instead of colon', 25)) + + self.assertEqual( + scanstring('["Colon instead of comma": false]', 2, None, True), + (u'Colon instead of comma', 25)) + + self.assertEqual( + scanstring('["Bad value", truth]', 2, None, True), + (u'Bad value', 12)) + + for c in map(chr, range(0x00, 0x1f)): + self.assertEqual( + scanstring(c + '"', 0, None, False), + (c, 2)) + self.assertRaises( + ValueError, + scanstring, c + '"', 0, None, True) + + self.assertRaises(ValueError, scanstring, '', 0, None, True) + self.assertRaises(ValueError, scanstring, 'a', 0, None, True) + self.assertRaises(ValueError, scanstring, '\\', 0, None, True) + self.assertRaises(ValueError, scanstring, '\\u', 0, None, True) + self.assertRaises(ValueError, scanstring, '\\u0', 0, None, True) + self.assertRaises(ValueError, scanstring, '\\u01', 0, None, True) + self.assertRaises(ValueError, scanstring, '\\u012', 0, None, True) + self.assertRaises(ValueError, scanstring, '\\u0123', 0, None, True) + if 
sys.maxunicode > 65535: + self.assertRaises(ValueError, + scanstring, '\\ud834\\u"', 0, None, True) + self.assertRaises(ValueError, + scanstring, '\\ud834\\x0123"', 0, None, True) + + def test_issue3623(self): + self.assertRaises(ValueError, json.decoder.scanstring, "xxx", 1, + "xxx") + self.assertRaises(UnicodeDecodeError, + json.encoder.encode_basestring_ascii, b("xx\xff")) + + def test_overflow(self): + # Python 2.5 does not have maxsize, Python 3 does not have maxint + maxsize = getattr(sys, 'maxsize', getattr(sys, 'maxint', None)) + assert maxsize is not None + self.assertRaises(OverflowError, json.decoder.scanstring, "xxx", + maxsize + 1) + + def test_surrogates(self): + scanstring = json.decoder.scanstring + + def assertScan(given, expect, test_utf8=True): + givens = [given] + if not PY3 and test_utf8: + givens.append(given.encode('utf8')) + for given in givens: + (res, count) = scanstring(given, 1, None, True) + self.assertEqual(len(given), count) + self.assertEqual(res, expect) + + assertScan( + u'"z\\ud834\\u0079x"', + u'z\ud834yx') + assertScan( + u'"z\\ud834\\udd20x"', + u'z\U0001d120x') + assertScan( + u'"z\\ud834\\ud834\\udd20x"', + u'z\ud834\U0001d120x') + assertScan( + u'"z\\ud834x"', + u'z\ud834x') + assertScan( + u'"z\\udd20x"', + u'z\udd20x') + assertScan( + u'"z\ud834x"', + u'z\ud834x') + # It may look strange to join strings together, but Python is drunk. + # https://gist.github.com/etrepum/5538443 + assertScan( + u'"z\\ud834\udd20x12345"', + u''.join([u'z\ud834', u'\udd20x12345'])) + assertScan( + u'"z\ud834\\udd20x"', + u''.join([u'z\ud834', u'\udd20x'])) + # these have different behavior given UTF8 input, because the surrogate + # pair may be joined (in maxunicode > 65535 builds) + assertScan( + u''.join([u'"z\ud834', u'\udd20x"']), + u''.join([u'z\ud834', u'\udd20x']), + test_utf8=False) + + self.assertRaises(ValueError, + scanstring, u'"z\\ud83x"', 1, None, True) + self.assertRaises(ValueError, + scanstring, u'"z\\ud834\\udd2x"', 1, None, True) diff --git a/app/lib/simplejson/tests/test_separators.py b/app/lib/simplejson/tests/test_separators.py new file mode 100644 index 0000000..91b4d4f --- /dev/null +++ b/app/lib/simplejson/tests/test_separators.py @@ -0,0 +1,42 @@ +import textwrap +from unittest import TestCase + +import simplejson as json + + +class TestSeparators(TestCase): + def test_separators(self): + h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth', + {'nifty': 87}, {'field': 'yes', 'morefield': False} ] + + expect = textwrap.dedent("""\ + [ + [ + "blorpie" + ] , + [ + "whoops" + ] , + [] , + "d-shtaeou" , + "d-nthiouh" , + "i-vhbjkhnth" , + { + "nifty" : 87 + } , + { + "field" : "yes" , + "morefield" : false + } + ]""") + + + d1 = json.dumps(h) + d2 = json.dumps(h, indent=' ', sort_keys=True, separators=(' ,', ' : ')) + + h1 = json.loads(d1) + h2 = json.loads(d2) + + self.assertEqual(h1, h) + self.assertEqual(h2, h) + self.assertEqual(d2, expect) diff --git a/app/lib/simplejson/tests/test_speedups.py b/app/lib/simplejson/tests/test_speedups.py new file mode 100644 index 0000000..0a2b63b --- /dev/null +++ b/app/lib/simplejson/tests/test_speedups.py @@ -0,0 +1,39 @@ +import sys +import unittest +from unittest import TestCase + +from simplejson import encoder, scanner + + +def has_speedups(): + return encoder.c_make_encoder is not None + + +def skip_if_speedups_missing(func): + def wrapper(*args, **kwargs): + if not has_speedups(): + if hasattr(unittest, 'SkipTest'): + raise unittest.SkipTest("C Extension not available") + else: + 
sys.stdout.write("C Extension not available") + return + return func(*args, **kwargs) + + return wrapper + + +class TestDecode(TestCase): + @skip_if_speedups_missing + def test_make_scanner(self): + self.assertRaises(AttributeError, scanner.c_make_scanner, 1) + + @skip_if_speedups_missing + def test_make_encoder(self): + self.assertRaises( + TypeError, + encoder.c_make_encoder, + None, + ("\xCD\x7D\x3D\x4E\x12\x4C\xF9\x79\xD7" + "\x52\xBA\x82\xF2\x27\x4A\x7D\xA0\xCA\x75"), + None + ) diff --git a/app/lib/simplejson/tests/test_str_subclass.py b/app/lib/simplejson/tests/test_str_subclass.py new file mode 100644 index 0000000..771eb67 --- /dev/null +++ b/app/lib/simplejson/tests/test_str_subclass.py @@ -0,0 +1,16 @@ +from unittest import TestCase + +import simplejson +from simplejson.compat import text_type, u + +# Tests for issue demonstrated in https://github.com/simplejson/simplejson/issues/144 +class WonkyTextSubclass(text_type): + def __getslice__(self, start, end): + return self.__class__('not what you wanted!') + +class TestStrSubclass(TestCase): + def test_dump_load(self): + for s in ['', '"hello"', 'text', u('\u005c')]: + self.assertEqual( + s, + simplejson.loads(simplejson.dumps(WonkyTextSubclass(s)))) diff --git a/app/lib/simplejson/tests/test_subclass.py b/app/lib/simplejson/tests/test_subclass.py new file mode 100644 index 0000000..2bae3b6 --- /dev/null +++ b/app/lib/simplejson/tests/test_subclass.py @@ -0,0 +1,37 @@ +from unittest import TestCase +import simplejson as json + +from decimal import Decimal + +class AlternateInt(int): + def __repr__(self): + return 'invalid json' + __str__ = __repr__ + + +class AlternateFloat(float): + def __repr__(self): + return 'invalid json' + __str__ = __repr__ + + +# class AlternateDecimal(Decimal): +# def __repr__(self): +# return 'invalid json' + + +class TestSubclass(TestCase): + def test_int(self): + self.assertEqual(json.dumps(AlternateInt(1)), '1') + self.assertEqual(json.dumps(AlternateInt(-1)), '-1') + self.assertEqual(json.loads(json.dumps({AlternateInt(1): 1})), {'1': 1}) + + def test_float(self): + self.assertEqual(json.dumps(AlternateFloat(1.0)), '1.0') + self.assertEqual(json.dumps(AlternateFloat(-1.0)), '-1.0') + self.assertEqual(json.loads(json.dumps({AlternateFloat(1.0): 1})), {'1.0': 1}) + + # NOTE: Decimal subclasses are not supported as-is + # def test_decimal(self): + # self.assertEqual(json.dumps(AlternateDecimal('1.0')), '1.0') + # self.assertEqual(json.dumps(AlternateDecimal('-1.0')), '-1.0') diff --git a/app/lib/simplejson/tests/test_tool.py b/app/lib/simplejson/tests/test_tool.py new file mode 100644 index 0000000..ac2a14c --- /dev/null +++ b/app/lib/simplejson/tests/test_tool.py @@ -0,0 +1,97 @@ +from __future__ import with_statement +import os +import sys +import textwrap +import unittest +import subprocess +import tempfile +try: + # Python 3.x + from test.support import strip_python_stderr +except ImportError: + # Python 2.6+ + try: + from test.test_support import strip_python_stderr + except ImportError: + # Python 2.5 + import re + def strip_python_stderr(stderr): + return re.sub( + r"\[\d+ refs\]\r?\n?$".encode(), + "".encode(), + stderr).strip() + +class TestTool(unittest.TestCase): + data = """ + + [["blorpie"],[ "whoops" ] , [ + ],\t"d-shtaeou",\r"d-nthiouh", + "i-vhbjkhnth", {"nifty":87}, {"morefield" :\tfalse,"field" + :"yes"} ] + """ + + expect = textwrap.dedent("""\ + [ + [ + "blorpie" + ], + [ + "whoops" + ], + [], + "d-shtaeou", + "d-nthiouh", + "i-vhbjkhnth", + { + "nifty": 87 + }, + { + "field": "yes", 
+ "morefield": false + } + ] + """) + + def runTool(self, args=None, data=None): + argv = [sys.executable, '-m', 'simplejson.tool'] + if args: + argv.extend(args) + proc = subprocess.Popen(argv, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + out, err = proc.communicate(data) + self.assertEqual(strip_python_stderr(err), ''.encode()) + self.assertEqual(proc.returncode, 0) + return out + + def test_stdin_stdout(self): + self.assertEqual( + self.runTool(data=self.data.encode()), + self.expect.encode()) + + def test_infile_stdout(self): + with tempfile.NamedTemporaryFile() as infile: + infile.write(self.data.encode()) + infile.flush() + self.assertEqual( + self.runTool(args=[infile.name]), + self.expect.encode()) + + def test_infile_outfile(self): + with tempfile.NamedTemporaryFile() as infile: + infile.write(self.data.encode()) + infile.flush() + # outfile will get overwritten by tool, so the delete + # may not work on some platforms. Do it manually. + outfile = tempfile.NamedTemporaryFile() + try: + self.assertEqual( + self.runTool(args=[infile.name, outfile.name]), + ''.encode()) + with open(outfile.name, 'rb') as f: + self.assertEqual(f.read(), self.expect.encode()) + finally: + outfile.close() + if os.path.exists(outfile.name): + os.unlink(outfile.name) diff --git a/app/lib/simplejson/tests/test_tuple.py b/app/lib/simplejson/tests/test_tuple.py new file mode 100644 index 0000000..4ad7b0e --- /dev/null +++ b/app/lib/simplejson/tests/test_tuple.py @@ -0,0 +1,47 @@ +import unittest + +from simplejson.compat import StringIO +import simplejson as json + +class TestTuples(unittest.TestCase): + def test_tuple_array_dumps(self): + t = (1, 2, 3) + expect = json.dumps(list(t)) + # Default is True + self.assertEqual(expect, json.dumps(t)) + self.assertEqual(expect, json.dumps(t, tuple_as_array=True)) + self.assertRaises(TypeError, json.dumps, t, tuple_as_array=False) + # Ensure that the "default" does not get called + self.assertEqual(expect, json.dumps(t, default=repr)) + self.assertEqual(expect, json.dumps(t, tuple_as_array=True, + default=repr)) + # Ensure that the "default" gets called + self.assertEqual( + json.dumps(repr(t)), + json.dumps(t, tuple_as_array=False, default=repr)) + + def test_tuple_array_dump(self): + t = (1, 2, 3) + expect = json.dumps(list(t)) + # Default is True + sio = StringIO() + json.dump(t, sio) + self.assertEqual(expect, sio.getvalue()) + sio = StringIO() + json.dump(t, sio, tuple_as_array=True) + self.assertEqual(expect, sio.getvalue()) + self.assertRaises(TypeError, json.dump, t, StringIO(), + tuple_as_array=False) + # Ensure that the "default" does not get called + sio = StringIO() + json.dump(t, sio, default=repr) + self.assertEqual(expect, sio.getvalue()) + sio = StringIO() + json.dump(t, sio, tuple_as_array=True, default=repr) + self.assertEqual(expect, sio.getvalue()) + # Ensure that the "default" gets called + sio = StringIO() + json.dump(t, sio, tuple_as_array=False, default=repr) + self.assertEqual( + json.dumps(repr(t)), + sio.getvalue()) diff --git a/app/lib/simplejson/tests/test_unicode.py b/app/lib/simplejson/tests/test_unicode.py new file mode 100644 index 0000000..3b37f65 --- /dev/null +++ b/app/lib/simplejson/tests/test_unicode.py @@ -0,0 +1,153 @@ +import sys +import codecs +from unittest import TestCase + +import simplejson as json +from simplejson.compat import unichr, text_type, b, u, BytesIO + +class TestUnicode(TestCase): + def test_encoding1(self): + encoder = json.JSONEncoder(encoding='utf-8') + u = u'\N{GREEK 
SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + s = u.encode('utf-8') + ju = encoder.encode(u) + js = encoder.encode(s) + self.assertEqual(ju, js) + + def test_encoding2(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + s = u.encode('utf-8') + ju = json.dumps(u, encoding='utf-8') + js = json.dumps(s, encoding='utf-8') + self.assertEqual(ju, js) + + def test_encoding3(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumps(u) + self.assertEqual(j, '"\\u03b1\\u03a9"') + + def test_encoding4(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumps([u]) + self.assertEqual(j, '["\\u03b1\\u03a9"]') + + def test_encoding5(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumps(u, ensure_ascii=False) + self.assertEqual(j, u'"' + u + u'"') + + def test_encoding6(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumps([u], ensure_ascii=False) + self.assertEqual(j, u'["' + u + u'"]') + + def test_big_unicode_encode(self): + u = u'\U0001d120' + self.assertEqual(json.dumps(u), '"\\ud834\\udd20"') + self.assertEqual(json.dumps(u, ensure_ascii=False), u'"\U0001d120"') + + def test_big_unicode_decode(self): + u = u'z\U0001d120x' + self.assertEqual(json.loads('"' + u + '"'), u) + self.assertEqual(json.loads('"z\\ud834\\udd20x"'), u) + + def test_unicode_decode(self): + for i in range(0, 0xd7ff): + u = unichr(i) + #s = '"\\u{0:04x}"'.format(i) + s = '"\\u%04x"' % (i,) + self.assertEqual(json.loads(s), u) + + def test_object_pairs_hook_with_unicode(self): + s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}' + p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4), + (u"qrt", 5), (u"pad", 6), (u"hoy", 7)] + self.assertEqual(json.loads(s), eval(s)) + self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p) + od = json.loads(s, object_pairs_hook=json.OrderedDict) + self.assertEqual(od, json.OrderedDict(p)) + self.assertEqual(type(od), json.OrderedDict) + # the object_pairs_hook takes priority over the object_hook + self.assertEqual(json.loads(s, + object_pairs_hook=json.OrderedDict, + object_hook=lambda x: None), + json.OrderedDict(p)) + + + def test_default_encoding(self): + self.assertEqual(json.loads(u'{"a": "\xe9"}'.encode('utf-8')), + {'a': u'\xe9'}) + + def test_unicode_preservation(self): + self.assertEqual(type(json.loads(u'""')), text_type) + self.assertEqual(type(json.loads(u'"a"')), text_type) + self.assertEqual(type(json.loads(u'["a"]')[0]), text_type) + + def test_ensure_ascii_false_returns_unicode(self): + # http://code.google.com/p/simplejson/issues/detail?id=48 + self.assertEqual(type(json.dumps([], ensure_ascii=False)), text_type) + self.assertEqual(type(json.dumps(0, ensure_ascii=False)), text_type) + self.assertEqual(type(json.dumps({}, ensure_ascii=False)), text_type) + self.assertEqual(type(json.dumps("", ensure_ascii=False)), text_type) + + def test_ensure_ascii_false_bytestring_encoding(self): + # http://code.google.com/p/simplejson/issues/detail?id=48 + doc1 = {u'quux': b('Arr\xc3\xaat sur images')} + doc2 = {u'quux': u('Arr\xeat sur images')} + doc_ascii = '{"quux": "Arr\\u00eat sur images"}' + doc_unicode = u'{"quux": "Arr\xeat sur images"}' + self.assertEqual(json.dumps(doc1), doc_ascii) + self.assertEqual(json.dumps(doc2), doc_ascii) + self.assertEqual(json.dumps(doc1, ensure_ascii=False), doc_unicode) + self.assertEqual(json.dumps(doc2, ensure_ascii=False), doc_unicode) + + def 
test_ensure_ascii_linebreak_encoding(self):
+        # http://timelessrepo.com/json-isnt-a-javascript-subset
+        s1 = u'\u2029\u2028'
+        s2 = s1.encode('utf8')
+        expect = '"\\u2029\\u2028"'
+        self.assertEqual(json.dumps(s1), expect)
+        self.assertEqual(json.dumps(s2), expect)
+        self.assertEqual(json.dumps(s1, ensure_ascii=False), expect)
+        self.assertEqual(json.dumps(s2, ensure_ascii=False), expect)
+
+    def test_invalid_escape_sequences(self):
+        # incomplete escape sequence
+        self.assertRaises(json.JSONDecodeError, json.loads, '"\\u')
+        self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1')
+        self.assertRaises(json.JSONDecodeError, json.loads, '"\\u12')
+        self.assertRaises(json.JSONDecodeError, json.loads, '"\\u123')
+        self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1234')
+        # invalid escape sequence
+        self.assertRaises(json.JSONDecodeError, json.loads, '"\\u123x"')
+        self.assertRaises(json.JSONDecodeError, json.loads, '"\\u12x4"')
+        self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1x34"')
+        self.assertRaises(json.JSONDecodeError, json.loads, '"\\ux234"')
+        if sys.maxunicode > 65535:
+            # invalid escape sequence for low surrogate
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u0"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u00"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u000"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u000x"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u00x0"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u0x00"')
+            self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\ux000"')
+
+    def test_ensure_ascii_still_works(self):
+        # in the ascii range, ensure that everything is the same
+        for c in map(unichr, range(0, 127)):
+            self.assertEqual(
+                json.dumps(c, ensure_ascii=False),
+                json.dumps(c))
+        snowman = u'\N{SNOWMAN}'
+        self.assertEqual(
+            json.dumps(c + snowman, ensure_ascii=False),
+            u'"' + c + snowman + u'"')
+
+    def test_strip_bom(self):
+        content = u"\u3053\u3093\u306b\u3061\u308f"
+        json_doc = codecs.BOM_UTF8 + b(json.dumps(content))
+        self.assertEqual(json.load(BytesIO(json_doc)), content)
+        for doc in json_doc, json_doc.decode('utf8'):
+            self.assertEqual(json.loads(doc), content)
diff --git a/app/lib/simplejson/tool.py b/app/lib/simplejson/tool.py
new file mode 100644
index 0000000..062e8e2
--- /dev/null
+++ b/app/lib/simplejson/tool.py
@@ -0,0 +1,42 @@
+r"""Command-line tool to validate and pretty-print JSON
+
+Usage::
+
+    $ echo '{"json":"obj"}' | python -m simplejson.tool
+    {
+        "json": "obj"
+    }
+    $ echo '{ 1.2:3.4}' | python -m simplejson.tool
+    Expecting property name: line 1 column 2 (char 2)
+
+"""
+from __future__ import with_statement
+import sys
+import simplejson as json
+
+def main():
+    if len(sys.argv) == 1:
+        infile = sys.stdin
+        outfile = sys.stdout
+    elif len(sys.argv) == 2:
+        infile = open(sys.argv[1], 'r')
+        outfile = sys.stdout
+    elif len(sys.argv) == 3:
+        infile = open(sys.argv[1], 'r')
+        outfile = open(sys.argv[2], 'w')
+    else:
+        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
+    with infile:
+        try:
+            obj = json.load(infile,
+                            object_pairs_hook=json.OrderedDict,
+                            use_decimal=True)
+        except ValueError:
+            raise SystemExit(sys.exc_info()[1])
+    with outfile:
+        json.dump(obj, outfile, sort_keys=True, indent='    ', use_decimal=True)
+        outfile.write('\n')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/app/lib/six-1.10.0.dist-info/DESCRIPTION.rst
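
# ---- editor's sketch: not part of the vendored diff -----------------------
# tool.py above is meant to be driven as a module, the same way test_tool.py
# drives it. A sketch using subprocess (the `input` keyword needs Python
# 3.4+, and it assumes simplejson is importable by `sys.executable`):
import subprocess
import sys

out = subprocess.check_output(
    [sys.executable, '-m', 'simplejson.tool'],
    input=b'{"json":"obj"}')
print(out.decode('ascii'))   # pretty-printed with sort_keys and 4-space indent
# ----------------------------------------------------------------------------
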
b/app/lib/six-1.10.0.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..2e2607d --- /dev/null +++ b/app/lib/six-1.10.0.dist-info/DESCRIPTION.rst @@ -0,0 +1,18 @@ +Six is a Python 2 and 3 compatibility library. It provides utility functions +for smoothing over the differences between the Python versions with the goal of +writing Python code that is compatible on both Python versions. See the +documentation for more information on what is provided. + +Six supports every Python version since 2.6. It is contained in only one Python +file, so it can be easily copied into your project. (The copyright and license +notice must be retained.) + +Online documentation is at https://pythonhosted.org/six/. + +Bugs can be reported to https://bitbucket.org/gutworth/six. The code can also +be found there. + +For questions about six or porting in general, email the python-porting mailing +list: https://mail.python.org/mailman/listinfo/python-porting + + diff --git a/app/lib/six-1.10.0.dist-info/METADATA b/app/lib/six-1.10.0.dist-info/METADATA new file mode 100644 index 0000000..4fc3d07 --- /dev/null +++ b/app/lib/six-1.10.0.dist-info/METADATA @@ -0,0 +1,34 @@ +Metadata-Version: 2.0 +Name: six +Version: 1.10.0 +Summary: Python 2 and 3 compatibility utilities +Home-page: http://pypi.python.org/pypi/six/ +Author: Benjamin Peterson +Author-email: benjamin@python.org +License: MIT +Platform: UNKNOWN +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities + +Six is a Python 2 and 3 compatibility library. It provides utility functions +for smoothing over the differences between the Python versions with the goal of +writing Python code that is compatible on both Python versions. See the +documentation for more information on what is provided. + +Six supports every Python version since 2.6. It is contained in only one Python +file, so it can be easily copied into your project. (The copyright and license +notice must be retained.) + +Online documentation is at https://pythonhosted.org/six/. + +Bugs can be reported to https://bitbucket.org/gutworth/six. The code can also +be found there. 
+ +For questions about six or porting in general, email the python-porting mailing +list: https://mail.python.org/mailman/listinfo/python-porting + + diff --git a/app/lib/six-1.10.0.dist-info/RECORD b/app/lib/six-1.10.0.dist-info/RECORD new file mode 100644 index 0000000..6350c4e --- /dev/null +++ b/app/lib/six-1.10.0.dist-info/RECORD @@ -0,0 +1,8 @@ +six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098 +six-1.10.0.dist-info/DESCRIPTION.rst,sha256=QWBtSTT2zzabwJv1NQbTfClSX13m-Qc6tqU4TRL1RLs,774 +six-1.10.0.dist-info/METADATA,sha256=5HceJsUnHof2IRamlCKO2MwNjve1eSP4rLzVQDfwpCQ,1283 +six-1.10.0.dist-info/RECORD,, +six-1.10.0.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110 +six-1.10.0.dist-info/metadata.json,sha256=jtOeeTBubYDChl_5Ql5ZPlKoHgg6rdqRIjOz1e5Ek2U,658 +six-1.10.0.dist-info/top_level.txt,sha256=_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais,4 +__pycache__/six.cpython-34.pyc,, diff --git a/app/lib/six-1.10.0.dist-info/WHEEL b/app/lib/six-1.10.0.dist-info/WHEEL new file mode 100644 index 0000000..0de529b --- /dev/null +++ b/app/lib/six-1.10.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.26.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/app/lib/six-1.10.0.dist-info/metadata.json b/app/lib/six-1.10.0.dist-info/metadata.json new file mode 100644 index 0000000..21f9f6c --- /dev/null +++ b/app/lib/six-1.10.0.dist-info/metadata.json @@ -0,0 +1 @@ +{"generator": "bdist_wheel (0.26.0)", "summary": "Python 2 and 3 compatibility utilities", "classifiers": ["Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Software Development :: Libraries", "Topic :: Utilities"], "extensions": {"python.details": {"project_urls": {"Home": "http://pypi.python.org/pypi/six/"}, "contacts": [{"email": "benjamin@python.org", "name": "Benjamin Peterson", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}}, "license": "MIT", "metadata_version": "2.0", "name": "six", "version": "1.10.0"} \ No newline at end of file diff --git a/app/lib/six-1.10.0.dist-info/top_level.txt b/app/lib/six-1.10.0.dist-info/top_level.txt new file mode 100644 index 0000000..ffe2fce --- /dev/null +++ b/app/lib/six-1.10.0.dist-info/top_level.txt @@ -0,0 +1 @@ +six diff --git a/app/lib/six.py b/app/lib/six.py new file mode 100644 index 0000000..190c023 --- /dev/null +++ b/app/lib/six.py @@ -0,0 +1,868 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2015 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.10.0" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. + delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. 
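
# ---- editor's sketch: not part of the vendored diff -----------------------
# The PY2/PY3 constants and type tuples defined above are six's basic
# building blocks: one isinstance() check covers str/basestring and int/long
# on both interpreters. Assuming the vendored six imports:
import six

def describe(value):
    if isinstance(value, six.string_types):   # (str,) on Py3, (basestring,) on Py2
        return 'text'
    if isinstance(value, six.integer_types):  # (int,) on Py3, (int, long) on Py2
        return 'integer'
    return type(value).__name__

print(describe(u'hello'), describe(42), describe(3.5))
# ----------------------------------------------------------------------------
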
It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." + fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. + + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + 
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." 
+ attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", 
"urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = 
_importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack 
+ del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + if from_value is None: + raise value + raise value from from_value +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + raise value from from_value +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. 
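+        # (only real Python 2 ``file`` objects carry an ``encoding``
+        # attribute; other writables receive the data unencoded)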
+ if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(meta): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. 
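+# (an empty __path__ makes the import machinery treat six as a package, so
+# that "six.moves.*" submodule lookups are routed through the meta path
+# importer registered below)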
+__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/app/lib/sqlalchemy/__init__.py b/app/lib/sqlalchemy/__init__.py new file mode 100644 index 0000000..a2116e0 --- /dev/null +++ b/app/lib/sqlalchemy/__init__.py @@ -0,0 +1,146 @@ +# sqlalchemy/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + + +from .sql import ( + alias, + all_, + and_, + any_, + asc, + between, + bindparam, + case, + cast, + collate, + column, + delete, + desc, + distinct, + except_, + except_all, + exists, + extract, + false, + func, + funcfilter, + insert, + intersect, + intersect_all, + join, + lateral, + literal, + literal_column, + modifier, + not_, + null, + or_, + outerjoin, + outparam, + over, + select, + subquery, + table, + tablesample, + text, + true, + tuple_, + type_coerce, + union, + union_all, + update, + within_group, + ) + +from .types import ( + ARRAY, + BIGINT, + BINARY, + BLOB, + BOOLEAN, + BigInteger, + Binary, + Boolean, + CHAR, + CLOB, + DATE, + DATETIME, + DECIMAL, + Date, + DateTime, + Enum, + FLOAT, + Float, + INT, + INTEGER, + Integer, + Interval, + JSON, + LargeBinary, + NCHAR, + NVARCHAR, + NUMERIC, + Numeric, + PickleType, + REAL, + SMALLINT, + SmallInteger, + String, + TEXT, + TIME, + TIMESTAMP, + Text, + Time, + TypeDecorator, + Unicode, + UnicodeText, + VARBINARY, + VARCHAR, + ) + + +from .schema import ( + CheckConstraint, + Column, + ColumnDefault, + Constraint, + DefaultClause, + FetchedValue, + ForeignKey, + ForeignKeyConstraint, + Index, + MetaData, + PassiveDefault, + PrimaryKeyConstraint, + Sequence, + Table, + ThreadLocalMetaData, + UniqueConstraint, + DDL, + BLANK_SCHEMA +) + + +from .inspection import inspect +from .engine import create_engine, engine_from_config + +__version__ = '1.1.9' + + +def __go(lcls): + global __all__ + + from . import events + from . 
import util as _sa_util + + import inspect as _inspect + + __all__ = sorted(name for name, obj in lcls.items() + if not (name.startswith('_') or _inspect.ismodule(obj))) + + _sa_util.dependencies.resolve_all("sqlalchemy") +__go(locals()) diff --git a/app/lib/sqlalchemy/connectors/__init__.py b/app/lib/sqlalchemy/connectors/__init__.py new file mode 100644 index 0000000..5cf06d8 --- /dev/null +++ b/app/lib/sqlalchemy/connectors/__init__.py @@ -0,0 +1,10 @@ +# connectors/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + + +class Connector(object): + pass diff --git a/app/lib/sqlalchemy/connectors/mxodbc.py b/app/lib/sqlalchemy/connectors/mxodbc.py new file mode 100644 index 0000000..32e7e18 --- /dev/null +++ b/app/lib/sqlalchemy/connectors/mxodbc.py @@ -0,0 +1,150 @@ +# connectors/mxodbc.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +Provide a SQLALchemy connector for the eGenix mxODBC commercial +Python adapter for ODBC. This is not a free product, but eGenix +provides SQLAlchemy with a license for use in continuous integration +testing. + +This has been tested for use with mxODBC 3.1.2 on SQL Server 2005 +and 2008, using the SQL Server Native driver. However, it is +possible for this to be used on other database platforms. + +For more info on mxODBC, see http://www.egenix.com/ + +""" + +import sys +import re +import warnings + +from . import Connector + + +class MxODBCConnector(Connector): + driver = 'mxodbc' + + supports_sane_multi_rowcount = False + supports_unicode_statements = True + supports_unicode_binds = True + + supports_native_decimal = True + + @classmethod + def dbapi(cls): + # this classmethod will normally be replaced by an instance + # attribute of the same name, so this is normally only called once. + cls._load_mx_exceptions() + platform = sys.platform + if platform == 'win32': + from mx.ODBC import Windows as module + # this can be the string "linux2", and possibly others + elif 'linux' in platform: + from mx.ODBC import unixODBC as module + elif platform == 'darwin': + from mx.ODBC import iODBC as module + else: + raise ImportError("Unrecognized platform for mxODBC import") + return module + + @classmethod + def _load_mx_exceptions(cls): + """ Import mxODBC exception classes into the module namespace, + as if they had been imported normally. This is done here + to avoid requiring all SQLAlchemy users to install mxODBC. + """ + global InterfaceError, ProgrammingError + from mx.ODBC import InterfaceError + from mx.ODBC import ProgrammingError + + def on_connect(self): + def connect(conn): + conn.stringformat = self.dbapi.MIXED_STRINGFORMAT + conn.datetimeformat = self.dbapi.PYDATETIME_DATETIMEFORMAT + conn.decimalformat = self.dbapi.DECIMAL_DECIMALFORMAT + conn.errorhandler = self._error_handler() + return connect + + def _error_handler(self): + """ Return a handler that adjusts mxODBC's raised Warnings to + emit Python standard warnings. 
+ """ + from mx.ODBC.Error import Warning as MxOdbcWarning + + def error_handler(connection, cursor, errorclass, errorvalue): + if issubclass(errorclass, MxOdbcWarning): + errorclass.__bases__ = (Warning,) + warnings.warn(message=str(errorvalue), + category=errorclass, + stacklevel=2) + else: + raise errorclass(errorvalue) + return error_handler + + def create_connect_args(self, url): + """ Return a tuple of *args,**kwargs for creating a connection. + + The mxODBC 3.x connection constructor looks like this: + + connect(dsn, user='', password='', + clear_auto_commit=1, errorhandler=None) + + This method translates the values in the provided uri + into args and kwargs needed to instantiate an mxODBC Connection. + + The arg 'errorhandler' is not used by SQLAlchemy and will + not be populated. + + """ + opts = url.translate_connect_args(username='user') + opts.update(url.query) + args = opts.pop('host') + opts.pop('port', None) + opts.pop('database', None) + return (args,), opts + + def is_disconnect(self, e, connection, cursor): + # TODO: eGenix recommends checking connection.closed here + # Does that detect dropped connections ? + if isinstance(e, self.dbapi.ProgrammingError): + return "connection already closed" in str(e) + elif isinstance(e, self.dbapi.Error): + return '[08S01]' in str(e) + else: + return False + + def _get_server_version_info(self, connection): + # eGenix suggests using conn.dbms_version instead + # of what we're doing here + dbapi_con = connection.connection + version = [] + r = re.compile(r'[.\-]') + # 18 == pyodbc.SQL_DBMS_VER + for n in r.split(dbapi_con.getinfo(18)[1]): + try: + version.append(int(n)) + except ValueError: + version.append(n) + return tuple(version) + + def _get_direct(self, context): + if context: + native_odbc_execute = context.execution_options.\ + get('native_odbc_execute', 'auto') + # default to direct=True in all cases, is more generally + # compatible especially with SQL Server + return False if native_odbc_execute is True else True + else: + return True + + def do_executemany(self, cursor, statement, parameters, context=None): + cursor.executemany( + statement, parameters, direct=self._get_direct(context)) + + def do_execute(self, cursor, statement, parameters, context=None): + cursor.execute(statement, parameters, direct=self._get_direct(context)) diff --git a/app/lib/sqlalchemy/connectors/pyodbc.py b/app/lib/sqlalchemy/connectors/pyodbc.py new file mode 100644 index 0000000..ee8445d --- /dev/null +++ b/app/lib/sqlalchemy/connectors/pyodbc.py @@ -0,0 +1,196 @@ +# connectors/pyodbc.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from . import Connector +from .. 
import util + + +import sys +import re + + +class PyODBCConnector(Connector): + driver = 'pyodbc' + + supports_sane_multi_rowcount = False + + if util.py2k: + # PyODBC unicode is broken on UCS-4 builds + supports_unicode = sys.maxunicode == 65535 + supports_unicode_statements = supports_unicode + + supports_native_decimal = True + default_paramstyle = 'named' + + # for non-DSN connections, this *may* be used to + # hold the desired driver name + pyodbc_driver_name = None + + # will be set to True after initialize() + # if the freetds.so is detected + freetds = False + + # will be set to the string version of + # the FreeTDS driver if freetds is detected + freetds_driver_version = None + + # will be set to True after initialize() + # if the libessqlsrv.so is detected + easysoft = False + + def __init__(self, supports_unicode_binds=None, **kw): + super(PyODBCConnector, self).__init__(**kw) + self._user_supports_unicode_binds = supports_unicode_binds + + @classmethod + def dbapi(cls): + return __import__('pyodbc') + + def create_connect_args(self, url): + opts = url.translate_connect_args(username='user') + opts.update(url.query) + + keys = opts + + query = url.query + + connect_args = {} + for param in ('ansi', 'unicode_results', 'autocommit'): + if param in keys: + connect_args[param] = util.asbool(keys.pop(param)) + + if 'odbc_connect' in keys: + connectors = [util.unquote_plus(keys.pop('odbc_connect'))] + else: + def check_quote(token): + if ";" in str(token): + token = "'%s'" % token + return token + + keys = dict( + (k, check_quote(v)) for k, v in keys.items() + ) + + dsn_connection = 'dsn' in keys or \ + ('host' in keys and 'database' not in keys) + if dsn_connection: + connectors = ['dsn=%s' % (keys.pop('host', '') or + keys.pop('dsn', ''))] + else: + port = '' + if 'port' in keys and 'port' not in query: + port = ',%d' % int(keys.pop('port')) + + connectors = [] + driver = keys.pop('driver', self.pyodbc_driver_name) + if driver is None: + util.warn( + "No driver name specified; " + "this is expected by PyODBC when using " + "DSN-less connections") + else: + connectors.append("DRIVER={%s}" % driver) + + connectors.extend( + [ + 'Server=%s%s' % (keys.pop('host', ''), port), + 'Database=%s' % keys.pop('database', '') + ]) + + user = keys.pop("user", None) + if user: + connectors.append("UID=%s" % user) + connectors.append("PWD=%s" % keys.pop('password', '')) + else: + connectors.append("Trusted_Connection=Yes") + + # if set to 'Yes', the ODBC layer will try to automagically + # convert textual data from your database encoding to your + # client encoding. This should obviously be set to 'No' if + # you query a cp1253 encoded database from a latin1 client... + if 'odbc_autotranslate' in keys: + connectors.append("AutoTranslate=%s" % + keys.pop("odbc_autotranslate")) + + connectors.extend(['%s=%s' % (k, v) for k, v in keys.items()]) + + return [[";".join(connectors)], connect_args] + + def is_disconnect(self, e, connection, cursor): + if isinstance(e, self.dbapi.ProgrammingError): + return "The cursor's connection has been closed." in str(e) or \ + 'Attempt to use a closed connection.' in str(e) + elif isinstance(e, self.dbapi.Error): + return '[08S01]' in str(e) + else: + return False + + def initialize(self, connection): + # determine FreeTDS first. can't issue SQL easily + # without getting unicode_statements/binds set up. 
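+        # (detection below keys off SQL_DRIVER_NAME: a libtdsodbc*.so match
+        # means FreeTDS, a libessqlsrv*.so match means the Easysoft driver)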
+ + pyodbc = self.dbapi + + dbapi_con = connection.connection + + _sql_driver_name = dbapi_con.getinfo(pyodbc.SQL_DRIVER_NAME) + self.freetds = bool(re.match(r".*libtdsodbc.*\.so", _sql_driver_name + )) + self.easysoft = bool(re.match(r".*libessqlsrv.*\.so", _sql_driver_name + )) + + if self.freetds: + self.freetds_driver_version = dbapi_con.getinfo( + pyodbc.SQL_DRIVER_VER) + + self.supports_unicode_statements = ( + not util.py2k or + (not self.freetds and not self.easysoft) + ) + + if self._user_supports_unicode_binds is not None: + self.supports_unicode_binds = self._user_supports_unicode_binds + elif util.py2k: + self.supports_unicode_binds = ( + not self.freetds or self.freetds_driver_version >= '0.91' + ) and not self.easysoft + else: + self.supports_unicode_binds = True + + # run other initialization which asks for user name, etc. + super(PyODBCConnector, self).initialize(connection) + + def _dbapi_version(self): + if not self.dbapi: + return () + return self._parse_dbapi_version(self.dbapi.version) + + def _parse_dbapi_version(self, vers): + m = re.match( + r'(?:py.*-)?([\d\.]+)(?:-(\w+))?', + vers + ) + if not m: + return () + vers = tuple([int(x) for x in m.group(1).split(".")]) + if m.group(2): + vers += (m.group(2),) + return vers + + def _get_server_version_info(self, connection): + # NOTE: this function is not reliable, particularly when + # freetds is in use. Implement database-specific server version + # queries. + dbapi_con = connection.connection + version = [] + r = re.compile(r'[.\-]') + for n in r.split(dbapi_con.getinfo(self.dbapi.SQL_DBMS_VER)): + try: + version.append(int(n)) + except ValueError: + version.append(n) + return tuple(version) diff --git a/app/lib/sqlalchemy/connectors/zxJDBC.py b/app/lib/sqlalchemy/connectors/zxJDBC.py new file mode 100644 index 0000000..8a5b749 --- /dev/null +++ b/app/lib/sqlalchemy/connectors/zxJDBC.py @@ -0,0 +1,60 @@ +# connectors/zxJDBC.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +import sys +from . 
import Connector + + +class ZxJDBCConnector(Connector): + driver = 'zxjdbc' + + supports_sane_rowcount = False + supports_sane_multi_rowcount = False + + supports_unicode_binds = True + supports_unicode_statements = sys.version > '2.5.0+' + description_encoding = None + default_paramstyle = 'qmark' + + jdbc_db_name = None + jdbc_driver_name = None + + @classmethod + def dbapi(cls): + from com.ziclix.python.sql import zxJDBC + return zxJDBC + + def _driver_kwargs(self): + """Return kw arg dict to be sent to connect().""" + return {} + + def _create_jdbc_url(self, url): + """Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`""" + return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host, + url.port is not None + and ':%s' % url.port or '', + url.database) + + def create_connect_args(self, url): + opts = self._driver_kwargs() + opts.update(url.query) + return [ + [self._create_jdbc_url(url), + url.username, url.password, + self.jdbc_driver_name], + opts] + + def is_disconnect(self, e, connection, cursor): + if not isinstance(e, self.dbapi.ProgrammingError): + return False + e = str(e) + return 'connection is closed' in e or 'cursor is closed' in e + + def _get_server_version_info(self, connection): + # use connection.connection.dbversion, and parse appropriately + # to get a tuple + raise NotImplementedError() diff --git a/app/lib/sqlalchemy/cprocessors.so b/app/lib/sqlalchemy/cprocessors.so new file mode 100755 index 0000000..f64d60c Binary files /dev/null and b/app/lib/sqlalchemy/cprocessors.so differ diff --git a/app/lib/sqlalchemy/cresultproxy.so b/app/lib/sqlalchemy/cresultproxy.so new file mode 100755 index 0000000..c00c034 Binary files /dev/null and b/app/lib/sqlalchemy/cresultproxy.so differ diff --git a/app/lib/sqlalchemy/cutils.so b/app/lib/sqlalchemy/cutils.so new file mode 100755 index 0000000..81c6b44 Binary files /dev/null and b/app/lib/sqlalchemy/cutils.so differ diff --git a/app/lib/sqlalchemy/databases/__init__.py b/app/lib/sqlalchemy/databases/__init__.py new file mode 100644 index 0000000..3fb659d --- /dev/null +++ b/app/lib/sqlalchemy/databases/__init__.py @@ -0,0 +1,30 @@ +# databases/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Include imports from the sqlalchemy.dialects package for backwards +compatibility with pre 0.6 versions. + +""" +from ..dialects.sqlite import base as sqlite +from ..dialects.postgresql import base as postgresql +postgres = postgresql +from ..dialects.mysql import base as mysql +from ..dialects.oracle import base as oracle +from ..dialects.firebird import base as firebird +from ..dialects.mssql import base as mssql +from ..dialects.sybase import base as sybase + + +__all__ = ( + 'firebird', + 'mssql', + 'mysql', + 'postgresql', + 'sqlite', + 'oracle', + 'sybase', +) diff --git a/app/lib/sqlalchemy/dialects/__init__.py b/app/lib/sqlalchemy/dialects/__init__.py new file mode 100644 index 0000000..44051f0 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/__init__.py @@ -0,0 +1,56 @@ +# dialects/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +__all__ = ( + 'firebird', + 'mssql', + 'mysql', + 'oracle', + 'postgresql', + 'sqlite', + 'sybase', +) + +from .. 
import util + +_translates = {'postgres': 'postgresql'} + +def _auto_fn(name): + """default dialect importer. + + plugs into the :class:`.PluginLoader` + as a first-hit system. + + """ + if "." in name: + dialect, driver = name.split(".") + else: + dialect = name + driver = "base" + + if dialect in _translates: + translated = _translates[dialect] + util.warn_deprecated( + "The '%s' dialect name has been " + "renamed to '%s'" % (dialect, translated) + ) + dialect = translated + try: + module = __import__('sqlalchemy.dialects.%s' % (dialect, )).dialects + except ImportError: + return None + + module = getattr(module, dialect) + if hasattr(module, driver): + module = getattr(module, driver) + return lambda: module.dialect + else: + return None + +registry = util.PluginLoader("sqlalchemy.dialects", auto_fn=_auto_fn) + +plugins = util.PluginLoader("sqlalchemy.plugins") \ No newline at end of file diff --git a/app/lib/sqlalchemy/dialects/firebird/__init__.py b/app/lib/sqlalchemy/dialects/firebird/__init__.py new file mode 100644 index 0000000..8dd9d11 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/firebird/__init__.py @@ -0,0 +1,21 @@ +# firebird/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from sqlalchemy.dialects.firebird import base, kinterbasdb, fdb + +base.dialect = fdb.dialect + +from sqlalchemy.dialects.firebird.base import \ + SMALLINT, BIGINT, FLOAT, FLOAT, DATE, TIME, \ + TEXT, NUMERIC, FLOAT, TIMESTAMP, VARCHAR, CHAR, BLOB,\ + dialect + +__all__ = ( + 'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME', + 'TEXT', 'NUMERIC', 'FLOAT', 'TIMESTAMP', 'VARCHAR', 'CHAR', 'BLOB', + 'dialect' +) diff --git a/app/lib/sqlalchemy/dialects/firebird/base.py b/app/lib/sqlalchemy/dialects/firebird/base.py new file mode 100644 index 0000000..7d4aca5 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/firebird/base.py @@ -0,0 +1,741 @@ +# firebird/base.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +r""" + +.. dialect:: firebird + :name: Firebird + +Firebird Dialects +----------------- + +Firebird offers two distinct dialects_ (not to be confused with a +SQLAlchemy ``Dialect``): + +dialect 1 + This is the old syntax and behaviour, inherited from Interbase pre-6.0. + +dialect 3 + This is the newer and supported syntax, introduced in Interbase 6.0. + +The SQLAlchemy Firebird dialect detects these versions and +adjusts its representation of SQL accordingly. However, +support for dialect 1 is not well tested and probably has +incompatibilities. + +Locking Behavior +---------------- + +Firebird locks tables aggressively. For this reason, a DROP TABLE may +hang until other transactions are released. SQLAlchemy does its best +to release transactions as quickly as possible. The most common cause +of hanging transactions is a non-fully consumed result set, i.e.:: + + result = engine.execute("select * from table") + row = result.fetchone() + return + +Where above, the ``ResultProxy`` has not been fully consumed. The +connection will be returned to the pool and the transactional state +rolled back once the Python garbage collector reclaims the objects +which hold onto the connection, which often occurs asynchronously. 
+The above use case can be alleviated by calling ``first()`` on the +``ResultProxy`` which will fetch the first row and immediately close +all remaining cursor/connection resources. + +RETURNING support +----------------- + +Firebird 2.0 supports returning a result set from inserts, and 2.1 +extends that to deletes and updates. This is generically exposed by +the SQLAlchemy ``returning()`` method, such as:: + + # INSERT..RETURNING + result = table.insert().returning(table.c.col1, table.c.col2).\ + values(name='foo') + print result.fetchall() + + # UPDATE..RETURNING + raises = empl.update().returning(empl.c.id, empl.c.salary).\ + where(empl.c.sales>100).\ + values(dict(salary=empl.c.salary * 1.1)) + print raises.fetchall() + + +.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html + +""" + +import datetime + +from sqlalchemy import schema as sa_schema +from sqlalchemy import exc, types as sqltypes, sql, util +from sqlalchemy.sql import expression +from sqlalchemy.engine import base, default, reflection +from sqlalchemy.sql import compiler +from sqlalchemy.sql.elements import quoted_name + +from sqlalchemy.types import (BIGINT, BLOB, DATE, FLOAT, INTEGER, NUMERIC, + SMALLINT, TEXT, TIME, TIMESTAMP, Integer) + + +RESERVED_WORDS = set([ + "active", "add", "admin", "after", "all", "alter", "and", "any", "as", + "asc", "ascending", "at", "auto", "avg", "before", "begin", "between", + "bigint", "bit_length", "blob", "both", "by", "case", "cast", "char", + "character", "character_length", "char_length", "check", "close", + "collate", "column", "commit", "committed", "computed", "conditional", + "connect", "constraint", "containing", "count", "create", "cross", + "cstring", "current", "current_connection", "current_date", + "current_role", "current_time", "current_timestamp", + "current_transaction", "current_user", "cursor", "database", "date", + "day", "dec", "decimal", "declare", "default", "delete", "desc", + "descending", "disconnect", "distinct", "do", "domain", "double", + "drop", "else", "end", "entry_point", "escape", "exception", + "execute", "exists", "exit", "external", "extract", "fetch", "file", + "filter", "float", "for", "foreign", "from", "full", "function", + "gdscode", "generator", "gen_id", "global", "grant", "group", + "having", "hour", "if", "in", "inactive", "index", "inner", + "input_type", "insensitive", "insert", "int", "integer", "into", "is", + "isolation", "join", "key", "leading", "left", "length", "level", + "like", "long", "lower", "manual", "max", "maximum_segment", "merge", + "min", "minute", "module_name", "month", "names", "national", + "natural", "nchar", "no", "not", "null", "numeric", "octet_length", + "of", "on", "only", "open", "option", "or", "order", "outer", + "output_type", "overflow", "page", "pages", "page_size", "parameter", + "password", "plan", "position", "post_event", "precision", "primary", + "privileges", "procedure", "protected", "rdb$db_key", "read", "real", + "record_version", "recreate", "recursive", "references", "release", + "reserv", "reserving", "retain", "returning_values", "returns", + "revoke", "right", "rollback", "rows", "row_count", "savepoint", + "schema", "second", "segment", "select", "sensitive", "set", "shadow", + "shared", "singular", "size", "smallint", "snapshot", "some", "sort", + "sqlcode", "stability", "start", "starting", "starts", "statistics", + "sub_type", "sum", "suspend", "table", "then", "time", "timestamp", + "to", "trailing", "transaction", "trigger", "trim", "uncommitted", + "union", 
"unique", "update", "upper", "user", "using", "value", + "values", "varchar", "variable", "varying", "view", "wait", "when", + "where", "while", "with", "work", "write", "year", +]) + + +class _StringType(sqltypes.String): + """Base for Firebird string types.""" + + def __init__(self, charset=None, **kw): + self.charset = charset + super(_StringType, self).__init__(**kw) + + +class VARCHAR(_StringType, sqltypes.VARCHAR): + """Firebird VARCHAR type""" + __visit_name__ = 'VARCHAR' + + def __init__(self, length=None, **kwargs): + super(VARCHAR, self).__init__(length=length, **kwargs) + + +class CHAR(_StringType, sqltypes.CHAR): + """Firebird CHAR type""" + __visit_name__ = 'CHAR' + + def __init__(self, length=None, **kwargs): + super(CHAR, self).__init__(length=length, **kwargs) + + +class _FBDateTime(sqltypes.DateTime): + def bind_processor(self, dialect): + def process(value): + if type(value) == datetime.date: + return datetime.datetime(value.year, value.month, value.day) + else: + return value + return process + +colspecs = { + sqltypes.DateTime: _FBDateTime +} + +ischema_names = { + 'SHORT': SMALLINT, + 'LONG': INTEGER, + 'QUAD': FLOAT, + 'FLOAT': FLOAT, + 'DATE': DATE, + 'TIME': TIME, + 'TEXT': TEXT, + 'INT64': BIGINT, + 'DOUBLE': FLOAT, + 'TIMESTAMP': TIMESTAMP, + 'VARYING': VARCHAR, + 'CSTRING': CHAR, + 'BLOB': BLOB, +} + + +# TODO: date conversion types (should be implemented as _FBDateTime, +# _FBDate, etc. as bind/result functionality is required) + +class FBTypeCompiler(compiler.GenericTypeCompiler): + def visit_boolean(self, type_, **kw): + return self.visit_SMALLINT(type_, **kw) + + def visit_datetime(self, type_, **kw): + return self.visit_TIMESTAMP(type_, **kw) + + def visit_TEXT(self, type_, **kw): + return "BLOB SUB_TYPE 1" + + def visit_BLOB(self, type_, **kw): + return "BLOB SUB_TYPE 0" + + def _extend_string(self, type_, basic): + charset = getattr(type_, 'charset', None) + if charset is None: + return basic + else: + return '%s CHARACTER SET %s' % (basic, charset) + + def visit_CHAR(self, type_, **kw): + basic = super(FBTypeCompiler, self).visit_CHAR(type_, **kw) + return self._extend_string(type_, basic) + + def visit_VARCHAR(self, type_, **kw): + if not type_.length: + raise exc.CompileError( + "VARCHAR requires a length on dialect %s" % + self.dialect.name) + basic = super(FBTypeCompiler, self).visit_VARCHAR(type_, **kw) + return self._extend_string(type_, basic) + + +class FBCompiler(sql.compiler.SQLCompiler): + """Firebird specific idiosyncrasies""" + + ansi_bind_rules = True + + # def visit_contains_op_binary(self, binary, operator, **kw): + # cant use CONTAINING b.c. it's case insensitive. + + # def visit_notcontains_op_binary(self, binary, operator, **kw): + # cant use NOT CONTAINING b.c. it's case insensitive. 
+ + def visit_now_func(self, fn, **kw): + return "CURRENT_TIMESTAMP" + + def visit_startswith_op_binary(self, binary, operator, **kw): + return '%s STARTING WITH %s' % ( + binary.left._compiler_dispatch(self, **kw), + binary.right._compiler_dispatch(self, **kw)) + + def visit_notstartswith_op_binary(self, binary, operator, **kw): + return '%s NOT STARTING WITH %s' % ( + binary.left._compiler_dispatch(self, **kw), + binary.right._compiler_dispatch(self, **kw)) + + def visit_mod_binary(self, binary, operator, **kw): + return "mod(%s, %s)" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw)) + + def visit_alias(self, alias, asfrom=False, **kwargs): + if self.dialect._version_two: + return super(FBCompiler, self).\ + visit_alias(alias, asfrom=asfrom, **kwargs) + else: + # Override to not use the AS keyword which FB 1.5 does not like + if asfrom: + alias_name = isinstance(alias.name, + expression._truncated_label) and \ + self._truncated_identifier("alias", + alias.name) or alias.name + + return self.process( + alias.original, asfrom=asfrom, **kwargs) + \ + " " + \ + self.preparer.format_alias(alias, alias_name) + else: + return self.process(alias.original, **kwargs) + + def visit_substring_func(self, func, **kw): + s = self.process(func.clauses.clauses[0]) + start = self.process(func.clauses.clauses[1]) + if len(func.clauses.clauses) > 2: + length = self.process(func.clauses.clauses[2]) + return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length) + else: + return "SUBSTRING(%s FROM %s)" % (s, start) + + def visit_length_func(self, function, **kw): + if self.dialect._version_two: + return "char_length" + self.function_argspec(function) + else: + return "strlen" + self.function_argspec(function) + + visit_char_length_func = visit_length_func + + def function_argspec(self, func, **kw): + # TODO: this probably will need to be + # narrowed to a fixed list, some no-arg functions + # may require parens - see similar example in the oracle + # dialect + if func.clauses is not None and len(func.clauses): + return self.process(func.clause_expr, **kw) + else: + return "" + + def default_from(self): + return " FROM rdb$database" + + def visit_sequence(self, seq): + return "gen_id(%s, 1)" % self.preparer.format_sequence(seq) + + def get_select_precolumns(self, select, **kw): + """Called when building a ``SELECT`` statement, position is just + before column list Firebird puts the limit and offset right + after the ``SELECT``... 
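+        For example, a select with limit 5 and offset 10 renders as
+        ``SELECT FIRST 5 SKIP 10 ...``.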
+ """ + + result = "" + if select._limit_clause is not None: + result += "FIRST %s " % self.process(select._limit_clause, **kw) + if select._offset_clause is not None: + result += "SKIP %s " % self.process(select._offset_clause, **kw) + if select._distinct: + result += "DISTINCT " + return result + + def limit_clause(self, select, **kw): + """Already taken care of in the `get_select_precolumns` method.""" + + return "" + + def returning_clause(self, stmt, returning_cols): + columns = [ + self._label_select_column(None, c, True, False, {}) + for c in expression._select_iterables(returning_cols) + ] + + return 'RETURNING ' + ', '.join(columns) + + +class FBDDLCompiler(sql.compiler.DDLCompiler): + """Firebird syntactic idiosyncrasies""" + + def visit_create_sequence(self, create): + """Generate a ``CREATE GENERATOR`` statement for the sequence.""" + + # no syntax for these + # http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html + if create.element.start is not None: + raise NotImplemented( + "Firebird SEQUENCE doesn't support START WITH") + if create.element.increment is not None: + raise NotImplemented( + "Firebird SEQUENCE doesn't support INCREMENT BY") + + if self.dialect._version_two: + return "CREATE SEQUENCE %s" % \ + self.preparer.format_sequence(create.element) + else: + return "CREATE GENERATOR %s" % \ + self.preparer.format_sequence(create.element) + + def visit_drop_sequence(self, drop): + """Generate a ``DROP GENERATOR`` statement for the sequence.""" + + if self.dialect._version_two: + return "DROP SEQUENCE %s" % \ + self.preparer.format_sequence(drop.element) + else: + return "DROP GENERATOR %s" % \ + self.preparer.format_sequence(drop.element) + + +class FBIdentifierPreparer(sql.compiler.IdentifierPreparer): + """Install Firebird specific reserved words.""" + + reserved_words = RESERVED_WORDS + illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union( + ['_']) + + def __init__(self, dialect): + super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True) + + +class FBExecutionContext(default.DefaultExecutionContext): + def fire_sequence(self, seq, type_): + """Get the next value from the sequence using ``gen_id()``.""" + + return self._execute_scalar( + "SELECT gen_id(%s, 1) FROM rdb$database" % + self.dialect.identifier_preparer.format_sequence(seq), + type_ + ) + + +class FBDialect(default.DefaultDialect): + """Firebird dialect""" + + name = 'firebird' + + max_identifier_length = 31 + + supports_sequences = True + sequences_optional = False + supports_default_values = True + postfetch_lastrowid = False + + supports_native_boolean = False + + requires_name_normalize = True + supports_empty_insert = False + + statement_compiler = FBCompiler + ddl_compiler = FBDDLCompiler + preparer = FBIdentifierPreparer + type_compiler = FBTypeCompiler + execution_ctx_cls = FBExecutionContext + + colspecs = colspecs + ischema_names = ischema_names + + construct_arguments = [] + + # defaults to dialect ver. 
3, + # will be autodetected off upon + # first connect + _version_two = True + + def initialize(self, connection): + super(FBDialect, self).initialize(connection) + self._version_two = ('firebird' in self.server_version_info and + self.server_version_info >= (2, ) + ) or \ + ('interbase' in self.server_version_info and + self.server_version_info >= (6, ) + ) + + if not self._version_two: + # TODO: whatever other pre < 2.0 stuff goes here + self.ischema_names = ischema_names.copy() + self.ischema_names['TIMESTAMP'] = sqltypes.DATE + self.colspecs = { + sqltypes.DateTime: sqltypes.DATE + } + + self.implicit_returning = self._version_two and \ + self.__dict__.get('implicit_returning', True) + + def normalize_name(self, name): + # Remove trailing spaces: FB uses a CHAR() type, + # that is padded with spaces + name = name and name.rstrip() + if name is None: + return None + elif name.upper() == name and \ + not self.identifier_preparer._requires_quotes(name.lower()): + return name.lower() + elif name.lower() == name: + return quoted_name(name, quote=True) + else: + return name + + def denormalize_name(self, name): + if name is None: + return None + elif name.lower() == name and \ + not self.identifier_preparer._requires_quotes(name.lower()): + return name.upper() + else: + return name + + def has_table(self, connection, table_name, schema=None): + """Return ``True`` if the given table exists, ignoring + the `schema`.""" + + tblqry = """ + SELECT 1 AS has_table FROM rdb$database + WHERE EXISTS (SELECT rdb$relation_name + FROM rdb$relations + WHERE rdb$relation_name=?) + """ + c = connection.execute(tblqry, [self.denormalize_name(table_name)]) + return c.first() is not None + + def has_sequence(self, connection, sequence_name, schema=None): + """Return ``True`` if the given sequence (generator) exists.""" + + genqry = """ + SELECT 1 AS has_sequence FROM rdb$database + WHERE EXISTS (SELECT rdb$generator_name + FROM rdb$generators + WHERE rdb$generator_name=?) + """ + c = connection.execute(genqry, [self.denormalize_name(sequence_name)]) + return c.first() is not None + + @reflection.cache + def get_table_names(self, connection, schema=None, **kw): + # there are two queries commonly mentioned for this. + # this one, using view_blr, is at the Firebird FAQ among other places: + # http://www.firebirdfaq.org/faq174/ + s = """ + select rdb$relation_name + from rdb$relations + where rdb$view_blr is null + and (rdb$system_flag is null or rdb$system_flag = 0); + """ + + # the other query is this one. It's not clear if there's really + # any difference between these two. This link: + # http://www.alberton.info/firebird_sql_meta_info.html#.Ur3vXfZGni8 + # states them as interchangeable. Some discussion at [ticket:2898] + # SELECT DISTINCT rdb$relation_name + # FROM rdb$relation_fields + # WHERE rdb$system_flag=0 AND rdb$view_context IS NULL + + return [self.normalize_name(row[0]) for row in connection.execute(s)] + + @reflection.cache + def get_view_names(self, connection, schema=None, **kw): + # see http://www.firebirdfaq.org/faq174/ + s = """ + select rdb$relation_name + from rdb$relations + where rdb$view_blr is not null + and (rdb$system_flag is null or rdb$system_flag = 0); + """ + return [self.normalize_name(row[0]) for row in connection.execute(s)] + + @reflection.cache + def get_view_definition(self, connection, view_name, schema=None, **kw): + qry = """ + SELECT rdb$view_source AS view_source + FROM rdb$relations + WHERE rdb$relation_name=? 
+ """ + rp = connection.execute(qry, [self.denormalize_name(view_name)]) + row = rp.first() + if row: + return row['view_source'] + else: + return None + + @reflection.cache + def get_pk_constraint(self, connection, table_name, schema=None, **kw): + # Query to extract the PK/FK constrained fields of the given table + keyqry = """ + SELECT se.rdb$field_name AS fname + FROM rdb$relation_constraints rc + JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name + WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=? + """ + tablename = self.denormalize_name(table_name) + # get primary key fields + c = connection.execute(keyqry, ["PRIMARY KEY", tablename]) + pkfields = [self.normalize_name(r['fname']) for r in c.fetchall()] + return {'constrained_columns': pkfields, 'name': None} + + @reflection.cache + def get_column_sequence(self, connection, + table_name, column_name, + schema=None, **kw): + tablename = self.denormalize_name(table_name) + colname = self.denormalize_name(column_name) + # Heuristic-query to determine the generator associated to a PK field + genqry = """ + SELECT trigdep.rdb$depended_on_name AS fgenerator + FROM rdb$dependencies tabdep + JOIN rdb$dependencies trigdep + ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name + AND trigdep.rdb$depended_on_type=14 + AND trigdep.rdb$dependent_type=2 + JOIN rdb$triggers trig ON + trig.rdb$trigger_name=tabdep.rdb$dependent_name + WHERE tabdep.rdb$depended_on_name=? + AND tabdep.rdb$depended_on_type=0 + AND trig.rdb$trigger_type=1 + AND tabdep.rdb$field_name=? + AND (SELECT count(*) + FROM rdb$dependencies trigdep2 + WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2 + """ + genr = connection.execute(genqry, [tablename, colname]).first() + if genr is not None: + return dict(name=self.normalize_name(genr['fgenerator'])) + + @reflection.cache + def get_columns(self, connection, table_name, schema=None, **kw): + # Query to extract the details of all the fields of the given table + tblqry = """ + SELECT r.rdb$field_name AS fname, + r.rdb$null_flag AS null_flag, + t.rdb$type_name AS ftype, + f.rdb$field_sub_type AS stype, + f.rdb$field_length/ + COALESCE(cs.rdb$bytes_per_character,1) AS flen, + f.rdb$field_precision AS fprec, + f.rdb$field_scale AS fscale, + COALESCE(r.rdb$default_source, + f.rdb$default_source) AS fdefault + FROM rdb$relation_fields r + JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name + JOIN rdb$types t + ON t.rdb$type=f.rdb$field_type AND + t.rdb$field_name='RDB$FIELD_TYPE' + LEFT JOIN rdb$character_sets cs ON + f.rdb$character_set_id=cs.rdb$character_set_id + WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=? 
+ ORDER BY r.rdb$field_position + """ + # get the PK, used to determine the eventual associated sequence + pk_constraint = self.get_pk_constraint(connection, table_name) + pkey_cols = pk_constraint['constrained_columns'] + + tablename = self.denormalize_name(table_name) + # get all of the fields for this table + c = connection.execute(tblqry, [tablename]) + cols = [] + while True: + row = c.fetchone() + if row is None: + break + name = self.normalize_name(row['fname']) + orig_colname = row['fname'] + + # get the data type + colspec = row['ftype'].rstrip() + coltype = self.ischema_names.get(colspec) + if coltype is None: + util.warn("Did not recognize type '%s' of column '%s'" % + (colspec, name)) + coltype = sqltypes.NULLTYPE + elif issubclass(coltype, Integer) and row['fprec'] != 0: + coltype = NUMERIC( + precision=row['fprec'], + scale=row['fscale'] * -1) + elif colspec in ('VARYING', 'CSTRING'): + coltype = coltype(row['flen']) + elif colspec == 'TEXT': + coltype = TEXT(row['flen']) + elif colspec == 'BLOB': + if row['stype'] == 1: + coltype = TEXT() + else: + coltype = BLOB() + else: + coltype = coltype() + + # does it have a default value? + defvalue = None + if row['fdefault'] is not None: + # the value comes down as "DEFAULT 'value'": there may be + # more than one whitespace around the "DEFAULT" keyword + # and it may also be lower case + # (see also http://tracker.firebirdsql.org/browse/CORE-356) + defexpr = row['fdefault'].lstrip() + assert defexpr[:8].rstrip().upper() == \ + 'DEFAULT', "Unrecognized default value: %s" % \ + defexpr + defvalue = defexpr[8:].strip() + if defvalue == 'NULL': + # Redundant + defvalue = None + col_d = { + 'name': name, + 'type': coltype, + 'nullable': not bool(row['null_flag']), + 'default': defvalue, + 'autoincrement': 'auto', + } + + if orig_colname.lower() == orig_colname: + col_d['quote'] = True + + # if the PK is a single field, try to see if its linked to + # a sequence thru a trigger + if len(pkey_cols) == 1 and name == pkey_cols[0]: + seq_d = self.get_column_sequence(connection, tablename, name) + if seq_d is not None: + col_d['sequence'] = seq_d + + cols.append(col_d) + return cols + + @reflection.cache + def get_foreign_keys(self, connection, table_name, schema=None, **kw): + # Query to extract the details of each UK/FK of the given table + fkqry = """ + SELECT rc.rdb$constraint_name AS cname, + cse.rdb$field_name AS fname, + ix2.rdb$relation_name AS targetrname, + se.rdb$field_name AS targetfname + FROM rdb$relation_constraints rc + JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name + JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key + JOIN rdb$index_segments cse ON + cse.rdb$index_name=ix1.rdb$index_name + JOIN rdb$index_segments se + ON se.rdb$index_name=ix2.rdb$index_name + AND se.rdb$field_position=cse.rdb$field_position + WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=? 
+ ORDER BY se.rdb$index_name, se.rdb$field_position + """ + tablename = self.denormalize_name(table_name) + + c = connection.execute(fkqry, ["FOREIGN KEY", tablename]) + fks = util.defaultdict(lambda: { + 'name': None, + 'constrained_columns': [], + 'referred_schema': None, + 'referred_table': None, + 'referred_columns': [] + }) + + for row in c: + cname = self.normalize_name(row['cname']) + fk = fks[cname] + if not fk['name']: + fk['name'] = cname + fk['referred_table'] = self.normalize_name(row['targetrname']) + fk['constrained_columns'].append( + self.normalize_name(row['fname'])) + fk['referred_columns'].append( + self.normalize_name(row['targetfname'])) + return list(fks.values()) + + @reflection.cache + def get_indexes(self, connection, table_name, schema=None, **kw): + qry = """ + SELECT ix.rdb$index_name AS index_name, + ix.rdb$unique_flag AS unique_flag, + ic.rdb$field_name AS field_name + FROM rdb$indices ix + JOIN rdb$index_segments ic + ON ix.rdb$index_name=ic.rdb$index_name + LEFT OUTER JOIN rdb$relation_constraints + ON rdb$relation_constraints.rdb$index_name = + ic.rdb$index_name + WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL + AND rdb$relation_constraints.rdb$constraint_type IS NULL + ORDER BY index_name, ic.rdb$field_position + """ + c = connection.execute(qry, [self.denormalize_name(table_name)]) + + indexes = util.defaultdict(dict) + for row in c: + indexrec = indexes[row['index_name']] + if 'name' not in indexrec: + indexrec['name'] = self.normalize_name(row['index_name']) + indexrec['column_names'] = [] + indexrec['unique'] = bool(row['unique_flag']) + + indexrec['column_names'].append( + self.normalize_name(row['field_name'])) + + return list(indexes.values()) diff --git a/app/lib/sqlalchemy/dialects/firebird/fdb.py b/app/lib/sqlalchemy/dialects/firebird/fdb.py new file mode 100644 index 0000000..d590df7 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/firebird/fdb.py @@ -0,0 +1,118 @@ +# firebird/fdb.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: firebird+fdb + :name: fdb + :dbapi: pyodbc + :connectstring: firebird+fdb://user:password@host:port/path/to/db\ +[?key=value&key=value...] + :url: http://pypi.python.org/pypi/fdb/ + + fdb is a kinterbasdb compatible DBAPI for Firebird. + + .. versionadded:: 0.8 - Support for the fdb Firebird driver. + + .. versionchanged:: 0.9 - The fdb dialect is now the default dialect + under the ``firebird://`` URL space, as ``fdb`` is now the official + Python driver for Firebird. + +Arguments +---------- + +The ``fdb`` dialect is based on the +:mod:`sqlalchemy.dialects.firebird.kinterbasdb` dialect, however does not +accept every argument that Kinterbasdb does. + +* ``enable_rowcount`` - True by default, setting this to False disables + the usage of "cursor.rowcount" with the + Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically + after any UPDATE or DELETE statement. When disabled, SQLAlchemy's + ResultProxy will return -1 for result.rowcount. The rationale here is + that Kinterbasdb requires a second round trip to the database when + .rowcount is called - since SQLA's resultproxy automatically closes + the cursor after a non-result-returning statement, rowcount must be + called, if at all, before the result object is returned. 
Additionally, + cursor.rowcount may not return correct results with older versions + of Firebird, and setting this flag to False will also cause the + SQLAlchemy ORM to ignore its usage. The behavior can also be controlled on a + per-execution basis using the ``enable_rowcount`` option with + :meth:`.Connection.execution_options`:: + + conn = engine.connect().execution_options(enable_rowcount=True) + r = conn.execute(stmt) + print r.rowcount + +* ``retaining`` - False by default. Setting this to True will pass the + ``retaining=True`` keyword argument to the ``.commit()`` and ``.rollback()`` + methods of the DBAPI connection, which can improve performance in some + situations, but apparently with significant caveats. + Please read the fdb and/or kinterbasdb DBAPI documentation in order to + understand the implications of this flag. + + .. versionadded:: 0.8.2 - ``retaining`` keyword argument specifying + transaction retaining behavior - in 0.8 it defaults to ``True`` + for backwards compatibility. + + .. versionchanged:: 0.9.0 - the ``retaining`` flag defaults to ``False``. + In 0.8 it defaulted to ``True``. + + .. seealso:: + + http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions + - information on the "retaining" flag. + +""" + +from .kinterbasdb import FBDialect_kinterbasdb +from ... import util + + +class FBDialect_fdb(FBDialect_kinterbasdb): + + def __init__(self, enable_rowcount=True, + retaining=False, **kwargs): + super(FBDialect_fdb, self).__init__( + enable_rowcount=enable_rowcount, + retaining=retaining, **kwargs) + + @classmethod + def dbapi(cls): + return __import__('fdb') + + def create_connect_args(self, url): + opts = url.translate_connect_args(username='user') + if opts.get('port'): + opts['host'] = "%s/%s" % (opts['host'], opts['port']) + del opts['port'] + opts.update(url.query) + + util.coerce_kw_type(opts, 'type_conv', int) + + return ([], opts) + + def _get_server_version_info(self, connection): + """Get the version of the Firebird server used by a connection. + + Returns a tuple of (`major`, `minor`, `build`), three integers + representing the version of the attached server. + """ + + # This is the simpler approach (the other uses the services api), + # that for backward compatibility reasons returns a string like + # LI-V6.3.3.12981 Firebird 2.0 + # where the first version is a fake one resembling the old + # Interbase signature. + + isc_info_firebird_version = 103 + fbconn = connection.connection + + version = fbconn.db_info(isc_info_firebird_version) + + return self._parse_version_info(version) + +dialect = FBDialect_fdb diff --git a/app/lib/sqlalchemy/dialects/firebird/kinterbasdb.py b/app/lib/sqlalchemy/dialects/firebird/kinterbasdb.py new file mode 100644 index 0000000..b7c1563 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/firebird/kinterbasdb.py @@ -0,0 +1,184 @@ +# firebird/kinterbasdb.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: firebird+kinterbasdb + :name: kinterbasdb + :dbapi: kinterbasdb + :connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db\ +[?key=value&key=value...] + :url: http://firebirdsql.org/index.php?op=devel&sub=python + +Arguments +---------- + +The Kinterbasdb backend accepts the ``enable_rowcount`` and ``retaining`` +arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect. 
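+These can be passed as keyword arguments to ``create_engine()``; a sketch
+with an illustrative URL::
+
+    engine = create_engine(
+        "firebird+kinterbasdb://user:password@host:port/path/to/db",
+        enable_rowcount=False, retaining=False)
+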
+In addition, it also accepts the following: + +* ``type_conv`` - select the kind of mapping done on the types: by default + SQLAlchemy uses 200 with Unicode, datetime and decimal support. See + the linked documents below for further information. + +* ``concurrency_level`` - set the backend policy with regards to threading + issues: by default SQLAlchemy uses policy 1. See the linked documents + below for further information. + +.. seealso:: + + http://sourceforge.net/projects/kinterbasdb + + http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation + + http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency + +""" + +from .base import FBDialect, FBExecutionContext +from ... import util, types as sqltypes +from re import match +import decimal + + +class _kinterbasdb_numeric(object): + def bind_processor(self, dialect): + def process(value): + if isinstance(value, decimal.Decimal): + return str(value) + else: + return value + return process + + +class _FBNumeric_kinterbasdb(_kinterbasdb_numeric, sqltypes.Numeric): + pass + + +class _FBFloat_kinterbasdb(_kinterbasdb_numeric, sqltypes.Float): + pass + + +class FBExecutionContext_kinterbasdb(FBExecutionContext): + @property + def rowcount(self): + if self.execution_options.get('enable_rowcount', + self.dialect.enable_rowcount): + return self.cursor.rowcount + else: + return -1 + + +class FBDialect_kinterbasdb(FBDialect): + driver = 'kinterbasdb' + supports_sane_rowcount = False + supports_sane_multi_rowcount = False + execution_ctx_cls = FBExecutionContext_kinterbasdb + + supports_native_decimal = True + + colspecs = util.update_copy( + FBDialect.colspecs, + { + sqltypes.Numeric: _FBNumeric_kinterbasdb, + sqltypes.Float: _FBFloat_kinterbasdb, + } + + ) + + def __init__(self, type_conv=200, concurrency_level=1, + enable_rowcount=True, + retaining=False, **kwargs): + super(FBDialect_kinterbasdb, self).__init__(**kwargs) + self.enable_rowcount = enable_rowcount + self.type_conv = type_conv + self.concurrency_level = concurrency_level + self.retaining = retaining + if enable_rowcount: + self.supports_sane_rowcount = True + + @classmethod + def dbapi(cls): + return __import__('kinterbasdb') + + def do_execute(self, cursor, statement, parameters, context=None): + # kinterbase does not accept a None, but wants an empty list + # when there are no arguments. 
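+        # hence the "parameters or []" fallback in the call that follows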
+ cursor.execute(statement, parameters or []) + + def do_rollback(self, dbapi_connection): + dbapi_connection.rollback(self.retaining) + + def do_commit(self, dbapi_connection): + dbapi_connection.commit(self.retaining) + + def create_connect_args(self, url): + opts = url.translate_connect_args(username='user') + if opts.get('port'): + opts['host'] = "%s/%s" % (opts['host'], opts['port']) + del opts['port'] + opts.update(url.query) + + util.coerce_kw_type(opts, 'type_conv', int) + + type_conv = opts.pop('type_conv', self.type_conv) + concurrency_level = opts.pop('concurrency_level', + self.concurrency_level) + + if self.dbapi is not None: + initialized = getattr(self.dbapi, 'initialized', None) + if initialized is None: + # CVS rev 1.96 changed the name of the attribute: + # http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/ + # Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96 + initialized = getattr(self.dbapi, '_initialized', False) + if not initialized: + self.dbapi.init(type_conv=type_conv, + concurrency_level=concurrency_level) + return ([], opts) + + def _get_server_version_info(self, connection): + """Get the version of the Firebird server used by a connection. + + Returns a tuple of (`major`, `minor`, `build`), three integers + representing the version of the attached server. + """ + + # This is the simpler approach (the other uses the services api), + # that for backward compatibility reasons returns a string like + # LI-V6.3.3.12981 Firebird 2.0 + # where the first version is a fake one resembling the old + # Interbase signature. + + fbconn = connection.connection + version = fbconn.server_version + + return self._parse_version_info(version) + + def _parse_version_info(self, version): + m = match( + r'\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?', version) + if not m: + raise AssertionError( + "Could not determine version from string '%s'" % version) + + if m.group(5) != None: + return tuple([int(x) for x in m.group(6, 7, 4)] + ['firebird']) + else: + return tuple([int(x) for x in m.group(1, 2, 3)] + ['interbase']) + + def is_disconnect(self, e, connection, cursor): + if isinstance(e, (self.dbapi.OperationalError, + self.dbapi.ProgrammingError)): + msg = str(e) + return ('Unable to complete network request to host' in msg or + 'Invalid connection state' in msg or + 'Invalid cursor state' in msg or + 'connection shutdown' in msg) + else: + return False + +dialect = FBDialect_kinterbasdb diff --git a/app/lib/sqlalchemy/dialects/mssql/__init__.py b/app/lib/sqlalchemy/dialects/mssql/__init__.py new file mode 100644 index 0000000..6b70df3 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mssql/__init__.py @@ -0,0 +1,27 @@ +# mssql/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, \ + pymssql, zxjdbc, mxodbc + +base.dialect = pyodbc.dialect + +from sqlalchemy.dialects.mssql.base import \ + INTEGER, BIGINT, SMALLINT, TINYINT, VARCHAR, NVARCHAR, CHAR, \ + NCHAR, TEXT, NTEXT, DECIMAL, NUMERIC, FLOAT, DATETIME,\ + DATETIME2, DATETIMEOFFSET, DATE, TIME, SMALLDATETIME, \ + BINARY, VARBINARY, BIT, REAL, IMAGE, TIMESTAMP,\ + MONEY, SMALLMONEY, UNIQUEIDENTIFIER, SQL_VARIANT, dialect + + +__all__ = ( + 'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR', + 'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME', + 'DATETIME2', 
'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME', + 'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP', + 'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect' +) diff --git a/app/lib/sqlalchemy/dialects/mssql/adodbapi.py b/app/lib/sqlalchemy/dialects/mssql/adodbapi.py new file mode 100644 index 0000000..221bf50 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mssql/adodbapi.py @@ -0,0 +1,87 @@ +# mssql/adodbapi.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: mssql+adodbapi + :name: adodbapi + :dbapi: adodbapi + :connectstring: mssql+adodbapi://:@ + :url: http://adodbapi.sourceforge.net/ + +.. note:: + + The adodbapi dialect is not implemented SQLAlchemy versions 0.6 and + above at this time. + +""" +import datetime +from sqlalchemy import types as sqltypes, util +from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect +import sys + + +class MSDateTime_adodbapi(MSDateTime): + def result_processor(self, dialect, coltype): + def process(value): + # adodbapi will return datetimes with empty time + # values as datetime.date() objects. + # Promote them back to full datetime.datetime() + if type(value) is datetime.date: + return datetime.datetime(value.year, value.month, value.day) + return value + return process + + +class MSDialect_adodbapi(MSDialect): + supports_sane_rowcount = True + supports_sane_multi_rowcount = True + supports_unicode = sys.maxunicode == 65535 + supports_unicode_statements = True + driver = 'adodbapi' + + @classmethod + def import_dbapi(cls): + import adodbapi as module + return module + + colspecs = util.update_copy( + MSDialect.colspecs, + { + sqltypes.DateTime: MSDateTime_adodbapi + } + ) + + def create_connect_args(self, url): + def check_quote(token): + if ";" in str(token): + token = "'%s'" % token + return token + + keys = dict( + (k, check_quote(v)) for k, v in url.query.items() + ) + + connectors = ["Provider=SQLOLEDB"] + if 'port' in keys: + connectors.append("Data Source=%s, %s" % + (keys.get("host"), keys.get("port"))) + else: + connectors.append("Data Source=%s" % keys.get("host")) + connectors.append("Initial Catalog=%s" % keys.get("database")) + user = keys.get("user") + if user: + connectors.append("User Id=%s" % user) + connectors.append("Password=%s" % keys.get("password", "")) + else: + connectors.append("Integrated Security=SSPI") + return [[";".join(connectors)], {}] + + def is_disconnect(self, e, connection, cursor): + return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \ + "'connection failure'" in str(e) + +dialect = MSDialect_adodbapi diff --git a/app/lib/sqlalchemy/dialects/mssql/base.py b/app/lib/sqlalchemy/dialects/mssql/base.py new file mode 100644 index 0000000..6975754 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mssql/base.py @@ -0,0 +1,2064 @@ +# mssql/base.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: mssql + :name: Microsoft SQL Server + + +Auto Increment Behavior +----------------------- + +SQL Server provides so-called "auto incrementing" behavior using the +``IDENTITY`` construct, which can be placed on an integer primary key. 
+SQLAlchemy considers ``IDENTITY`` within its default "autoincrement" behavior, +described at :paramref:`.Column.autoincrement`; this means +that by default, the first integer primary key column in a :class:`.Table` +will be considered to be the identity column and will generate DDL as such:: + + from sqlalchemy import Table, MetaData, Column, Integer + + m = MetaData() + t = Table('t', m, + Column('id', Integer, primary_key=True), + Column('x', Integer)) + m.create_all(engine) + +The above example will generate DDL as: + +.. sourcecode:: sql + + CREATE TABLE t ( + id INTEGER NOT NULL IDENTITY(1,1), + x INTEGER NULL, + PRIMARY KEY (id) + ) + +For the case where this default generation of ``IDENTITY`` is not desired, +specify ``autoincrement=False`` on all integer primary key columns:: + + m = MetaData() + t = Table('t', m, + Column('id', Integer, primary_key=True, autoincrement=False), + Column('x', Integer)) + m.create_all(engine) + +.. note:: + + An INSERT statement which refers to an explicit value for such + a column is prohibited by SQL Server, however SQLAlchemy will detect this + and modify the ``IDENTITY_INSERT`` flag accordingly at statement execution + time. As this is not a high performing process, care should be taken to + set the ``autoincrement`` flag appropriately for columns that will not + actually require IDENTITY behavior. + +Controlling "Start" and "Increment" +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Specific control over the parameters of the ``IDENTITY`` value is supported +using the :class:`.schema.Sequence` object. While this object normally +represents an explicit "sequence" for supporting backends, on SQL Server it is +re-purposed to specify behavior regarding the identity column, including +support of the "start" and "increment" values:: + + from sqlalchemy import Table, Integer, Sequence, Column + + Table('test', metadata, + Column('id', Integer, + Sequence('blah', start=100, increment=10), + primary_key=True), + Column('name', String(20)) + ).create(some_engine) + +would yield: + +.. sourcecode:: sql + + CREATE TABLE test ( + id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY, + name VARCHAR(20) NULL, + ) + +Note that the ``start`` and ``increment`` values for sequences are +optional and will default to 1,1. + +INSERT behavior +^^^^^^^^^^^^^^^^ + +Handling of the ``IDENTITY`` column at INSERT time involves two key +techniques. The most common is being able to fetch the "last inserted value" +for a given ``IDENTITY`` column, a process which SQLAlchemy performs +implicitly in many cases, most importantly within the ORM. + +The process for fetching this value has several variants: + +* In the vast majority of cases, RETURNING is used in conjunction with INSERT + statements on SQL Server in order to get newly generated primary key values: + + .. sourcecode:: sql + + INSERT INTO t (x) OUTPUT inserted.id VALUES (?) + +* When RETURNING is not available or has been disabled via + ``implicit_returning=False``, either the ``scope_identity()`` function or + the ``@@identity`` variable is used; behavior varies by backend: + + * when using PyODBC, the phrase ``; select scope_identity()`` will be + appended to the end of the INSERT statement; a second result set will be + fetched in order to receive the value. Given a table as:: + + t = Table('t', m, Column('id', Integer, primary_key=True), + Column('x', Integer), + implicit_returning=False) + + an INSERT will look like: + + .. 
sourcecode:: sql + + INSERT INTO t (x) VALUES (?); select scope_identity() + + * Other dialects such as pymssql will call upon + ``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT + statement. If the flag ``use_scope_identity=False`` is passed to + :func:`.create_engine`, the statement ``SELECT @@identity AS lastrowid`` + is used instead. + +A table that contains an ``IDENTITY`` column will prohibit an INSERT statement +that refers to the identity column explicitly. The SQLAlchemy dialect will +detect when an INSERT construct, created using a core :func:`.insert` +construct (not a plain string SQL), refers to the identity column, and +in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert +statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the +execution. Given this example:: + + m = MetaData() + t = Table('t', m, Column('id', Integer, primary_key=True), + Column('x', Integer)) + m.create_all(engine) + + engine.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2}) + +The above column will be created with IDENTITY, however the INSERT statement +we emit is specifying explicit values. In the echo output we can see +how SQLAlchemy handles this: + +.. sourcecode:: sql + + CREATE TABLE t ( + id INTEGER NOT NULL IDENTITY(1,1), + x INTEGER NULL, + PRIMARY KEY (id) + ) + + COMMIT + SET IDENTITY_INSERT t ON + INSERT INTO t (id, x) VALUES (?, ?) + ((1, 1), (2, 2)) + SET IDENTITY_INSERT t OFF + COMMIT + + + +This +is an auxiliary use case suitable for testing and bulk insert scenarios. + +MAX on VARCHAR / NVARCHAR +------------------------- + +SQL Server supports the special string "MAX" within the +:class:`.sqltypes.VARCHAR` and :class:`.sqltypes.NVARCHAR` datatypes, +to indicate "maximum length possible". The dialect currently handles this as +a length of "None" in the base type, rather than supplying a +dialect-specific version of these types, so that a base type +specified such as ``VARCHAR(None)`` can assume "unlengthed" behavior on +more than one backend without using dialect-specific types. + +To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None:: + + my_table = Table( + 'my_table', metadata, + Column('my_data', VARCHAR(None)), + Column('my_n_data', NVARCHAR(None)) + ) + + +Collation Support +----------------- + +Character collations are supported by the base string types, +specified by the string argument "collation":: + + from sqlalchemy import VARCHAR + Column('login', VARCHAR(32, collation='Latin1_General_CI_AS')) + +When such a column is associated with a :class:`.Table`, the +CREATE TABLE statement for this column will yield:: + + login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL + +.. versionadded:: 0.8 Character collations are now part of the base string + types. + +LIMIT/OFFSET Support +-------------------- + +MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is +supported directly through the ``TOP`` Transact SQL keyword:: + + select.limit + +will yield:: + + SELECT TOP n + +If using SQL Server 2005 or above, LIMIT with OFFSET +support is available through the ``ROW_NUMBER OVER`` construct. +For versions below 2005, LIMIT with OFFSET usage will fail. + +.. 
_mssql_isolation_level: + +Transaction Isolation Level +--------------------------- + +All SQL Server dialects support setting of transaction isolation level +both via a dialect-specific parameter +:paramref:`.create_engine.isolation_level` +accepted by :func:`.create_engine`, +as well as the :paramref:`.Connection.execution_options.isolation_level` +argument as passed to +:meth:`.Connection.execution_options`. This feature works by issuing the +command ``SET TRANSACTION ISOLATION LEVEL `` for +each new connection. + +To set isolation level using :func:`.create_engine`:: + + engine = create_engine( + "mssql+pyodbc://scott:tiger@ms_2008", + isolation_level="REPEATABLE READ" + ) + +To set using per-connection execution options:: + + connection = engine.connect() + connection = connection.execution_options( + isolation_level="READ COMMITTED" + ) + +Valid values for ``isolation_level`` include: + +* ``READ COMMITTED`` +* ``READ UNCOMMITTED`` +* ``REPEATABLE READ`` +* ``SERIALIZABLE`` +* ``SNAPSHOT`` - specific to SQL Server + +.. versionadded:: 1.1 support for isolation level setting on Microsoft + SQL Server. + + +Nullability +----------- +MSSQL has support for three levels of column nullability. The default +nullability allows nulls and is explicit in the CREATE TABLE +construct:: + + name VARCHAR(20) NULL + +If ``nullable=None`` is specified then no specification is made. In +other words the database's configured default is used. This will +render:: + + name VARCHAR(20) + +If ``nullable`` is ``True`` or ``False`` then the column will be +``NULL`` or ``NOT NULL`` respectively. + +Date / Time Handling +-------------------- +DATE and TIME are supported. Bind parameters are converted +to datetime.datetime() objects as required by most MSSQL drivers, +and results are processed from strings if needed. +The DATE and TIME types are not available for MSSQL 2005 and +previous - if a server version below 2008 is detected, DDL +for these types will be issued as DATETIME. + +.. _mssql_large_type_deprecation: + +Large Text/Binary Type Deprecation +---------------------------------- + +Per `SQL Server 2012/2014 Documentation `_, +the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL Server +in a future release. SQLAlchemy normally relates these types to the +:class:`.UnicodeText`, :class:`.Text` and :class:`.LargeBinary` datatypes. + +In order to accommodate this change, a new flag ``deprecate_large_types`` +is added to the dialect, which will be automatically set based on detection +of the server version in use, if not otherwise set by the user. The +behavior of this flag is as follows: + +* When this flag is ``True``, the :class:`.UnicodeText`, :class:`.Text` and + :class:`.LargeBinary` datatypes, when used to render DDL, will render the + types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``, + respectively. This is a new behavior as of the addition of this flag. + +* When this flag is ``False``, the :class:`.UnicodeText`, :class:`.Text` and + :class:`.LargeBinary` datatypes, when used to render DDL, will render the + types ``NTEXT``, ``TEXT``, and ``IMAGE``, + respectively. This is the long-standing behavior of these types. + +* The flag begins with the value ``None``, before a database connection is + established. If the dialect is used to render DDL without the flag being + set, it is interpreted the same as ``False``. 
+ +* On first connection, the dialect detects if SQL Server version 2012 or greater + is in use; if the flag is still at ``None``, it sets it to ``True`` or + ``False`` based on whether 2012 or greater is detected. + +* The flag can be set to either ``True`` or ``False`` when the dialect + is created, typically via :func:`.create_engine`:: + + eng = create_engine("mssql+pymssql://user:pass@host/db", + deprecate_large_types=True) + +* Complete control over whether the "old" or "new" types are rendered is + available in all SQLAlchemy versions by using the UPPERCASE type objects + instead: :class:`.NVARCHAR`, :class:`.VARCHAR`, :class:`.types.VARBINARY`, + :class:`.TEXT`, :class:`.mssql.NTEXT`, :class:`.mssql.IMAGE` will always remain + fixed and always output exactly that type. + +.. versionadded:: 1.0.0 + +.. _legacy_schema_rendering: + +Legacy Schema Mode +------------------ + +Very old versions of the MSSQL dialect introduced the behavior such that a +schema-qualified table would be auto-aliased when used in a +SELECT statement; given a table:: + + account_table = Table( + 'account', metadata, + Column('id', Integer, primary_key=True), + Column('info', String(100)), + schema="customer_schema" + ) + +this legacy mode of rendering would assume that "customer_schema.account" +would not be accepted by all parts of the SQL statement, as illustrated +below:: + + >>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True) + >>> print(account_table.select().compile(eng)) + SELECT account_1.id, account_1.info + FROM customer_schema.account AS account_1 + +This mode of behavior is now off by default, as it appears to have served +no purpose; however in the case that legacy applications rely upon it, +it is available using the ``legacy_schema_aliasing`` argument to +:func:`.create_engine` as illustrated above. + +.. versionchanged:: 1.1 the ``legacy_schema_aliasing`` flag introduced + in version 1.0.5 to allow disabling of legacy mode for schemas now + defaults to False. + + +.. _mssql_indexes: + +Clustered Index Support +----------------------- + +The MSSQL dialect supports clustered indexes (and primary keys) via the +``mssql_clustered`` option. This option is available to :class:`.Index`, +:class:`.UniqueConstraint`. and :class:`.PrimaryKeyConstraint`. + +To generate a clustered index:: + + Index("my_index", table.c.x, mssql_clustered=True) + +which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``. + +To generate a clustered primary key use:: + + Table('my_table', metadata, + Column('x', ...), + Column('y', ...), + PrimaryKeyConstraint("x", "y", mssql_clustered=True)) + +which will render the table, for example, as:: + + CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL, + PRIMARY KEY CLUSTERED (x, y)) + +Similarly, we can generate a clustered unique constraint using:: + + Table('my_table', metadata, + Column('x', ...), + Column('y', ...), + PrimaryKeyConstraint("x"), + UniqueConstraint("y", mssql_clustered=True), + ) + +To explicitly request a non-clustered primary key (for example, when +a separate clustered index is desired), use:: + + Table('my_table', metadata, + Column('x', ...), + Column('y', ...), + PrimaryKeyConstraint("x", "y", mssql_clustered=False)) + +which will render the table, for example, as:: + + CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL, + PRIMARY KEY NONCLUSTERED (x, y)) + +.. versionchanged:: 1.1 the ``mssql_clustered`` option now defaults + to None, rather than False. 
``mssql_clustered=False`` now explicitly
+   renders the NONCLUSTERED clause, whereas None omits the CLUSTERED
+   clause entirely, allowing SQL Server defaults to take effect.
+
+
+MSSQL-Specific Index Options
+-----------------------------
+
+In addition to clustering, the MSSQL dialect supports other special options
+for :class:`.Index`.
+
+INCLUDE
+^^^^^^^
+
+The ``mssql_include`` option renders INCLUDE(colname) for the given string
+names::
+
+    Index("my_index", table.c.x, mssql_include=['y'])
+
+which would render the index as
+``CREATE INDEX my_index ON table (x) INCLUDE (y)``
+
+.. versionadded:: 0.8
+
+Index ordering
+^^^^^^^^^^^^^^
+
+Index ordering is available via functional expressions, for example::
+
+    Index("my_index", table.c.x.desc())
+
+which would render the index as ``CREATE INDEX my_index ON table (x DESC)``
+
+.. versionadded:: 0.8
+
+.. seealso::
+
+    :ref:`schema_indexes_functional`
+
+Compatibility Levels
+--------------------
+MSSQL supports the notion of setting compatibility levels at the
+database level. This allows, for instance, running a database that
+is compatible with SQL2000 while running on a SQL2005 database
+server. ``server_version_info`` will always return the database
+server version information (in this case SQL2005) and not the
+compatibility level information. Because of this, if running under
+a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
+statements that cannot be parsed by the database server.
+
+Triggers
+--------
+
+SQLAlchemy by default uses OUTPUT INSERTED to get at newly
+generated primary key values via IDENTITY columns or other
+server side defaults.  MS-SQL does not
+allow the usage of OUTPUT INSERTED on tables that have triggers.
+To disable the usage of OUTPUT INSERTED on a per-table basis,
+specify ``implicit_returning=False`` for each :class:`.Table`
+which has triggers::
+
+    Table('mytable', metadata,
+        Column('id', Integer, primary_key=True),
+        # ...,
+        implicit_returning=False
+    )
+
+Declarative form::
+
+    class MyClass(Base):
+        # ...
+        __table_args__ = {'implicit_returning': False}
+
+
+This option can also be specified engine-wide using the
+``implicit_returning=False`` argument on :func:`.create_engine`.
+
+.. _mssql_rowcount_versioning:
+
+Rowcount Support / ORM Versioning
+---------------------------------
+
+The SQL Server drivers have very limited ability to return the number
+of rows updated from an UPDATE or DELETE statement.  In particular, the
+pymssql driver has no support, whereas the pyodbc driver can only return
+this value under certain conditions.
+
+Notably, updated rowcount is not available when OUTPUT INSERTED
+is used.  This impacts the SQLAlchemy ORM's versioning feature when
+server-side versioning schemes are used.  When
+using pyodbc, the "implicit_returning" flag needs to be set to ``False``
+for any ORM mapped class that uses a version_id column in conjunction with
+a server-side version generator::
+
+    class MyTable(Base):
+        __tablename__ = 'mytable'
+        id = Column(Integer, primary_key=True)
+        stuff = Column(String(10))
+        timestamp = Column(TIMESTAMP(), default=text('DEFAULT'))
+        __mapper_args__ = {
+            'version_id_col': timestamp,
+            'version_id_generator': False,
+        }
+        __table_args__ = {
+            'implicit_returning': False
+        }
+
+Without the implicit_returning flag above, the UPDATE statement will
+use ``OUTPUT inserted.timestamp`` and the rowcount will be returned as
+-1, causing the versioning logic to fail.
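+
+The ``implicit_returning`` flag may also be set engine-wide, which applies
+to the versioning scenario above as well; a sketch, reusing the placeholder
+DSN from earlier examples::
+
+    from sqlalchemy import create_engine
+
+    engine = create_engine(
+        "mssql+pyodbc://scott:tiger@ms_2008",
+        implicit_returning=False)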
+ +Enabling Snapshot Isolation +--------------------------- + +Not necessarily specific to SQLAlchemy, SQL Server has a default transaction +isolation mode that locks entire tables, and causes even mildly concurrent +applications to have long held locks and frequent deadlocks. +Enabling snapshot isolation for the database as a whole is recommended +for modern levels of concurrency support. This is accomplished via the +following ALTER DATABASE commands executed at the SQL prompt:: + + ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON + + ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON + +Background on SQL Server snapshot isolation is available at +http://msdn.microsoft.com/en-us/library/ms175095.aspx. + +Known Issues +------------ + +* No support for more than one ``IDENTITY`` column per table +* reflection of indexes does not work with versions older than + SQL Server 2005 + +""" +import datetime +import operator +import re + +from ... import sql, schema as sa_schema, exc, util +from ...sql import compiler, expression, util as sql_util +from ... import engine +from ...engine import reflection, default +from ... import types as sqltypes +from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \ + FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\ + TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR + + +from ...util import update_wrapper +from . import information_schema as ischema + +# http://sqlserverbuilds.blogspot.com/ +MS_2016_VERSION = (13,) +MS_2014_VERSION = (12,) +MS_2012_VERSION = (11,) +MS_2008_VERSION = (10,) +MS_2005_VERSION = (9,) +MS_2000_VERSION = (8,) + +RESERVED_WORDS = set( + ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization', + 'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade', + 'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce', + 'collate', 'column', 'commit', 'compute', 'constraint', 'contains', + 'containstable', 'continue', 'convert', 'create', 'cross', 'current', + 'current_date', 'current_time', 'current_timestamp', 'current_user', + 'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default', + 'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double', + 'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec', + 'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor', + 'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full', + 'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity', + 'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert', + 'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like', + 'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not', + 'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource', + 'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer', + 'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print', + 'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext', + 'reconfigure', 'references', 'replication', 'restore', 'restrict', + 'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount', + 'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select', + 'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics', + 'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top', + 'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union', + 'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values', + 'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with', + 'writetext', + ]) 
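+
+# Illustration: MSIdentifierPreparer (defined later in this module) consults
+# this set, so e.g. a column named "user" is rendered quoted as [user].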
+ + +class REAL(sqltypes.REAL): + __visit_name__ = 'REAL' + + def __init__(self, **kw): + # REAL is a synonym for FLOAT(24) on SQL server + kw['precision'] = 24 + super(REAL, self).__init__(**kw) + + +class TINYINT(sqltypes.Integer): + __visit_name__ = 'TINYINT' + + +# MSSQL DATE/TIME types have varied behavior, sometimes returning +# strings. MSDate/TIME check for everything, and always +# filter bind parameters into datetime objects (required by pyodbc, +# not sure about other dialects). + +class _MSDate(sqltypes.Date): + + def bind_processor(self, dialect): + def process(value): + if type(value) == datetime.date: + return datetime.datetime(value.year, value.month, value.day) + else: + return value + return process + + _reg = re.compile(r"(\d+)-(\d+)-(\d+)") + + def result_processor(self, dialect, coltype): + def process(value): + if isinstance(value, datetime.datetime): + return value.date() + elif isinstance(value, util.string_types): + m = self._reg.match(value) + if not m: + raise ValueError( + "could not parse %r as a date value" % (value, )) + return datetime.date(*[ + int(x or 0) + for x in m.groups() + ]) + else: + return value + return process + + +class TIME(sqltypes.TIME): + + def __init__(self, precision=None, **kwargs): + self.precision = precision + super(TIME, self).__init__() + + __zero_date = datetime.date(1900, 1, 1) + + def bind_processor(self, dialect): + def process(value): + if isinstance(value, datetime.datetime): + value = datetime.datetime.combine( + self.__zero_date, value.time()) + elif isinstance(value, datetime.time): + value = datetime.datetime.combine(self.__zero_date, value) + return value + return process + + _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?") + + def result_processor(self, dialect, coltype): + def process(value): + if isinstance(value, datetime.datetime): + return value.time() + elif isinstance(value, util.string_types): + m = self._reg.match(value) + if not m: + raise ValueError( + "could not parse %r as a time value" % (value, )) + return datetime.time(*[ + int(x or 0) + for x in m.groups()]) + else: + return value + return process +_MSTime = TIME + + +class _DateTimeBase(object): + + def bind_processor(self, dialect): + def process(value): + if type(value) == datetime.date: + return datetime.datetime(value.year, value.month, value.day) + else: + return value + return process + + +class _MSDateTime(_DateTimeBase, sqltypes.DateTime): + pass + + +class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime): + __visit_name__ = 'SMALLDATETIME' + + +class DATETIME2(_DateTimeBase, sqltypes.DateTime): + __visit_name__ = 'DATETIME2' + + def __init__(self, precision=None, **kw): + super(DATETIME2, self).__init__(**kw) + self.precision = precision + + +# TODO: is this not an Interval ? +class DATETIMEOFFSET(sqltypes.TypeEngine): + __visit_name__ = 'DATETIMEOFFSET' + + def __init__(self, precision=None, **kwargs): + self.precision = precision + + +class _StringType(object): + + """Base for MSSQL string types.""" + + def __init__(self, collation=None): + super(_StringType, self).__init__(collation=collation) + + +class NTEXT(sqltypes.UnicodeText): + + """MSSQL NTEXT type, for variable-length unicode text up to 2^30 + characters.""" + + __visit_name__ = 'NTEXT' + + +class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary): + """The MSSQL VARBINARY type. + + This type extends both :class:`.types.VARBINARY` and + :class:`.types.LargeBinary`. 
In "deprecate_large_types" mode, + the :class:`.types.LargeBinary` type will produce ``VARBINARY(max)`` + on SQL Server. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :ref:`mssql_large_type_deprecation` + + + + """ + __visit_name__ = 'VARBINARY' + + +class IMAGE(sqltypes.LargeBinary): + __visit_name__ = 'IMAGE' + + +class BIT(sqltypes.TypeEngine): + __visit_name__ = 'BIT' + + +class MONEY(sqltypes.TypeEngine): + __visit_name__ = 'MONEY' + + +class SMALLMONEY(sqltypes.TypeEngine): + __visit_name__ = 'SMALLMONEY' + + +class UNIQUEIDENTIFIER(sqltypes.TypeEngine): + __visit_name__ = "UNIQUEIDENTIFIER" + + +class SQL_VARIANT(sqltypes.TypeEngine): + __visit_name__ = 'SQL_VARIANT' + +# old names. +MSDateTime = _MSDateTime +MSDate = _MSDate +MSReal = REAL +MSTinyInteger = TINYINT +MSTime = TIME +MSSmallDateTime = SMALLDATETIME +MSDateTime2 = DATETIME2 +MSDateTimeOffset = DATETIMEOFFSET +MSText = TEXT +MSNText = NTEXT +MSString = VARCHAR +MSNVarchar = NVARCHAR +MSChar = CHAR +MSNChar = NCHAR +MSBinary = BINARY +MSVarBinary = VARBINARY +MSImage = IMAGE +MSBit = BIT +MSMoney = MONEY +MSSmallMoney = SMALLMONEY +MSUniqueIdentifier = UNIQUEIDENTIFIER +MSVariant = SQL_VARIANT + +ischema_names = { + 'int': INTEGER, + 'bigint': BIGINT, + 'smallint': SMALLINT, + 'tinyint': TINYINT, + 'varchar': VARCHAR, + 'nvarchar': NVARCHAR, + 'char': CHAR, + 'nchar': NCHAR, + 'text': TEXT, + 'ntext': NTEXT, + 'decimal': DECIMAL, + 'numeric': NUMERIC, + 'float': FLOAT, + 'datetime': DATETIME, + 'datetime2': DATETIME2, + 'datetimeoffset': DATETIMEOFFSET, + 'date': DATE, + 'time': TIME, + 'smalldatetime': SMALLDATETIME, + 'binary': BINARY, + 'varbinary': VARBINARY, + 'bit': BIT, + 'real': REAL, + 'image': IMAGE, + 'timestamp': TIMESTAMP, + 'money': MONEY, + 'smallmoney': SMALLMONEY, + 'uniqueidentifier': UNIQUEIDENTIFIER, + 'sql_variant': SQL_VARIANT, +} + + +class MSTypeCompiler(compiler.GenericTypeCompiler): + def _extend(self, spec, type_, length=None): + """Extend a string-type declaration with standard SQL + COLLATE annotations. 
+ + """ + + if getattr(type_, 'collation', None): + collation = 'COLLATE %s' % type_.collation + else: + collation = None + + if not length: + length = type_.length + + if length: + spec = spec + "(%s)" % length + + return ' '.join([c for c in (spec, collation) + if c is not None]) + + def visit_FLOAT(self, type_, **kw): + precision = getattr(type_, 'precision', None) + if precision is None: + return "FLOAT" + else: + return "FLOAT(%(precision)s)" % {'precision': precision} + + def visit_TINYINT(self, type_, **kw): + return "TINYINT" + + def visit_DATETIMEOFFSET(self, type_, **kw): + if type_.precision is not None: + return "DATETIMEOFFSET(%s)" % type_.precision + else: + return "DATETIMEOFFSET" + + def visit_TIME(self, type_, **kw): + precision = getattr(type_, 'precision', None) + if precision is not None: + return "TIME(%s)" % precision + else: + return "TIME" + + def visit_DATETIME2(self, type_, **kw): + precision = getattr(type_, 'precision', None) + if precision is not None: + return "DATETIME2(%s)" % precision + else: + return "DATETIME2" + + def visit_SMALLDATETIME(self, type_, **kw): + return "SMALLDATETIME" + + def visit_unicode(self, type_, **kw): + return self.visit_NVARCHAR(type_, **kw) + + def visit_text(self, type_, **kw): + if self.dialect.deprecate_large_types: + return self.visit_VARCHAR(type_, **kw) + else: + return self.visit_TEXT(type_, **kw) + + def visit_unicode_text(self, type_, **kw): + if self.dialect.deprecate_large_types: + return self.visit_NVARCHAR(type_, **kw) + else: + return self.visit_NTEXT(type_, **kw) + + def visit_NTEXT(self, type_, **kw): + return self._extend("NTEXT", type_) + + def visit_TEXT(self, type_, **kw): + return self._extend("TEXT", type_) + + def visit_VARCHAR(self, type_, **kw): + return self._extend("VARCHAR", type_, length=type_.length or 'max') + + def visit_CHAR(self, type_, **kw): + return self._extend("CHAR", type_) + + def visit_NCHAR(self, type_, **kw): + return self._extend("NCHAR", type_) + + def visit_NVARCHAR(self, type_, **kw): + return self._extend("NVARCHAR", type_, length=type_.length or 'max') + + def visit_date(self, type_, **kw): + if self.dialect.server_version_info < MS_2008_VERSION: + return self.visit_DATETIME(type_, **kw) + else: + return self.visit_DATE(type_, **kw) + + def visit_time(self, type_, **kw): + if self.dialect.server_version_info < MS_2008_VERSION: + return self.visit_DATETIME(type_, **kw) + else: + return self.visit_TIME(type_, **kw) + + def visit_large_binary(self, type_, **kw): + if self.dialect.deprecate_large_types: + return self.visit_VARBINARY(type_, **kw) + else: + return self.visit_IMAGE(type_, **kw) + + def visit_IMAGE(self, type_, **kw): + return "IMAGE" + + def visit_VARBINARY(self, type_, **kw): + return self._extend( + "VARBINARY", + type_, + length=type_.length or 'max') + + def visit_boolean(self, type_, **kw): + return self.visit_BIT(type_) + + def visit_BIT(self, type_, **kw): + return "BIT" + + def visit_MONEY(self, type_, **kw): + return "MONEY" + + def visit_SMALLMONEY(self, type_, **kw): + return 'SMALLMONEY' + + def visit_UNIQUEIDENTIFIER(self, type_, **kw): + return "UNIQUEIDENTIFIER" + + def visit_SQL_VARIANT(self, type_, **kw): + return 'SQL_VARIANT' + + +class MSExecutionContext(default.DefaultExecutionContext): + _enable_identity_insert = False + _select_lastrowid = False + _result_proxy = None + _lastrowid = None + + def _opt_encode(self, statement): + if not self.dialect.supports_unicode_statements: + return self.dialect._encoder(statement)[0] + else: + return statement + + 
def pre_exec(self): + """Activate IDENTITY_INSERT if needed.""" + + if self.isinsert: + tbl = self.compiled.statement.table + seq_column = tbl._autoincrement_column + insert_has_sequence = seq_column is not None + + if insert_has_sequence: + self._enable_identity_insert = \ + seq_column.key in self.compiled_parameters[0] or \ + ( + self.compiled.statement.parameters and ( + ( + self.compiled.statement._has_multi_parameters + and + seq_column.key in + self.compiled.statement.parameters[0] + ) or ( + not + self.compiled.statement._has_multi_parameters + and + seq_column.key in + self.compiled.statement.parameters + ) + ) + ) + else: + self._enable_identity_insert = False + + self._select_lastrowid = not self.compiled.inline and \ + insert_has_sequence and \ + not self.compiled.returning and \ + not self._enable_identity_insert and \ + not self.executemany + + if self._enable_identity_insert: + self.root_connection._cursor_execute( + self.cursor, + self._opt_encode( + "SET IDENTITY_INSERT %s ON" % + self.dialect.identifier_preparer.format_table(tbl)), + (), + self) + + def post_exec(self): + """Disable IDENTITY_INSERT if enabled.""" + + conn = self.root_connection + if self._select_lastrowid: + if self.dialect.use_scope_identity: + conn._cursor_execute( + self.cursor, + "SELECT scope_identity() AS lastrowid", (), self) + else: + conn._cursor_execute(self.cursor, + "SELECT @@identity AS lastrowid", + (), + self) + # fetchall() ensures the cursor is consumed without closing it + row = self.cursor.fetchall()[0] + self._lastrowid = int(row[0]) + + if (self.isinsert or self.isupdate or self.isdelete) and \ + self.compiled.returning: + self._result_proxy = engine.FullyBufferedResultProxy(self) + + if self._enable_identity_insert: + conn._cursor_execute( + self.cursor, + self._opt_encode( + "SET IDENTITY_INSERT %s OFF" % + self.dialect.identifier_preparer. format_table( + self.compiled.statement.table)), + (), + self) + + def get_lastrowid(self): + return self._lastrowid + + def handle_dbapi_exception(self, e): + if self._enable_identity_insert: + try: + self.cursor.execute( + self._opt_encode( + "SET IDENTITY_INSERT %s OFF" % + self.dialect.identifier_preparer. 
format_table( + self.compiled.statement.table))) + except Exception: + pass + + def get_result_proxy(self): + if self._result_proxy: + return self._result_proxy + else: + return engine.ResultProxy(self) + + +class MSSQLCompiler(compiler.SQLCompiler): + returning_precedes_values = True + + extract_map = util.update_copy( + compiler.SQLCompiler.extract_map, + { + 'doy': 'dayofyear', + 'dow': 'weekday', + 'milliseconds': 'millisecond', + 'microseconds': 'microsecond' + }) + + def __init__(self, *args, **kwargs): + self.tablealiases = {} + super(MSSQLCompiler, self).__init__(*args, **kwargs) + + def _with_legacy_schema_aliasing(fn): + def decorate(self, *arg, **kw): + if self.dialect.legacy_schema_aliasing: + return fn(self, *arg, **kw) + else: + super_ = getattr(super(MSSQLCompiler, self), fn.__name__) + return super_(*arg, **kw) + return decorate + + def visit_now_func(self, fn, **kw): + return "CURRENT_TIMESTAMP" + + def visit_current_date_func(self, fn, **kw): + return "GETDATE()" + + def visit_length_func(self, fn, **kw): + return "LEN%s" % self.function_argspec(fn, **kw) + + def visit_char_length_func(self, fn, **kw): + return "LEN%s" % self.function_argspec(fn, **kw) + + def visit_concat_op_binary(self, binary, operator, **kw): + return "%s + %s" % \ + (self.process(binary.left, **kw), + self.process(binary.right, **kw)) + + def visit_true(self, expr, **kw): + return '1' + + def visit_false(self, expr, **kw): + return '0' + + def visit_match_op_binary(self, binary, operator, **kw): + return "CONTAINS (%s, %s)" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw)) + + def get_select_precolumns(self, select, **kw): + """ MS-SQL puts TOP, it's version of LIMIT here """ + + s = "" + if select._distinct: + s += "DISTINCT " + + if select._simple_int_limit and not select._offset: + # ODBC drivers and possibly others + # don't support bind params in the SELECT clause on SQL Server. + # so have to use literal here. + s += "TOP %d " % select._limit + + if s: + return s + else: + return compiler.SQLCompiler.get_select_precolumns( + self, select, **kw) + + def get_from_hint_text(self, table, text): + return text + + def get_crud_hint_text(self, table, text): + return text + + def limit_clause(self, select, **kw): + # Limit in mssql is after the select keyword + return "" + + def visit_select(self, select, **kwargs): + """Look for ``LIMIT`` and OFFSET in a select statement, and if + so tries to wrap it in a subquery with ``row_number()`` criterion. + + """ + if ( + ( + not select._simple_int_limit and + select._limit_clause is not None + ) or ( + select._offset_clause is not None and + not select._simple_int_offset or select._offset + ) + ) and not getattr(select, '_mssql_visit', None): + + # to use ROW_NUMBER(), an ORDER BY is required. 
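+            # illustration: select([t]).order_by(t.c.id).limit(5).offset(10)
+            # is rewritten roughly as
+            #   SELECT ... FROM (SELECT ..., ROW_NUMBER() OVER (ORDER BY id)
+            #   AS mssql_rn FROM t) WHERE mssql_rn > 10 AND mssql_rn <= 15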
+ if not select._order_by_clause.clauses: + raise exc.CompileError('MSSQL requires an order_by when ' + 'using an OFFSET or a non-simple ' + 'LIMIT clause') + + _order_by_clauses = [ + sql_util.unwrap_label_reference(elem) + for elem in select._order_by_clause.clauses + ] + + limit_clause = select._limit_clause + offset_clause = select._offset_clause + kwargs['select_wraps_for'] = select + select = select._generate() + select._mssql_visit = True + select = select.column( + sql.func.ROW_NUMBER().over(order_by=_order_by_clauses) + .label("mssql_rn")).order_by(None).alias() + + mssql_rn = sql.column('mssql_rn') + limitselect = sql.select([c for c in select.c if + c.key != 'mssql_rn']) + if offset_clause is not None: + limitselect.append_whereclause(mssql_rn > offset_clause) + if limit_clause is not None: + limitselect.append_whereclause( + mssql_rn <= (limit_clause + offset_clause)) + else: + limitselect.append_whereclause( + mssql_rn <= (limit_clause)) + return self.process(limitselect, **kwargs) + else: + return compiler.SQLCompiler.visit_select(self, select, **kwargs) + + @_with_legacy_schema_aliasing + def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs): + if mssql_aliased is table or iscrud: + return super(MSSQLCompiler, self).visit_table(table, **kwargs) + + # alias schema-qualified tables + alias = self._schema_aliased_table(table) + if alias is not None: + return self.process(alias, mssql_aliased=table, **kwargs) + else: + return super(MSSQLCompiler, self).visit_table(table, **kwargs) + + @_with_legacy_schema_aliasing + def visit_alias(self, alias, **kw): + # translate for schema-qualified table aliases + kw['mssql_aliased'] = alias.original + return super(MSSQLCompiler, self).visit_alias(alias, **kw) + + @_with_legacy_schema_aliasing + def visit_column(self, column, add_to_result_map=None, **kw): + if column.table is not None and \ + (not self.isupdate and not self.isdelete) or \ + self.is_subquery(): + # translate for schema-qualified table aliases + t = self._schema_aliased_table(column.table) + if t is not None: + converted = expression._corresponding_column_or_error( + t, column) + if add_to_result_map is not None: + add_to_result_map( + column.name, + column.name, + (column, column.name, column.key), + column.type + ) + + return super(MSSQLCompiler, self).\ + visit_column(converted, **kw) + + return super(MSSQLCompiler, self).visit_column( + column, add_to_result_map=add_to_result_map, **kw) + + def _schema_aliased_table(self, table): + if getattr(table, 'schema', None) is not None: + if table not in self.tablealiases: + self.tablealiases[table] = table.alias() + return self.tablealiases[table] + else: + return None + + def visit_extract(self, extract, **kw): + field = self.extract_map.get(extract.field, extract.field) + return 'DATEPART(%s, %s)' % \ + (field, self.process(extract.expr, **kw)) + + def visit_savepoint(self, savepoint_stmt): + return "SAVE TRANSACTION %s" % \ + self.preparer.format_savepoint(savepoint_stmt) + + def visit_rollback_to_savepoint(self, savepoint_stmt): + return ("ROLLBACK TRANSACTION %s" + % self.preparer.format_savepoint(savepoint_stmt)) + + def visit_binary(self, binary, **kwargs): + """Move bind parameters to the right-hand side of an operator, where + possible. 
+ + """ + if ( + isinstance(binary.left, expression.BindParameter) + and binary.operator == operator.eq + and not isinstance(binary.right, expression.BindParameter) + ): + return self.process( + expression.BinaryExpression(binary.right, + binary.left, + binary.operator), + **kwargs) + return super(MSSQLCompiler, self).visit_binary(binary, **kwargs) + + def returning_clause(self, stmt, returning_cols): + + if self.isinsert or self.isupdate: + target = stmt.table.alias("inserted") + else: + target = stmt.table.alias("deleted") + + adapter = sql_util.ClauseAdapter(target) + + columns = [ + self._label_select_column(None, adapter.traverse(c), + True, False, {}) + for c in expression._select_iterables(returning_cols) + ] + + return 'OUTPUT ' + ', '.join(columns) + + def get_cte_preamble(self, recursive): + # SQL Server finds it too inconvenient to accept + # an entirely optional, SQL standard specified, + # "RECURSIVE" word with their "WITH", + # so here we go + return "WITH" + + def label_select_column(self, select, column, asfrom): + if isinstance(column, expression.Function): + return column.label(None) + else: + return super(MSSQLCompiler, self).\ + label_select_column(select, column, asfrom) + + def for_update_clause(self, select): + # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which + # SQLAlchemy doesn't use + return '' + + def order_by_clause(self, select, **kw): + order_by = self.process(select._order_by_clause, **kw) + + # MSSQL only allows ORDER BY in subqueries if there is a LIMIT + if order_by and (not self.is_subquery() or select._limit): + return " ORDER BY " + order_by + else: + return "" + + def update_from_clause(self, update_stmt, + from_table, extra_froms, + from_hints, + **kw): + """Render the UPDATE..FROM clause specific to MSSQL. + + In MSSQL, if the UPDATE statement involves an alias of the table to + be updated, then the table itself must be added to the FROM list as + well. Otherwise, it is optional. Here, we add it regardless. + + """ + return "FROM " + ', '.join( + t._compiler_dispatch(self, asfrom=True, + fromhints=from_hints, **kw) + for t in [from_table] + extra_froms) + + +class MSSQLStrictCompiler(MSSQLCompiler): + + """A subclass of MSSQLCompiler which disables the usage of bind + parameters where not allowed natively by MS-SQL. + + A dialect may use this compiler on a platform where native + binds are used. + + """ + ansi_bind_rules = True + + def visit_in_op_binary(self, binary, operator, **kw): + kw['literal_binds'] = True + return "%s IN %s" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw) + ) + + def visit_notin_op_binary(self, binary, operator, **kw): + kw['literal_binds'] = True + return "%s NOT IN %s" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw) + ) + + def render_literal_value(self, value, type_): + """ + For date and datetime values, convert to a string + format acceptable to MSSQL. That seems to be the + so-called ODBC canonical date format which looks + like this: + + yyyy-mm-dd hh:mi:ss.mmm(24h) + + For other data types, call the base class implementation. + """ + # datetime and date are both subclasses of datetime.date + if issubclass(type(value), datetime.date): + # SQL Server wants single quotes around the date string. 
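+            # e.g. datetime.date(2017, 3, 1) renders as '2017-03-01' and
+            # datetime.datetime(2017, 3, 1, 12, 0, 5) as '2017-03-01 12:00:05'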
+ return "'" + str(value) + "'" + else: + return super(MSSQLStrictCompiler, self).\ + render_literal_value(value, type_) + + +class MSDDLCompiler(compiler.DDLCompiler): + + def get_column_specification(self, column, **kwargs): + colspec = ( + self.preparer.format_column(column) + " " + + self.dialect.type_compiler.process( + column.type, type_expression=column) + ) + + if column.nullable is not None: + if not column.nullable or column.primary_key or \ + isinstance(column.default, sa_schema.Sequence): + colspec += " NOT NULL" + else: + colspec += " NULL" + + if column.table is None: + raise exc.CompileError( + "mssql requires Table-bound columns " + "in order to generate DDL") + + # install an IDENTITY Sequence if we either a sequence or an implicit + # IDENTITY column + if isinstance(column.default, sa_schema.Sequence): + if column.default.start == 0: + start = 0 + else: + start = column.default.start or 1 + + colspec += " IDENTITY(%s,%s)" % (start, + column.default.increment or 1) + elif column is column.table._autoincrement_column: + colspec += " IDENTITY(1,1)" + else: + default = self.get_column_default_string(column) + if default is not None: + colspec += " DEFAULT " + default + + return colspec + + def visit_create_index(self, create, include_schema=False): + index = create.element + self._verify_index_table(index) + preparer = self.preparer + text = "CREATE " + if index.unique: + text += "UNIQUE " + + # handle clustering option + clustered = index.dialect_options['mssql']['clustered'] + if clustered is not None: + if clustered: + text += "CLUSTERED " + else: + text += "NONCLUSTERED " + + text += "INDEX %s ON %s (%s)" \ + % ( + self._prepared_index_name(index, + include_schema=include_schema), + preparer.format_table(index.table), + ', '.join( + self.sql_compiler.process(expr, + include_table=False, + literal_binds=True) for + expr in index.expressions) + ) + + # handle other included columns + if index.dialect_options['mssql']['include']: + inclusions = [index.table.c[col] + if isinstance(col, util.string_types) else col + for col in + index.dialect_options['mssql']['include'] + ] + + text += " INCLUDE (%s)" \ + % ', '.join([preparer.quote(c.name) + for c in inclusions]) + + return text + + def visit_drop_index(self, drop): + return "\nDROP INDEX %s ON %s" % ( + self._prepared_index_name(drop.element, include_schema=False), + self.preparer.format_table(drop.element.table) + ) + + def visit_primary_key_constraint(self, constraint): + if len(constraint) == 0: + return '' + text = "" + if constraint.name is not None: + text += "CONSTRAINT %s " % \ + self.preparer.format_constraint(constraint) + text += "PRIMARY KEY " + + clustered = constraint.dialect_options['mssql']['clustered'] + if clustered is not None: + if clustered: + text += "CLUSTERED " + else: + text += "NONCLUSTERED " + + text += "(%s)" % ', '.join(self.preparer.quote(c.name) + for c in constraint) + text += self.define_constraint_deferrability(constraint) + return text + + def visit_unique_constraint(self, constraint): + if len(constraint) == 0: + return '' + text = "" + if constraint.name is not None: + text += "CONSTRAINT %s " % \ + self.preparer.format_constraint(constraint) + text += "UNIQUE " + + clustered = constraint.dialect_options['mssql']['clustered'] + if clustered is not None: + if clustered: + text += "CLUSTERED " + else: + text += "NONCLUSTERED " + + text += "(%s)" % ', '.join(self.preparer.quote(c.name) + for c in constraint) + text += self.define_constraint_deferrability(constraint) + return text + + +class 
MSIdentifierPreparer(compiler.IdentifierPreparer): + reserved_words = RESERVED_WORDS + + def __init__(self, dialect): + super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[', + final_quote=']') + + def _escape_identifier(self, value): + return value + + def quote_schema(self, schema, force=None): + """Prepare a quoted table and schema name.""" + result = '.'.join([self.quote(x, force) for x in schema.split('.')]) + return result + + +def _db_plus_owner_listing(fn): + def wrap(dialect, connection, schema=None, **kw): + dbname, owner = _owner_plus_db(dialect, schema) + return _switch_db(dbname, connection, fn, dialect, connection, + dbname, owner, schema, **kw) + return update_wrapper(wrap, fn) + + +def _db_plus_owner(fn): + def wrap(dialect, connection, tablename, schema=None, **kw): + dbname, owner = _owner_plus_db(dialect, schema) + return _switch_db(dbname, connection, fn, dialect, connection, + tablename, dbname, owner, schema, **kw) + return update_wrapper(wrap, fn) + + +def _switch_db(dbname, connection, fn, *arg, **kw): + if dbname: + current_db = connection.scalar("select db_name()") + connection.execute("use %s" % dbname) + try: + return fn(*arg, **kw) + finally: + if dbname: + connection.execute("use %s" % current_db) + + +def _owner_plus_db(dialect, schema): + if not schema: + return None, dialect.default_schema_name + elif "." in schema: + return schema.split(".", 1) + else: + return None, schema + + +class MSDialect(default.DefaultDialect): + name = 'mssql' + supports_default_values = True + supports_empty_insert = False + execution_ctx_cls = MSExecutionContext + use_scope_identity = True + max_identifier_length = 128 + schema_name = "dbo" + + colspecs = { + sqltypes.DateTime: _MSDateTime, + sqltypes.Date: _MSDate, + sqltypes.Time: TIME, + } + + engine_config_types = default.DefaultDialect.engine_config_types.union([ + ('legacy_schema_aliasing', util.asbool), + ]) + + ischema_names = ischema_names + + supports_native_boolean = False + supports_unicode_binds = True + postfetch_lastrowid = True + + server_version_info = () + + statement_compiler = MSSQLCompiler + ddl_compiler = MSDDLCompiler + type_compiler = MSTypeCompiler + preparer = MSIdentifierPreparer + + construct_arguments = [ + (sa_schema.PrimaryKeyConstraint, { + "clustered": None + }), + (sa_schema.UniqueConstraint, { + "clustered": None + }), + (sa_schema.Index, { + "clustered": None, + "include": None + }) + ] + + def __init__(self, + query_timeout=None, + use_scope_identity=True, + max_identifier_length=None, + schema_name="dbo", + isolation_level=None, + deprecate_large_types=None, + legacy_schema_aliasing=False, **opts): + self.query_timeout = int(query_timeout or 0) + self.schema_name = schema_name + + self.use_scope_identity = use_scope_identity + self.max_identifier_length = int(max_identifier_length or 0) or \ + self.max_identifier_length + self.deprecate_large_types = deprecate_large_types + self.legacy_schema_aliasing = legacy_schema_aliasing + + super(MSDialect, self).__init__(**opts) + + self.isolation_level = isolation_level + + def do_savepoint(self, connection, name): + # give the DBAPI a push + connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION") + super(MSDialect, self).do_savepoint(connection, name) + + def do_release_savepoint(self, connection, name): + # SQL Server does not support RELEASE SAVEPOINT + pass + + _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED', + 'READ COMMITTED', 'REPEATABLE READ', + 'SNAPSHOT']) + + def set_isolation_level(self, connection, level): 
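+        # accepts e.g. "READ COMMITTED" or the underscored form
+        # "READ_COMMITTED"; the value is validated against _isolation_lookup
+        # and emitted as SET TRANSACTION ISOLATION LEVEL <level>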
+ level = level.replace('_', ' ') + if level not in self._isolation_lookup: + raise exc.ArgumentError( + "Invalid value '%s' for isolation_level. " + "Valid isolation levels for %s are %s" % + (level, self.name, ", ".join(self._isolation_lookup)) + ) + cursor = connection.cursor() + cursor.execute( + "SET TRANSACTION ISOLATION LEVEL %s" % level) + cursor.close() + + def get_isolation_level(self, connection): + if self.server_version_info < MS_2005_VERSION: + raise NotImplementedError( + "Can't fetch isolation level prior to SQL Server 2005") + + cursor = connection.cursor() + cursor.execute(""" + SELECT CASE transaction_isolation_level + WHEN 0 THEN NULL + WHEN 1 THEN 'READ UNCOMMITTED' + WHEN 2 THEN 'READ COMMITTED' + WHEN 3 THEN 'REPEATABLE READ' + WHEN 4 THEN 'SERIALIZABLE' + WHEN 5 THEN 'SNAPSHOT' END AS TRANSACTION_ISOLATION_LEVEL + FROM sys.dm_exec_sessions + where session_id = @@SPID + """) + val = cursor.fetchone()[0] + cursor.close() + return val.upper() + + def initialize(self, connection): + super(MSDialect, self).initialize(connection) + self._setup_version_attributes() + + def on_connect(self): + if self.isolation_level is not None: + def connect(conn): + self.set_isolation_level(conn, self.isolation_level) + return connect + else: + return None + + def _setup_version_attributes(self): + if self.server_version_info[0] not in list(range(8, 17)): + util.warn( + "Unrecognized server version info '%s'. Some SQL Server " + "features may not function properly." % + ".".join(str(x) for x in self.server_version_info)) + if self.server_version_info >= MS_2005_VERSION and \ + 'implicit_returning' not in self.__dict__: + self.implicit_returning = True + if self.server_version_info >= MS_2008_VERSION: + self.supports_multivalues_insert = True + if self.deprecate_large_types is None: + self.deprecate_large_types = \ + self.server_version_info >= MS_2012_VERSION + + def _get_default_schema_name(self, connection): + if self.server_version_info < MS_2005_VERSION: + return self.schema_name + else: + query = sql.text("SELECT schema_name()") + default_schema_name = connection.scalar(query) + if default_schema_name is not None: + return util.text_type(default_schema_name) + else: + return self.schema_name + + @_db_plus_owner + def has_table(self, connection, tablename, dbname, owner, schema): + columns = ischema.columns + + whereclause = columns.c.table_name == tablename + + if owner: + whereclause = sql.and_(whereclause, + columns.c.table_schema == owner) + s = sql.select([columns], whereclause) + c = connection.execute(s) + return c.first() is not None + + @reflection.cache + def get_schema_names(self, connection, **kw): + s = sql.select([ischema.schemata.c.schema_name], + order_by=[ischema.schemata.c.schema_name] + ) + schema_names = [r[0] for r in connection.execute(s)] + return schema_names + + @reflection.cache + @_db_plus_owner_listing + def get_table_names(self, connection, dbname, owner, schema, **kw): + tables = ischema.tables + s = sql.select([tables.c.table_name], + sql.and_( + tables.c.table_schema == owner, + tables.c.table_type == 'BASE TABLE' + ), + order_by=[tables.c.table_name] + ) + table_names = [r[0] for r in connection.execute(s)] + return table_names + + @reflection.cache + @_db_plus_owner_listing + def get_view_names(self, connection, dbname, owner, schema, **kw): + tables = ischema.tables + s = sql.select([tables.c.table_name], + sql.and_( + tables.c.table_schema == owner, + tables.c.table_type == 'VIEW' + ), + order_by=[tables.c.table_name] + ) + view_names = [r[0] for r 
in connection.execute(s)] + return view_names + + @reflection.cache + @_db_plus_owner + def get_indexes(self, connection, tablename, dbname, owner, schema, **kw): + # using system catalogs, don't support index reflection + # below MS 2005 + if self.server_version_info < MS_2005_VERSION: + return [] + + rp = connection.execute( + sql.text("select ind.index_id, ind.is_unique, ind.name " + "from sys.indexes as ind join sys.tables as tab on " + "ind.object_id=tab.object_id " + "join sys.schemas as sch on sch.schema_id=tab.schema_id " + "where tab.name = :tabname " + "and sch.name=:schname " + "and ind.is_primary_key=0", + bindparams=[ + sql.bindparam('tabname', tablename, + sqltypes.String(convert_unicode=True)), + sql.bindparam('schname', owner, + sqltypes.String(convert_unicode=True)) + ], + typemap={ + 'name': sqltypes.Unicode() + } + ) + ) + indexes = {} + for row in rp: + indexes[row['index_id']] = { + 'name': row['name'], + 'unique': row['is_unique'] == 1, + 'column_names': [] + } + rp = connection.execute( + sql.text( + "select ind_col.index_id, ind_col.object_id, col.name " + "from sys.columns as col " + "join sys.tables as tab on tab.object_id=col.object_id " + "join sys.index_columns as ind_col on " + "(ind_col.column_id=col.column_id and " + "ind_col.object_id=tab.object_id) " + "join sys.schemas as sch on sch.schema_id=tab.schema_id " + "where tab.name=:tabname " + "and sch.name=:schname", + bindparams=[ + sql.bindparam('tabname', tablename, + sqltypes.String(convert_unicode=True)), + sql.bindparam('schname', owner, + sqltypes.String(convert_unicode=True)) + ], + typemap={'name': sqltypes.Unicode()} + ), + ) + for row in rp: + if row['index_id'] in indexes: + indexes[row['index_id']]['column_names'].append(row['name']) + + return list(indexes.values()) + + @reflection.cache + @_db_plus_owner + def get_view_definition(self, connection, viewname, + dbname, owner, schema, **kw): + rp = connection.execute( + sql.text( + "select definition from sys.sql_modules as mod, " + "sys.views as views, " + "sys.schemas as sch" + " where " + "mod.object_id=views.object_id and " + "views.schema_id=sch.schema_id and " + "views.name=:viewname and sch.name=:schname", + bindparams=[ + sql.bindparam('viewname', viewname, + sqltypes.String(convert_unicode=True)), + sql.bindparam('schname', owner, + sqltypes.String(convert_unicode=True)) + ] + ) + ) + + if rp: + view_def = rp.scalar() + return view_def + + @reflection.cache + @_db_plus_owner + def get_columns(self, connection, tablename, dbname, owner, schema, **kw): + # Get base columns + columns = ischema.columns + if owner: + whereclause = sql.and_(columns.c.table_name == tablename, + columns.c.table_schema == owner) + else: + whereclause = columns.c.table_name == tablename + s = sql.select([columns], whereclause, + order_by=[columns.c.ordinal_position]) + + c = connection.execute(s) + cols = [] + while True: + row = c.fetchone() + if row is None: + break + (name, type, nullable, charlen, + numericprec, numericscale, default, collation) = ( + row[columns.c.column_name], + row[columns.c.data_type], + row[columns.c.is_nullable] == 'YES', + row[columns.c.character_maximum_length], + row[columns.c.numeric_precision], + row[columns.c.numeric_scale], + row[columns.c.column_default], + row[columns.c.collation_name] + ) + coltype = self.ischema_names.get(type, None) + + kwargs = {} + if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText, + MSNText, MSBinary, MSVarBinary, + sqltypes.LargeBinary): + if charlen == -1: + charlen = None + kwargs['length'] = 
charlen + if collation: + kwargs['collation'] = collation + + if coltype is None: + util.warn( + "Did not recognize type '%s' of column '%s'" % + (type, name)) + coltype = sqltypes.NULLTYPE + else: + if issubclass(coltype, sqltypes.Numeric) and \ + coltype is not MSReal: + kwargs['scale'] = numericscale + kwargs['precision'] = numericprec + + coltype = coltype(**kwargs) + cdict = { + 'name': name, + 'type': coltype, + 'nullable': nullable, + 'default': default, + 'autoincrement': False, + } + cols.append(cdict) + # autoincrement and identity + colmap = {} + for col in cols: + colmap[col['name']] = col + # We also run an sp_columns to check for identity columns: + cursor = connection.execute("sp_columns @table_name = '%s', " + "@table_owner = '%s'" + % (tablename, owner)) + ic = None + while True: + row = cursor.fetchone() + if row is None: + break + (col_name, type_name) = row[3], row[5] + if type_name.endswith("identity") and col_name in colmap: + ic = col_name + colmap[col_name]['autoincrement'] = True + colmap[col_name]['sequence'] = dict( + name='%s_identity' % col_name) + break + cursor.close() + + if ic is not None and self.server_version_info >= MS_2005_VERSION: + table_fullname = "%s.%s" % (owner, tablename) + cursor = connection.execute( + "select ident_seed('%s'), ident_incr('%s')" + % (table_fullname, table_fullname) + ) + + row = cursor.first() + if row is not None and row[0] is not None: + colmap[ic]['sequence'].update({ + 'start': int(row[0]), + 'increment': int(row[1]) + }) + return cols + + @reflection.cache + @_db_plus_owner + def get_pk_constraint(self, connection, tablename, + dbname, owner, schema, **kw): + pkeys = [] + TC = ischema.constraints + C = ischema.key_constraints.alias('C') + + # Primary key constraints + s = sql.select([C.c.column_name, + TC.c.constraint_type, + C.c.constraint_name], + sql.and_(TC.c.constraint_name == C.c.constraint_name, + TC.c.table_schema == C.c.table_schema, + C.c.table_name == tablename, + C.c.table_schema == owner) + ) + c = connection.execute(s) + constraint_name = None + for row in c: + if 'PRIMARY' in row[TC.c.constraint_type.name]: + pkeys.append(row[0]) + if constraint_name is None: + constraint_name = row[C.c.constraint_name.name] + return {'constrained_columns': pkeys, 'name': constraint_name} + + @reflection.cache + @_db_plus_owner + def get_foreign_keys(self, connection, tablename, + dbname, owner, schema, **kw): + RR = ischema.ref_constraints + C = ischema.key_constraints.alias('C') + R = ischema.key_constraints.alias('R') + + # Foreign key constraints + s = sql.select([C.c.column_name, + R.c.table_schema, R.c.table_name, R.c.column_name, + RR.c.constraint_name, RR.c.match_option, + RR.c.update_rule, + RR.c.delete_rule], + sql.and_(C.c.table_name == tablename, + C.c.table_schema == owner, + C.c.constraint_name == RR.c.constraint_name, + R.c.constraint_name == + RR.c.unique_constraint_name, + C.c.ordinal_position == R.c.ordinal_position + ), + order_by=[RR.c.constraint_name, R.c.ordinal_position] + ) + + # group rows by constraint ID, to handle multi-column FKs + fkeys = [] + fknm, scols, rcols = (None, [], []) + + def fkey_rec(): + return { + 'name': None, + 'constrained_columns': [], + 'referred_schema': None, + 'referred_table': None, + 'referred_columns': [] + } + + fkeys = util.defaultdict(fkey_rec) + + for r in connection.execute(s).fetchall(): + scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r + + rec = fkeys[rfknm] + rec['name'] = rfknm + if not rec['referred_table']: + rec['referred_table'] = rtbl + 
if schema is not None or owner != rschema: + if dbname: + rschema = dbname + "." + rschema + rec['referred_schema'] = rschema + + local_cols, remote_cols = \ + rec['constrained_columns'],\ + rec['referred_columns'] + + local_cols.append(scol) + remote_cols.append(rcol) + + return list(fkeys.values()) diff --git a/app/lib/sqlalchemy/dialects/mssql/information_schema.py b/app/lib/sqlalchemy/dialects/mssql/information_schema.py new file mode 100644 index 0000000..625479b --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mssql/information_schema.py @@ -0,0 +1,136 @@ +# mssql/information_schema.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +# TODO: should be using the sys. catalog with SQL Server, not information +# schema + +from ... import Table, MetaData, Column +from ...types import String, Unicode, UnicodeText, Integer, TypeDecorator +from ... import cast +from ... import util +from ...sql import expression +from ...ext.compiler import compiles + +ischema = MetaData() + + +class CoerceUnicode(TypeDecorator): + impl = Unicode + + def process_bind_param(self, value, dialect): + if util.py2k and isinstance(value, util.binary_type): + value = value.decode(dialect.encoding) + return value + + def bind_expression(self, bindvalue): + return _cast_on_2005(bindvalue) + + +class _cast_on_2005(expression.ColumnElement): + def __init__(self, bindvalue): + self.bindvalue = bindvalue + + +@compiles(_cast_on_2005) +def _compile(element, compiler, **kw): + from . import base + if compiler.dialect.server_version_info < base.MS_2005_VERSION: + return compiler.process(element.bindvalue, **kw) + else: + return compiler.process(cast(element.bindvalue, Unicode), **kw) + +schemata = Table("SCHEMATA", ischema, + Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"), + Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"), + Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"), + schema="INFORMATION_SCHEMA") + +tables = Table("TABLES", ischema, + Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"), + Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), + Column("TABLE_NAME", CoerceUnicode, key="table_name"), + Column( + "TABLE_TYPE", String(convert_unicode=True), + key="table_type"), + schema="INFORMATION_SCHEMA") + +columns = Table("COLUMNS", ischema, + Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), + Column("TABLE_NAME", CoerceUnicode, key="table_name"), + Column("COLUMN_NAME", CoerceUnicode, key="column_name"), + Column("IS_NULLABLE", Integer, key="is_nullable"), + Column("DATA_TYPE", String, key="data_type"), + Column("ORDINAL_POSITION", Integer, key="ordinal_position"), + Column("CHARACTER_MAXIMUM_LENGTH", Integer, + key="character_maximum_length"), + Column("NUMERIC_PRECISION", Integer, key="numeric_precision"), + Column("NUMERIC_SCALE", Integer, key="numeric_scale"), + Column("COLUMN_DEFAULT", Integer, key="column_default"), + Column("COLLATION_NAME", String, key="collation_name"), + schema="INFORMATION_SCHEMA") + +constraints = Table("TABLE_CONSTRAINTS", ischema, + Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), + Column("TABLE_NAME", CoerceUnicode, key="table_name"), + Column("CONSTRAINT_NAME", CoerceUnicode, + key="constraint_name"), + Column("CONSTRAINT_TYPE", String( + convert_unicode=True), key="constraint_type"), + schema="INFORMATION_SCHEMA") + +column_constraints = 
Table("CONSTRAINT_COLUMN_USAGE", ischema, + Column("TABLE_SCHEMA", CoerceUnicode, + key="table_schema"), + Column("TABLE_NAME", CoerceUnicode, + key="table_name"), + Column("COLUMN_NAME", CoerceUnicode, + key="column_name"), + Column("CONSTRAINT_NAME", CoerceUnicode, + key="constraint_name"), + schema="INFORMATION_SCHEMA") + +key_constraints = Table("KEY_COLUMN_USAGE", ischema, + Column("TABLE_SCHEMA", CoerceUnicode, + key="table_schema"), + Column("TABLE_NAME", CoerceUnicode, + key="table_name"), + Column("COLUMN_NAME", CoerceUnicode, + key="column_name"), + Column("CONSTRAINT_NAME", CoerceUnicode, + key="constraint_name"), + Column("ORDINAL_POSITION", Integer, + key="ordinal_position"), + schema="INFORMATION_SCHEMA") + +ref_constraints = Table("REFERENTIAL_CONSTRAINTS", ischema, + Column("CONSTRAINT_CATALOG", CoerceUnicode, + key="constraint_catalog"), + Column("CONSTRAINT_SCHEMA", CoerceUnicode, + key="constraint_schema"), + Column("CONSTRAINT_NAME", CoerceUnicode, + key="constraint_name"), + # TODO: is CATLOG misspelled ? + Column("UNIQUE_CONSTRAINT_CATLOG", CoerceUnicode, + key="unique_constraint_catalog"), + + Column("UNIQUE_CONSTRAINT_SCHEMA", CoerceUnicode, + key="unique_constraint_schema"), + Column("UNIQUE_CONSTRAINT_NAME", CoerceUnicode, + key="unique_constraint_name"), + Column("MATCH_OPTION", String, key="match_option"), + Column("UPDATE_RULE", String, key="update_rule"), + Column("DELETE_RULE", String, key="delete_rule"), + schema="INFORMATION_SCHEMA") + +views = Table("VIEWS", ischema, + Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"), + Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), + Column("TABLE_NAME", CoerceUnicode, key="table_name"), + Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"), + Column("CHECK_OPTION", String, key="check_option"), + Column("IS_UPDATABLE", String, key="is_updatable"), + schema="INFORMATION_SCHEMA") diff --git a/app/lib/sqlalchemy/dialects/mssql/mxodbc.py b/app/lib/sqlalchemy/dialects/mssql/mxodbc.py new file mode 100644 index 0000000..41729b7 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mssql/mxodbc.py @@ -0,0 +1,139 @@ +# mssql/mxodbc.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: mssql+mxodbc + :name: mxODBC + :dbapi: mxodbc + :connectstring: mssql+mxodbc://:@ + :url: http://www.egenix.com/ + +Execution Modes +--------------- + +mxODBC features two styles of statement execution, using the +``cursor.execute()`` and ``cursor.executedirect()`` methods (the second being +an extension to the DBAPI specification). The former makes use of a particular +API call specific to the SQL Server Native Client ODBC driver known +SQLDescribeParam, while the latter does not. + +mxODBC apparently only makes repeated use of a single prepared statement +when SQLDescribeParam is used. The advantage to prepared statement reuse is +one of performance. The disadvantage is that SQLDescribeParam has a limited +set of scenarios in which bind parameters are understood, including that they +cannot be placed within the argument lists of function calls, anywhere outside +the FROM, or even within subqueries within the FROM clause - making the usage +of bind parameters within SELECT statements impossible for all but the most +simplistic statements. 
+ +For this reason, the mxODBC dialect uses the "native" mode by default only for +INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for +all other statements. + +This behavior can be controlled via +:meth:`~sqlalchemy.sql.expression.Executable.execution_options` using the +``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a +value of ``True`` will unconditionally use native bind parameters and a value +of ``False`` will unconditionally use string-escaped parameters. + +""" + + +from ... import types as sqltypes +from ...connectors.mxodbc import MxODBCConnector +from .pyodbc import MSExecutionContext_pyodbc, _MSNumeric_pyodbc +from .base import (MSDialect, + MSSQLStrictCompiler, + VARBINARY, + _MSDateTime, _MSDate, _MSTime) + + +class _MSNumeric_mxodbc(_MSNumeric_pyodbc): + """Include pyodbc's numeric processor. + """ + + +class _MSDate_mxodbc(_MSDate): + def bind_processor(self, dialect): + def process(value): + if value is not None: + return "%s-%s-%s" % (value.year, value.month, value.day) + else: + return None + return process + + +class _MSTime_mxodbc(_MSTime): + def bind_processor(self, dialect): + def process(value): + if value is not None: + return "%s:%s:%s" % (value.hour, value.minute, value.second) + else: + return None + return process + + +class _VARBINARY_mxodbc(VARBINARY): + + """ + mxODBC Support for VARBINARY column types. + + This handles the special case for null VARBINARY values, + which maps None values to the mx.ODBC.Manager.BinaryNull symbol. + """ + + def bind_processor(self, dialect): + if dialect.dbapi is None: + return None + + DBAPIBinary = dialect.dbapi.Binary + + def process(value): + if value is not None: + return DBAPIBinary(value) + else: + # should pull from mx.ODBC.Manager.BinaryNull + return dialect.dbapi.BinaryNull + return process + + +class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc): + """ + The pyodbc execution context is useful for enabling + SELECT SCOPE_IDENTITY in cases where OUTPUT clause + does not work (tables with insert triggers). + """ + # todo - investigate whether the pyodbc execution context + # is really only being used in cases where OUTPUT + # won't work. + + +class MSDialect_mxodbc(MxODBCConnector, MSDialect): + + # this is only needed if "native ODBC" mode is used, + # which is now disabled by default. + # statement_compiler = MSSQLStrictCompiler + + execution_ctx_cls = MSExecutionContext_mxodbc + + # flag used by _MSNumeric_mxodbc + _need_decimal_fix = True + + colspecs = { + sqltypes.Numeric: _MSNumeric_mxodbc, + sqltypes.DateTime: _MSDateTime, + sqltypes.Date: _MSDate_mxodbc, + sqltypes.Time: _MSTime_mxodbc, + VARBINARY: _VARBINARY_mxodbc, + sqltypes.LargeBinary: _VARBINARY_mxodbc, + } + + def __init__(self, description_encoding=None, **params): + super(MSDialect_mxodbc, self).__init__(**params) + self.description_encoding = description_encoding + +dialect = MSDialect_mxodbc diff --git a/app/lib/sqlalchemy/dialects/mssql/pymssql.py b/app/lib/sqlalchemy/dialects/mssql/pymssql.py new file mode 100644 index 0000000..57ca8ab --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mssql/pymssql.py @@ -0,0 +1,97 @@ +# mssql/pymssql.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. 
dialect:: mssql+pymssql
+    :name: pymssql
+    :dbapi: pymssql
+    :connectstring: mssql+pymssql://<username>:<password>@<freetds_name>/?\
+charset=utf8
+    :url: http://pymssql.org/
+
+pymssql is a Python module that provides a Python DBAPI interface around
+`FreeTDS <http://www.freetds.org/>`_.  Compatible builds are available for
+Linux, MacOSX and Windows platforms.
+
+"""
+from .base import MSDialect
+from ... import types as sqltypes, util, processors
+import re
+
+
+class _MSNumeric_pymssql(sqltypes.Numeric):
+    def result_processor(self, dialect, type_):
+        if not self.asdecimal:
+            return processors.to_float
+        else:
+            return sqltypes.Numeric.result_processor(self, dialect, type_)
+
+
+class MSDialect_pymssql(MSDialect):
+    supports_sane_rowcount = False
+    driver = 'pymssql'
+
+    colspecs = util.update_copy(
+        MSDialect.colspecs,
+        {
+            sqltypes.Numeric: _MSNumeric_pymssql,
+            sqltypes.Float: sqltypes.Float,
+        }
+    )
+
+    @classmethod
+    def dbapi(cls):
+        module = __import__('pymssql')
+        # pymssql < 2.1.1 doesn't have a Binary method.  we use string
+        client_ver = tuple(int(x) for x in module.__version__.split("."))
+        if client_ver < (2, 1, 1):
+            # TODO: monkeypatching here is less than ideal
+            module.Binary = lambda x: x if hasattr(x, 'decode') else str(x)
+
+        if client_ver < (1, ):
+            util.warn("The pymssql dialect expects at least "
+                      "the 1.0 series of the pymssql DBAPI.")
+        return module
+
+    def __init__(self, **params):
+        super(MSDialect_pymssql, self).__init__(**params)
+        self.use_scope_identity = True
+
+    def _get_server_version_info(self, connection):
+        vers = connection.scalar("select @@version")
+        m = re.match(
+            r"Microsoft .*? - (\d+).(\d+).(\d+).(\d+)", vers)
+        if m:
+            return tuple(int(x) for x in m.group(1, 2, 3, 4))
+        else:
+            return None
+
+    def create_connect_args(self, url):
+        opts = url.translate_connect_args(username='user')
+        opts.update(url.query)
+        port = opts.pop('port', None)
+        if port and 'host' in opts:
+            opts['host'] = "%s:%s" % (opts['host'], port)
+        return [[], opts]
+
+    def is_disconnect(self, e, connection, cursor):
+        for msg in (
+            "Adaptive Server connection timed out",
+            "Net-Lib error during Connection reset by peer",
+            "message 20003",  # connection timeout
+            "Error 10054",
+            "Not connected to any MS SQL server",
+            "Connection is closed",
+            "message 20006",  # Write to the server failed
+            "message 20017",  # Unexpected EOF from the server
+        ):
+            if msg in str(e):
+                return True
+        else:
+            return False
+
+dialect = MSDialect_pymssql
diff --git a/app/lib/sqlalchemy/dialects/mssql/pyodbc.py b/app/lib/sqlalchemy/dialects/mssql/pyodbc.py
new file mode 100644
index 0000000..c6368f9
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mssql/pyodbc.py
@@ -0,0 +1,292 @@
+# mssql/pyodbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+r"""
+.. dialect:: mssql+pyodbc
+    :name: PyODBC
+    :dbapi: pyodbc
+    :connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
+    :url: http://pypi.python.org/pypi/pyodbc/
+
+Connecting to PyODBC
+--------------------
+
+The URL here is to be translated to PyODBC connection strings, as
+detailed in `ConnectionStrings
+<http://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_.
+
+DSN Connections
+^^^^^^^^^^^^^^^
+
+A DSN-based connection is **preferred** overall when using ODBC.
+
+A basic DSN-based connection looks like::
+
+    engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
+
+The URL above passes the following connection string to PyODBC::
+
+    dsn=mydsn;UID=user;PWD=pass
+
+If the username and password are omitted, the DSN form will also add
+the ``Trusted_Connection=yes`` directive to the ODBC string.
+
+Hostname Connections
+^^^^^^^^^^^^^^^^^^^^
+
+Hostname-based connections are **not preferred**; however, they are supported.
+The ODBC driver name must be explicitly specified::
+
+    engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
+
+.. versionchanged:: 1.0.0 Hostname-based PyODBC connections now require the
+   SQL Server driver name specified explicitly.  SQLAlchemy cannot
+   choose an optimal default here as it varies based on platform
+   and installed drivers.
+
+Other keywords interpreted by the PyODBC dialect to be passed to
+``pyodbc.connect()`` in both the DSN and hostname cases include:
+``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``.
+
+Pass through exact PyODBC string
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A PyODBC connection string can also be sent exactly as specified in
+`ConnectionStrings
+<http://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_
+into the driver using the parameter ``odbc_connect``.  The delimiters must
+be URL escaped, however, as illustrated below using ``urllib.quote_plus``::
+
+    import urllib
+    params = urllib.quote_plus("DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password")
+
+    engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
+
+
+Unicode Binds
+-------------
+
+The current state of PyODBC on a unix backend with FreeTDS and/or
+EasySoft is poor regarding unicode; different OS platforms and versions of
+UnixODBC versus iODBC versus FreeTDS/EasySoft versus PyODBC itself
+dramatically alter how strings are received.  The PyODBC dialect attempts to
+use all the information it knows to determine whether or not a Python unicode
+literal can be passed directly to the PyODBC driver; while SQLAlchemy
+can encode these to bytestrings first, some users have reported that PyODBC
+mis-handles bytestrings for certain encodings and requires a Python unicode
+object, while the author has observed widespread cases where a Python unicode
+is completely misinterpreted by PyODBC, particularly when dealing with
+the information schema tables used in table reflection, and the value
+must first be encoded to a bytestring.
+
+It is for this reason that whether unicode literals for bound
+parameters are sent to PyODBC can be controlled using the
+``supports_unicode_binds`` parameter to ``create_engine()``.  When
+left at its default of ``None``, the PyODBC dialect will use its
+best guess as to whether or not the driver deals with unicode literals
+well.  When ``False``, unicode literals will be encoded first, and when
+``True`` unicode literals will be passed straight through.  This is an interim
+flag that hopefully should not be needed when the unicode situation stabilizes
+for unix + PyODBC.
+
+.. versionadded:: 0.7.7
+    ``supports_unicode_binds`` parameter to ``create_engine()``\ .
+
+Rowcount Support
+----------------
+
+Pyodbc only has partial support for rowcount.  See the notes at
+:ref:`mssql_rowcount_versioning` for important notes when using ORM
+versioning.
+
+"""
+
+from .base import MSExecutionContext, MSDialect, VARBINARY
+from ...connectors.pyodbc import PyODBCConnector
+from ... 
import types as sqltypes, util, exc +import decimal +import re + + +class _ms_numeric_pyodbc(object): + + """Turns Decimals with adjusted() < 0 or > 7 into strings. + + The routines here are needed for older pyodbc versions + as well as current mxODBC versions. + + """ + + def bind_processor(self, dialect): + + super_process = super(_ms_numeric_pyodbc, self).\ + bind_processor(dialect) + + if not dialect._need_decimal_fix: + return super_process + + def process(value): + if self.asdecimal and \ + isinstance(value, decimal.Decimal): + + adjusted = value.adjusted() + if adjusted < 0: + return self._small_dec_to_string(value) + elif adjusted > 7: + return self._large_dec_to_string(value) + + if super_process: + return super_process(value) + else: + return value + return process + + # these routines needed for older versions of pyodbc. + # as of 2.1.8 this logic is integrated. + + def _small_dec_to_string(self, value): + return "%s0.%s%s" % ( + (value < 0 and '-' or ''), + '0' * (abs(value.adjusted()) - 1), + "".join([str(nint) for nint in value.as_tuple()[1]])) + + def _large_dec_to_string(self, value): + _int = value.as_tuple()[1] + if 'E' in str(value): + result = "%s%s%s" % ( + (value < 0 and '-' or ''), + "".join([str(s) for s in _int]), + "0" * (value.adjusted() - (len(_int) - 1))) + else: + if (len(_int) - 1) > value.adjusted(): + result = "%s%s.%s" % ( + (value < 0 and '-' or ''), + "".join( + [str(s) for s in _int][0:value.adjusted() + 1]), + "".join( + [str(s) for s in _int][value.adjusted() + 1:])) + else: + result = "%s%s" % ( + (value < 0 and '-' or ''), + "".join( + [str(s) for s in _int][0:value.adjusted() + 1])) + return result + + +class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric): + pass + + +class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float): + pass + + +class _VARBINARY_pyodbc(VARBINARY): + def bind_processor(self, dialect): + if dialect.dbapi is None: + return None + + DBAPIBinary = dialect.dbapi.Binary + + def process(value): + if value is not None: + return DBAPIBinary(value) + else: + # pyodbc-specific + return dialect.dbapi.BinaryNull + return process + + +class MSExecutionContext_pyodbc(MSExecutionContext): + _embedded_scope_identity = False + + def pre_exec(self): + """where appropriate, issue "select scope_identity()" in the same + statement. + + Background on why "scope_identity()" is preferable to "@@identity": + http://msdn.microsoft.com/en-us/library/ms190315.aspx + + Background on why we attempt to embed "scope_identity()" into the same + statement as the INSERT: + http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values? + + """ + + super(MSExecutionContext_pyodbc, self).pre_exec() + + # don't embed the scope_identity select into an + # "INSERT .. DEFAULT VALUES" + if self._select_lastrowid and \ + self.dialect.use_scope_identity and \ + len(self.parameters[0]): + self._embedded_scope_identity = True + + self.statement += "; select scope_identity()" + + def post_exec(self): + if self._embedded_scope_identity: + # Fetch the last inserted id from the manipulated statement + # We may have to skip over a number of result sets with + # no data (due to triggers, etc.) 
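+            # e.g. an INSERT trigger that runs its own statements can
+            # emit extra empty result sets ahead of the
+            # "select scope_identity()" row; each failed fetch below
+            # advances to the next set until the row is found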
+ while True: + try: + # fetchall() ensures the cursor is consumed + # without closing it (FreeTDS particularly) + row = self.cursor.fetchall()[0] + break + except self.dialect.dbapi.Error as e: + # no way around this - nextset() consumes the previous set + # so we need to just keep flipping + self.cursor.nextset() + + self._lastrowid = int(row[0]) + else: + super(MSExecutionContext_pyodbc, self).post_exec() + + +class MSDialect_pyodbc(PyODBCConnector, MSDialect): + + execution_ctx_cls = MSExecutionContext_pyodbc + + colspecs = util.update_copy( + MSDialect.colspecs, + { + sqltypes.Numeric: _MSNumeric_pyodbc, + sqltypes.Float: _MSFloat_pyodbc, + VARBINARY: _VARBINARY_pyodbc, + sqltypes.LargeBinary: _VARBINARY_pyodbc, + } + ) + + def __init__(self, description_encoding=None, **params): + if 'description_encoding' in params: + self.description_encoding = params.pop('description_encoding') + super(MSDialect_pyodbc, self).__init__(**params) + self.use_scope_identity = self.use_scope_identity and \ + self.dbapi and \ + hasattr(self.dbapi.Cursor, 'nextset') + self._need_decimal_fix = self.dbapi and \ + self._dbapi_version() < (2, 1, 8) + + def _get_server_version_info(self, connection): + try: + raw = connection.scalar("SELECT SERVERPROPERTY('ProductVersion')") + except exc.DBAPIError: + # SQL Server docs indicate this function isn't present prior to + # 2008; additionally, unknown combinations of pyodbc aren't + # able to run this query. + return super(MSDialect_pyodbc, self).\ + _get_server_version_info(connection) + else: + version = [] + r = re.compile(r'[.\-]') + for n in r.split(raw): + try: + version.append(int(n)) + except ValueError: + version.append(n) + return tuple(version) + +dialect = MSDialect_pyodbc diff --git a/app/lib/sqlalchemy/dialects/mssql/zxjdbc.py b/app/lib/sqlalchemy/dialects/mssql/zxjdbc.py new file mode 100644 index 0000000..eaf5c96 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mssql/zxjdbc.py @@ -0,0 +1,69 @@ +# mssql/zxjdbc.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: mssql+zxjdbc + :name: zxJDBC for Jython + :dbapi: zxjdbc + :connectstring: mssql+zxjdbc://user:pass@host:port/dbname\ +[?key=value&key=value...] + :driverurl: http://jtds.sourceforge.net/ + + .. note:: Jython is not supported by current versions of SQLAlchemy. The + zxjdbc dialect should be considered as experimental. + +""" +from ...connectors.zxJDBC import ZxJDBCConnector +from .base import MSDialect, MSExecutionContext +from ... 
import engine
+
+
+class MSExecutionContext_zxjdbc(MSExecutionContext):
+
+    _embedded_scope_identity = False
+
+    def pre_exec(self):
+        super(MSExecutionContext_zxjdbc, self).pre_exec()
+        # scope_identity after the fact returns null in jTDS so we must
+        # embed it
+        if self._select_lastrowid and self.dialect.use_scope_identity:
+            self._embedded_scope_identity = True
+            self.statement += "; SELECT scope_identity()"
+
+    def post_exec(self):
+        if self._embedded_scope_identity:
+            while True:
+                try:
+                    row = self.cursor.fetchall()[0]
+                    break
+                except self.dialect.dbapi.Error:
+                    self.cursor.nextset()
+            self._lastrowid = int(row[0])
+
+        if (self.isinsert or self.isupdate or self.isdelete) and \
+                self.compiled.returning:
+            self._result_proxy = engine.FullyBufferedResultProxy(self)
+
+        if self._enable_identity_insert:
+            table = self.dialect.identifier_preparer.format_table(
+                self.compiled.statement.table)
+            self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
+
+
+class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
+    jdbc_db_name = 'jtds:sqlserver'
+    jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver'
+
+    execution_ctx_cls = MSExecutionContext_zxjdbc
+
+    def _get_server_version_info(self, connection):
+        return tuple(
+            int(x)
+            for x in connection.connection.dbversion.split('.')
+        )
+
+dialect = MSDialect_zxjdbc
diff --git a/app/lib/sqlalchemy/dialects/mysql/__init__.py b/app/lib/sqlalchemy/dialects/mysql/__init__.py
new file mode 100644
index 0000000..2ff8542
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/__init__.py
@@ -0,0 +1,31 @@
+# mysql/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from . import base, mysqldb, oursql, \
+    pyodbc, zxjdbc, mysqlconnector, pymysql,\
+    gaerdbms, cymysql
+
+# default dialect
+base.dialect = mysqldb.dialect
+
+from .base import \
+    BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
+    DECIMAL, DOUBLE, ENUM, \
+    FLOAT, INTEGER, JSON, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
+    MEDIUMINT, MEDIUMTEXT, NCHAR, \
+    NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
+    TINYBLOB, TINYINT, TINYTEXT,\
+    VARBINARY, VARCHAR, YEAR, dialect
+
+__all__ = (
+    'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
+    'DECIMAL', 'DOUBLE', 'ENUM', 'FLOAT', 'INTEGER',
+    'JSON', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT',
+    'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
+    'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
+    'YEAR', 'dialect'
+)
diff --git a/app/lib/sqlalchemy/dialects/mysql/base.py b/app/lib/sqlalchemy/dialects/mysql/base.py
new file mode 100644
index 0000000..822e932
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/base.py
@@ -0,0 +1,2056 @@
+# mysql/base.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+r"""
+
+.. dialect:: mysql
+    :name: MySQL
+
+Supported Versions and Features
+-------------------------------
+
+SQLAlchemy supports MySQL starting with version 4.1 through modern releases.
+However, no heroic measures are taken to work around major missing
+SQL features - if your server version does not support sub-selects, for
+example, they won't work in SQLAlchemy either.
+
+See the official MySQL documentation for detailed information about features
+supported in any given server release.
+
+.. _mysql_connection_timeouts:
+
+Connection Timeouts
+-------------------
+
+MySQL features an automatic connection close behavior for connections that
+have been idle for eight hours or more.  To avoid this issue, use
+the ``pool_recycle`` option which controls the maximum age of any connection::
+
+    engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
+
+.. seealso::
+
+    :ref:`pool_setting_recycle` - full description of the pool recycle feature.
+
+
+.. _mysql_storage_engines:
+
+CREATE TABLE arguments including Storage Engines
+------------------------------------------------
+
+MySQL's CREATE TABLE syntax includes a wide array of special options,
+including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``,
+``INSERT_METHOD``, and many more.
+To accommodate the rendering of these arguments, specify the form
+``mysql_argument_name="value"``.  For example, to specify a table with
+``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8``, and ``KEY_BLOCK_SIZE``
+of ``1024``::
+
+  Table('mytable', metadata,
+        Column('data', String(32)),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8',
+        mysql_key_block_size="1024"
+       )
+
+The MySQL dialect will normally transfer any keyword specified as
+``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the
+``CREATE TABLE`` statement.  A handful of these names will render with a space
+instead of an underscore; to support this, the MySQL dialect has awareness of
+these particular names, which include ``DATA DIRECTORY``
+(e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g.
+``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g.
+``mysql_index_directory``).
+
+The most common argument is ``mysql_engine``, which refers to the storage
+engine for the table.  Historically, MySQL server installations would default
+to ``MyISAM`` for this value, although newer versions may be defaulting
+to ``InnoDB``.  The ``InnoDB`` engine is typically preferred for its support
+of transactions and foreign keys.
+
+A :class:`.Table` that is created in a MySQL database with a storage engine
+of ``MyISAM`` will be essentially non-transactional, meaning any
+INSERT/UPDATE/DELETE statement referring to this table will be invoked as
+autocommit.   It also will have no support for foreign key constraints; while
+the ``CREATE TABLE`` statement accepts foreign key options, when using the
+``MyISAM`` storage engine these arguments are discarded.  Reflecting such a
+table will also produce no foreign key constraint information.
+
+For fully atomic transactions as well as support for foreign key
+constraints, all participating ``CREATE TABLE`` statements must specify a
+transactional engine, which in the vast majority of cases is ``InnoDB``.
+
+.. seealso::
+
+    `The InnoDB Storage Engine
+    <http://dev.mysql.com/doc/refman/5.6/en/innodb-storage-engine.html>`_ -
+    on the MySQL website.
+
+Case Sensitivity and Table Reflection
+-------------------------------------
+
+MySQL has inconsistent support for case-sensitive identifier
+names, basing support on specific details of the underlying
+operating system. However, it has been observed that no matter
+what case sensitivity behavior is present, the names of tables in
+foreign key declarations are *always* received from the database
+as all-lower case, making it impossible to accurately reflect a
+schema where inter-related tables use mixed-case identifier names.
+
+Therefore it is strongly advised that table names be declared as
+all lower case both within SQLAlchemy as well as on the MySQL
+database itself, especially if database reflection features are
+to be used.
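+
+For example (an illustrative sketch; the table, metadata and engine names
+here are hypothetical)::
+
+    users = Table('users', metadata,
+                  Column('id', Integer, primary_key=True))
+
+    # reflected foreign keys on other tables will refer to 'users',
+    # matching the declared all-lower-case name exactly
+    addresses = Table('addresses', metadata,
+                      autoload=True, autoload_with=engine)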
+
+.. _mysql_isolation_level:
+
+Transaction Isolation Level
+---------------------------
+
+All MySQL dialects support setting of transaction isolation level
+both via a dialect-specific parameter
+:paramref:`.create_engine.isolation_level`
+accepted by :func:`.create_engine`,
+as well as the :paramref:`.Connection.execution_options.isolation_level`
+argument as passed to :meth:`.Connection.execution_options`.
+This feature works by issuing the command
+``SET SESSION TRANSACTION ISOLATION LEVEL <level>`` for
+each new connection.  For the special AUTOCOMMIT isolation level,
+DBAPI-specific techniques are used.
+
+To set isolation level using :func:`.create_engine`::
+
+    engine = create_engine(
+                    "mysql://scott:tiger@localhost/test",
+                    isolation_level="READ UNCOMMITTED"
+                )
+
+To set using per-connection execution options::
+
+    connection = engine.connect()
+    connection = connection.execution_options(
+        isolation_level="READ COMMITTED"
+    )
+
+Valid values for ``isolation_level`` include:
+
+* ``READ COMMITTED``
+* ``READ UNCOMMITTED``
+* ``REPEATABLE READ``
+* ``SERIALIZABLE``
+* ``AUTOCOMMIT``
+
+The special ``AUTOCOMMIT`` value makes use of the various "autocommit"
+attributes provided by specific DBAPIs, and is currently supported by
+MySQLdb, MySQL-Client, MySQL-Connector Python, and PyMySQL.   Using it,
+the MySQL connection will return true for the value of
+``SELECT @@autocommit;``.
+
+.. versionadded:: 1.1 - added support for the AUTOCOMMIT isolation level.
+
+AUTO_INCREMENT Behavior
+-----------------------
+
+When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
+the first :class:`.Integer` primary key column which is not marked as a
+foreign key::
+
+  >>> t = Table('mytable', metadata,
+  ...   Column('mytable_id', Integer, primary_key=True)
+  ... )
+  >>> t.create()
+  CREATE TABLE mytable (
+          mytable_id INTEGER NOT NULL AUTO_INCREMENT,
+          PRIMARY KEY (mytable_id)
+  )
+
+You can disable this behavior by passing ``False`` to the
+:paramref:`~.Column.autoincrement` argument of :class:`.Column`.  This flag
+can also be used to enable auto-increment on a secondary column in a
+multi-column key for some storage engines::
+
+  Table('mytable', metadata,
+        Column('gid', Integer, primary_key=True, autoincrement=False),
+        Column('id', Integer, primary_key=True)
+       )
+
+.. _mysql_ss_cursors:
+
+Server Side Cursors
+-------------------
+
+Server-side cursor support is available for the MySQLdb and PyMySQL dialects.
+From a MySQL point of view this means that the ``MySQLdb.cursors.SSCursor`` or
+``pymysql.cursors.SSCursor`` class is used when building up the cursor which
+will receive results.  The most typical way of invoking this feature is via
+the :paramref:`.Connection.execution_options.stream_results` connection
+execution option.   Server side cursors can also be enabled for all SELECT
+statements unconditionally by passing ``server_side_cursors=True`` to
+:func:`.create_engine`.
+
+.. versionadded:: 1.1.4 - added server-side cursor support.
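+
+For example, as an illustrative sketch (the URL, ``big_table`` and
+``handle()`` are hypothetical), streaming a large result set without
+buffering it on the client::
+
+    engine = create_engine("mysql+pymysql://scott:tiger@localhost/test")
+
+    with engine.connect() as conn:
+        result = conn.execution_options(stream_results=True).execute(
+            "SELECT * FROM big_table")
+        for row in result:
+            handle(row)  # rows arrive incrementally via SSCursor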
+
+.. _mysql_unicode:
+
+Unicode
+-------
+
+Charset Selection
+~~~~~~~~~~~~~~~~~
+
+Most MySQL DBAPIs offer the option to set the client character set for
+a connection.  This is typically delivered using the ``charset`` parameter
+in the URL, such as::
+
+    e = create_engine("mysql+pymysql://scott:tiger@localhost/\
+test?charset=utf8")
+
+This charset is the **client character set** for the connection.  Some
+MySQL DBAPIs will default this to a value such as ``latin1``, and some
+will make use of the ``default-character-set`` setting in the ``my.cnf``
+file as well.   Documentation for the DBAPI in use should be consulted
+for specific behavior.
+
+The encoding used for Unicode has traditionally been ``'utf8'``.  However,
+for MySQL versions 5.5.3 onward, a new MySQL-specific encoding
+``'utf8mb4'`` has been introduced.   The rationale for this new encoding
+is that MySQL's utf-8 encoding only supports
+codepoints up to three bytes instead of four.  Therefore,
+when communicating with a MySQL database
+that includes codepoints more than three bytes in size,
+this new charset is preferred, if supported by both the database as well
+as the client DBAPI, as in::
+
+    e = create_engine("mysql+pymysql://scott:tiger@localhost/\
+test?charset=utf8mb4")
+
+At the moment, up-to-date versions of MySQLdb and PyMySQL support the
+``utf8mb4`` charset.   Other DBAPIs such as MySQL-Connector and OurSQL
+may **not** yet support it.
+
+In order to use ``utf8mb4`` encoding, changes to
+the MySQL schema and/or server configuration may be required.
+
+.. seealso::
+
+    `The utf8mb4 Character Set
+    <http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html>`_ -
+    in the MySQL documentation
+
+Unicode Encoding / Decoding
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All modern MySQL DBAPIs offer the service of handling the encoding and
+decoding of unicode data between the Python application space and the
+database.  As this was not always the case, SQLAlchemy also includes a
+comprehensive system of performing the encode/decode task as well.  As only
+one of these systems should be in use at a time, SQLAlchemy has long included
+functionality to automatically detect upon first connection whether or not
+the DBAPI is automatically handling unicode.
+
+Whether or not the MySQL DBAPI will handle encoding can usually be configured
+using a DBAPI flag ``use_unicode``, which is known to be supported at least
+by MySQLdb, PyMySQL, and MySQL-Connector.  Setting this value to ``0``
+in the "connect args" or query string will have the effect of disabling the
+DBAPI's handling of unicode, such that it instead will return data of the
+``str`` type or ``bytes`` type, with data in the configured charset::
+
+    # connect while disabling the DBAPI's unicode encoding/decoding
+    e = create_engine("mysql+mysqldb://scott:tiger@localhost/test?charset=utf8&use_unicode=0")
+
+Current recommendations for modern DBAPIs are as follows:
+
+* It is generally always safe to leave the ``use_unicode`` flag set at
+  its default; that is, don't use it at all.
+* Under Python 3, the ``use_unicode=0`` flag should **never be used**.
+  SQLAlchemy under Python 3 generally assumes the DBAPI receives and returns
+  string values as Python 3 strings, which are inherently unicode objects.
+* Under Python 2 with MySQLdb, the ``use_unicode=0`` flag will **offer
+  superior performance**, as MySQLdb's unicode converters under Python 2
+  have been observed to have unusually slow performance compared to
+  SQLAlchemy's fast C-based encoders/decoders.
+
+In short:  don't specify ``use_unicode`` *at all*, with the possible
+exception of ``use_unicode=0`` on MySQLdb with Python 2 **only** for a
+potential performance gain.
+ +Ansi Quoting Style +------------------ + +MySQL features two varieties of identifier "quoting style", one using +backticks and the other using quotes, e.g. ```some_identifier``` vs. +``"some_identifier"``. All MySQL dialects detect which version +is in use by checking the value of ``sql_mode`` when a connection is first +established with a particular :class:`.Engine`. This quoting style comes +into play when rendering table and column names as well as when reflecting +existing database structures. The detection is entirely automatic and +no special configuration is needed to use either quoting style. + +.. versionchanged:: 0.6 detection of ANSI quoting style is entirely automatic, + there's no longer any end-user ``create_engine()`` options in this regard. + +MySQL SQL Extensions +-------------------- + +Many of the MySQL SQL extensions are handled through SQLAlchemy's generic +function and operator support:: + + table.select(table.c.password==func.md5('plaintext')) + table.select(table.c.username.op('regexp')('^[a-d]')) + +And of course any valid MySQL statement can be executed as a string as well. + +Some limited direct support for MySQL extensions to SQL is currently +available. + +* SELECT pragma:: + + select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT']) + +* UPDATE with LIMIT:: + + update(..., mysql_limit=10) + +rowcount Support +---------------- + +SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the +usual definition of "number of rows matched by an UPDATE or DELETE" statement. +This is in contradiction to the default setting on most MySQL DBAPI drivers, +which is "number of rows actually modified/deleted". For this reason, the +SQLAlchemy MySQL dialects always add the ``constants.CLIENT.FOUND_ROWS`` +flag, or whatever is equivalent for the target dialect, upon connection. +This setting is currently hardcoded. + +.. seealso:: + + :attr:`.ResultProxy.rowcount` + + +CAST Support +------------ + +MySQL documents the CAST operator as available in version 4.0.2. When using +the SQLAlchemy :func:`.cast` function, SQLAlchemy +will not render the CAST token on MySQL before this version, based on server +version detection, instead rendering the internal expression directly. + +CAST may still not be desirable on an early MySQL version post-4.0.2, as it +didn't add all datatype support until 4.1.1. If your application falls into +this narrow area, the behavior of CAST can be controlled using the +:ref:`sqlalchemy.ext.compiler_toplevel` system, as per the recipe below:: + + from sqlalchemy.sql.expression import Cast + from sqlalchemy.ext.compiler import compiles + + @compiles(Cast, 'mysql') + def _check_mysql_version(element, compiler, **kw): + if compiler.dialect.server_version_info < (4, 1, 0): + return compiler.process(element.clause, **kw) + else: + return compiler.visit_cast(element, **kw) + +The above function, which only needs to be declared once +within an application, overrides the compilation of the +:func:`.cast` construct to check for version 4.1.0 before +fully rendering CAST; else the internal element of the +construct is rendered directly. + + +.. _mysql_indexes: + +MySQL Specific Index Options +---------------------------- + +MySQL-specific extensions to the :class:`.Index` construct are available. + +Index Length +~~~~~~~~~~~~~ + +MySQL provides an option to create index entries with a certain length, where +"length" refers to the number of characters or bytes in each value which will +become part of the index. 
SQLAlchemy provides this feature via the
+``mysql_length`` parameter::
+
+    Index('my_index', my_table.c.data, mysql_length=10)
+
+    Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4,
+                                                               'b': 9})
+
+Prefix lengths are given in characters for nonbinary string types and in bytes
+for binary string types.  The value passed to the keyword argument *must* be
+either an integer (and, thus, specify the same prefix length value for all
+columns of the index) or a dict in which keys are column names and values are
+prefix length values for corresponding columns.  MySQL only allows a length
+for an index column whose type is CHAR, VARCHAR, TEXT, BINARY, VARBINARY,
+or BLOB.
+
+.. versionadded:: 0.8.2 ``mysql_length`` may now be specified as a dictionary
+   for use with composite indexes.
+
+Index Prefixes
+~~~~~~~~~~~~~~
+
+MySQL storage engines permit you to specify an index prefix when creating
+an index. SQLAlchemy provides this feature via the
+``mysql_prefix`` parameter on :class:`.Index`::
+
+    Index('my_index', my_table.c.data, mysql_prefix='FULLTEXT')
+
+The value passed to the keyword argument will be simply passed through to the
+underlying CREATE INDEX, so it *must* be a valid index prefix for your MySQL
+storage engine.
+
+.. versionadded:: 1.1.5
+
+.. seealso::
+
+    `CREATE INDEX
+    <http://dev.mysql.com/doc/refman/5.7/en/create-index.html>`_ -
+    MySQL documentation
+
+Index Types
+~~~~~~~~~~~~~
+
+Some MySQL storage engines permit you to specify an index type when creating
+an index or primary key constraint. SQLAlchemy provides this feature via the
+``mysql_using`` parameter on :class:`.Index`::
+
+    Index('my_index', my_table.c.data, mysql_using='hash')
+
+As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
+
+    PrimaryKeyConstraint("data", mysql_using='hash')
+
+The value passed to the keyword argument will be simply passed through to the
+underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
+type for your MySQL storage engine.
+
+More information can be found at:
+
+http://dev.mysql.com/doc/refman/5.0/en/create-index.html
+
+http://dev.mysql.com/doc/refman/5.0/en/create-table.html
+
+.. _mysql_foreign_keys:
+
+MySQL Foreign Keys
+------------------
+
+MySQL's behavior regarding foreign keys has some important caveats.
+
+Foreign Key Arguments to Avoid
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+MySQL does not support the foreign key arguments "DEFERRABLE", "INITIALLY",
+or "MATCH".  Using the ``deferrable`` or ``initially`` keyword argument with
+:class:`.ForeignKeyConstraint` or :class:`.ForeignKey` will have the effect of
+these keywords being rendered in a DDL expression, which will then raise an
+error on MySQL.  In order to use these keywords on a foreign key while having
+them ignored on a MySQL backend, use a custom compile rule::
+
+    from sqlalchemy.ext.compiler import compiles
+    from sqlalchemy.schema import ForeignKeyConstraint
+
+    @compiles(ForeignKeyConstraint, "mysql")
+    def process(element, compiler, **kw):
+        element.deferrable = element.initially = None
+        return compiler.visit_foreign_key_constraint(element, **kw)
+
+.. versionchanged:: 0.9.0 - the MySQL backend no longer silently ignores
+   the ``deferrable`` or ``initially`` keyword arguments of
+   :class:`.ForeignKeyConstraint` and :class:`.ForeignKey`.
+
+The "MATCH" keyword is in fact more insidious, and is explicitly disallowed
+by SQLAlchemy in conjunction with the MySQL backend.  This argument is
+silently ignored by MySQL, but in addition has the effect of ON UPDATE and ON
+DELETE options also being ignored by the backend.
Therefore MATCH should +never be used with the MySQL backend; as is the case with DEFERRABLE and +INITIALLY, custom compilation rules can be used to correct a MySQL +ForeignKeyConstraint at DDL definition time. + +.. versionadded:: 0.9.0 - the MySQL backend will raise a + :class:`.CompileError` when the ``match`` keyword is used with + :class:`.ForeignKeyConstraint` or :class:`.ForeignKey`. + +Reflection of Foreign Key Constraints +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Not all MySQL storage engines support foreign keys. When using the +very common ``MyISAM`` MySQL storage engine, the information loaded by table +reflection will not include foreign keys. For these tables, you may supply a +:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time:: + + Table('mytable', metadata, + ForeignKeyConstraint(['other_id'], ['othertable.other_id']), + autoload=True + ) + +.. seealso:: + + :ref:`mysql_storage_engines` + +.. _mysql_unique_constraints: + +MySQL Unique Constraints and Reflection +--------------------------------------- + +SQLAlchemy supports both the :class:`.Index` construct with the +flag ``unique=True``, indicating a UNIQUE index, as well as the +:class:`.UniqueConstraint` construct, representing a UNIQUE constraint. +Both objects/syntaxes are supported by MySQL when emitting DDL to create +these constraints. However, MySQL does not have a unique constraint +construct that is separate from a unique index; that is, the "UNIQUE" +constraint on MySQL is equivalent to creating a "UNIQUE INDEX". + +When reflecting these constructs, the :meth:`.Inspector.get_indexes` +and the :meth:`.Inspector.get_unique_constraints` methods will **both** +return an entry for a UNIQUE index in MySQL. However, when performing +full table reflection using ``Table(..., autoload=True)``, +the :class:`.UniqueConstraint` construct is +**not** part of the fully reflected :class:`.Table` construct under any +circumstances; this construct is always represented by a :class:`.Index` +with the ``unique=True`` setting present in the :attr:`.Table.indexes` +collection. + + +.. _mysql_timestamp_null: + +TIMESTAMP Columns and NULL +-------------------------- + +MySQL historically enforces that a column which specifies the +TIMESTAMP datatype implicitly includes a default value of +CURRENT_TIMESTAMP, even though this is not stated, and additionally +sets the column as NOT NULL, the opposite behavior vs. that of all +other datatypes:: + + mysql> CREATE TABLE ts_test ( + -> a INTEGER, + -> b INTEGER NOT NULL, + -> c TIMESTAMP, + -> d TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + -> e TIMESTAMP NULL); + Query OK, 0 rows affected (0.03 sec) + + mysql> SHOW CREATE TABLE ts_test; + +---------+----------------------------------------------------- + | Table | Create Table + +---------+----------------------------------------------------- + | ts_test | CREATE TABLE `ts_test` ( + `a` int(11) DEFAULT NULL, + `b` int(11) NOT NULL, + `c` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `d` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + `e` timestamp NULL DEFAULT NULL + ) ENGINE=MyISAM DEFAULT CHARSET=latin1 + +Above, we see that an INTEGER column defaults to NULL, unless it is specified +with NOT NULL. But when the column is of type TIMESTAMP, an implicit +default of CURRENT_TIMESTAMP is generated which also coerces the column +to be a NOT NULL, even though we did not specify it as such. 
+
+This behavior of MySQL can be changed on the MySQL side using the
+`explicit_defaults_for_timestamp
+<http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html
+#sysvar_explicit_defaults_for_timestamp>`_ configuration flag introduced in
+MySQL 5.6.  With this server setting enabled, TIMESTAMP columns behave like
+any other datatype on the MySQL side with regard to defaults and nullability.
+
+However, to accommodate the vast majority of MySQL databases that do not
+specify this new flag, SQLAlchemy emits the "NULL" specifier explicitly with
+any TIMESTAMP column that does not specify ``nullable=False``.   In order
+to accommodate newer databases that specify
+``explicit_defaults_for_timestamp``, SQLAlchemy also emits NOT NULL for
+TIMESTAMP columns that do specify ``nullable=False``.   The following
+example illustrates::
+
+    from sqlalchemy import MetaData, Integer, Table, Column, text
+    from sqlalchemy.dialects.mysql import TIMESTAMP
+
+    m = MetaData()
+    t = Table('ts_test', m,
+              Column('a', Integer),
+              Column('b', Integer, nullable=False),
+              Column('c', TIMESTAMP),
+              Column('d', TIMESTAMP, nullable=False)
+          )
+
+
+    from sqlalchemy import create_engine
+    e = create_engine("mysql://scott:tiger@localhost/test", echo=True)
+    m.create_all(e)
+
+output::
+
+    CREATE TABLE ts_test (
+        a INTEGER,
+        b INTEGER NOT NULL,
+        c TIMESTAMP NULL,
+        d TIMESTAMP NOT NULL
+    )
+
+.. versionchanged:: 1.0.0 - SQLAlchemy now renders NULL or NOT NULL in all
+   cases for TIMESTAMP columns, to accommodate
+   ``explicit_defaults_for_timestamp``.  Prior to this version, it will
+   not render "NOT NULL" for a TIMESTAMP column that is ``nullable=False``.
+
+"""
+
+import re
+import sys
+import json
+
+from ... import schema as sa_schema
+from ... import exc, log, sql, util
+from ...sql import compiler, elements
+from array import array as _array
+
+from ...engine import reflection
+from ...engine import default
+from ... import types as sqltypes
+from ...util import topological
+from ...types import DATE, BOOLEAN, \
+    BLOB, BINARY, VARBINARY
+
+from . 
import reflection as _reflection +from .types import BIGINT, BIT, CHAR, DECIMAL, DATETIME, \ + DOUBLE, FLOAT, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, \ + MEDIUMTEXT, NCHAR, NUMERIC, NVARCHAR, REAL, SMALLINT, TEXT, TIME, \ + TIMESTAMP, TINYBLOB, TINYINT, TINYTEXT, VARCHAR, YEAR +from .types import _StringType, _IntegerType, _NumericType, \ + _FloatType, _MatchType +from .enumerated import ENUM, SET +from .json import JSON, JSONIndexType, JSONPathType + + +RESERVED_WORDS = set( + ['accessible', 'add', 'all', 'alter', 'analyze', 'and', 'as', 'asc', + 'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both', + 'by', 'call', 'cascade', 'case', 'change', 'char', 'character', 'check', + 'collate', 'column', 'condition', 'constraint', 'continue', 'convert', + 'create', 'cross', 'current_date', 'current_time', 'current_timestamp', + 'current_user', 'cursor', 'database', 'databases', 'day_hour', + 'day_microsecond', 'day_minute', 'day_second', 'dec', 'decimal', + 'declare', 'default', 'delayed', 'delete', 'desc', 'describe', + 'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop', + 'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists', + 'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8', + 'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group', + 'having', 'high_priority', 'hour_microsecond', 'hour_minute', + 'hour_second', 'if', 'ignore', 'in', 'index', 'infile', 'inner', 'inout', + 'insensitive', 'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8', + 'integer', 'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys', + 'kill', 'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines', + 'load', 'localtime', 'localtimestamp', 'lock', 'long', 'longblob', + 'longtext', 'loop', 'low_priority', 'master_ssl_verify_server_cert', + 'match', 'mediumblob', 'mediumint', 'mediumtext', 'middleint', + 'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural', + 'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize', + 'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile', + 'precision', 'primary', 'procedure', 'purge', 'range', 'read', 'reads', + 'read_only', 'read_write', 'real', 'references', 'regexp', 'release', + 'rename', 'repeat', 'replace', 'require', 'restrict', 'return', + 'revoke', 'right', 'rlike', 'schema', 'schemas', 'second_microsecond', + 'select', 'sensitive', 'separator', 'set', 'show', 'smallint', 'spatial', + 'specific', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning', + 'sql_big_result', 'sql_calc_found_rows', 'sql_small_result', 'ssl', + 'starting', 'straight_join', 'table', 'terminated', 'then', 'tinyblob', + 'tinyint', 'tinytext', 'to', 'trailing', 'trigger', 'true', 'undo', + 'union', 'unique', 'unlock', 'unsigned', 'update', 'usage', 'use', + 'using', 'utc_date', 'utc_time', 'utc_timestamp', 'values', 'varbinary', + 'varchar', 'varcharacter', 'varying', 'when', 'where', 'while', 'with', + + 'write', 'x509', 'xor', 'year_month', 'zerofill', # 5.0 + + 'columns', 'fields', 'privileges', 'soname', 'tables', # 4.1 + + 'accessible', 'linear', 'master_ssl_verify_server_cert', 'range', + 'read_only', 'read_write', # 5.1 + + 'general', 'ignore_server_ids', 'master_heartbeat_period', 'maxvalue', + 'resignal', 'signal', 'slow', # 5.5 + + 'get', 'io_after_gtids', 'io_before_gtids', 'master_bind', 'one_shot', + 'partition', 'sql_after_gtids', 'sql_before_gtids', # 5.6 + + 'generated', 'optimizer_costs', 'stored', 'virtual', # 5.7 + + 'admin', 'except', 'grouping', 'of', 'persist', 
'recursive', + 'role', # 8.0 + + ]) + +AUTOCOMMIT_RE = re.compile( + r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)', + re.I | re.UNICODE) +SET_RE = re.compile( + r'\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w', + re.I | re.UNICODE) + + +# old names +MSTime = TIME +MSSet = SET +MSEnum = ENUM +MSLongBlob = LONGBLOB +MSMediumBlob = MEDIUMBLOB +MSTinyBlob = TINYBLOB +MSBlob = BLOB +MSBinary = BINARY +MSVarBinary = VARBINARY +MSNChar = NCHAR +MSNVarChar = NVARCHAR +MSChar = CHAR +MSString = VARCHAR +MSLongText = LONGTEXT +MSMediumText = MEDIUMTEXT +MSTinyText = TINYTEXT +MSText = TEXT +MSYear = YEAR +MSTimeStamp = TIMESTAMP +MSBit = BIT +MSSmallInteger = SMALLINT +MSTinyInteger = TINYINT +MSMediumInteger = MEDIUMINT +MSBigInteger = BIGINT +MSNumeric = NUMERIC +MSDecimal = DECIMAL +MSDouble = DOUBLE +MSReal = REAL +MSFloat = FLOAT +MSInteger = INTEGER + +colspecs = { + _IntegerType: _IntegerType, + _NumericType: _NumericType, + _FloatType: _FloatType, + sqltypes.Numeric: NUMERIC, + sqltypes.Float: FLOAT, + sqltypes.Time: TIME, + sqltypes.Enum: ENUM, + sqltypes.MatchType: _MatchType, + sqltypes.JSON: JSON, + sqltypes.JSON.JSONIndexType: JSONIndexType, + sqltypes.JSON.JSONPathType: JSONPathType + +} + +# Everything 3.23 through 5.1 excepting OpenGIS types. +ischema_names = { + 'bigint': BIGINT, + 'binary': BINARY, + 'bit': BIT, + 'blob': BLOB, + 'boolean': BOOLEAN, + 'char': CHAR, + 'date': DATE, + 'datetime': DATETIME, + 'decimal': DECIMAL, + 'double': DOUBLE, + 'enum': ENUM, + 'fixed': DECIMAL, + 'float': FLOAT, + 'int': INTEGER, + 'integer': INTEGER, + 'json': JSON, + 'longblob': LONGBLOB, + 'longtext': LONGTEXT, + 'mediumblob': MEDIUMBLOB, + 'mediumint': MEDIUMINT, + 'mediumtext': MEDIUMTEXT, + 'nchar': NCHAR, + 'nvarchar': NVARCHAR, + 'numeric': NUMERIC, + 'set': SET, + 'smallint': SMALLINT, + 'text': TEXT, + 'time': TIME, + 'timestamp': TIMESTAMP, + 'tinyblob': TINYBLOB, + 'tinyint': TINYINT, + 'tinytext': TINYTEXT, + 'varbinary': VARBINARY, + 'varchar': VARCHAR, + 'year': YEAR, +} + + +class MySQLExecutionContext(default.DefaultExecutionContext): + + def should_autocommit_text(self, statement): + return AUTOCOMMIT_RE.match(statement) + + def create_server_side_cursor(self): + if self.dialect.supports_server_side_cursors: + return self._dbapi_connection.cursor(self.dialect._sscursor) + else: + raise NotImplementedError() + + +class MySQLCompiler(compiler.SQLCompiler): + + render_table_with_column_in_update_from = True + """Overridden from base SQLCompiler value""" + + extract_map = compiler.SQLCompiler.extract_map.copy() + extract_map.update({'milliseconds': 'millisecond'}) + + def visit_random_func(self, fn, **kw): + return "rand%s" % self.function_argspec(fn) + + def visit_utc_timestamp_func(self, fn, **kw): + return "UTC_TIMESTAMP" + + def visit_sysdate_func(self, fn, **kw): + return "SYSDATE()" + + def visit_json_getitem_op_binary(self, binary, operator, **kw): + return "JSON_EXTRACT(%s, %s)" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw)) + + def visit_json_path_getitem_op_binary(self, binary, operator, **kw): + return "JSON_EXTRACT(%s, %s)" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw)) + + def visit_concat_op_binary(self, binary, operator, **kw): + return "concat(%s, %s)" % (self.process(binary.left), + self.process(binary.right)) + + def visit_match_op_binary(self, binary, operator, **kw): + return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % \ + (self.process(binary.left), self.process(binary.right)) + + def 
get_from_hint_text(self, table, text): + return text + + def visit_typeclause(self, typeclause, type_=None): + if type_ is None: + type_ = typeclause.type.dialect_impl(self.dialect) + if isinstance(type_, sqltypes.TypeDecorator): + return self.visit_typeclause(typeclause, type_.impl) + elif isinstance(type_, sqltypes.Integer): + if getattr(type_, 'unsigned', False): + return 'UNSIGNED INTEGER' + else: + return 'SIGNED INTEGER' + elif isinstance(type_, sqltypes.TIMESTAMP): + return 'DATETIME' + elif isinstance(type_, (sqltypes.DECIMAL, sqltypes.DateTime, + sqltypes.Date, sqltypes.Time)): + return self.dialect.type_compiler.process(type_) + elif isinstance(type_, sqltypes.String) \ + and not isinstance(type_, (ENUM, SET)): + adapted = CHAR._adapt_string_for_cast(type_) + return self.dialect.type_compiler.process(adapted) + elif isinstance(type_, sqltypes._Binary): + return 'BINARY' + elif isinstance(type_, sqltypes.JSON): + return "JSON" + elif isinstance(type_, sqltypes.NUMERIC): + return self.dialect.type_compiler.process( + type_).replace('NUMERIC', 'DECIMAL') + else: + return None + + def visit_cast(self, cast, **kw): + # No cast until 4, no decimals until 5. + if not self.dialect._supports_cast: + util.warn( + "Current MySQL version does not support " + "CAST; the CAST will be skipped.") + return self.process(cast.clause.self_group(), **kw) + + type_ = self.process(cast.typeclause) + if type_ is None: + util.warn( + "Datatype %s does not support CAST on MySQL; " + "the CAST will be skipped." % + self.dialect.type_compiler.process(cast.typeclause.type)) + return self.process(cast.clause.self_group(), **kw) + + return 'CAST(%s AS %s)' % (self.process(cast.clause, **kw), type_) + + def render_literal_value(self, value, type_): + value = super(MySQLCompiler, self).render_literal_value(value, type_) + if self.dialect._backslash_escapes: + value = value.replace('\\', '\\\\') + return value + + # override native_boolean=False behavior here, as + # MySQL still supports native boolean + def visit_true(self, element, **kw): + return "true" + + def visit_false(self, element, **kw): + return "false" + + def get_select_precolumns(self, select, **kw): + """Add special MySQL keywords in place of DISTINCT. + + .. note:: + + this usage is deprecated. :meth:`.Select.prefix_with` + should be used for special keywords at the start + of a SELECT. + + """ + if isinstance(select._distinct, util.string_types): + return select._distinct.upper() + " " + elif select._distinct: + return "DISTINCT " + else: + return "" + + def visit_join(self, join, asfrom=False, **kwargs): + if join.full: + join_type = " FULL OUTER JOIN " + elif join.isouter: + join_type = " LEFT OUTER JOIN " + else: + join_type = " INNER JOIN " + + return ''.join( + (self.process(join.left, asfrom=True, **kwargs), + join_type, + self.process(join.right, asfrom=True, **kwargs), + " ON ", + self.process(join.onclause, **kwargs))) + + def for_update_clause(self, select, **kw): + if select._for_update_arg.read: + return " LOCK IN SHARE MODE" + else: + return " FOR UPDATE" + + def limit_clause(self, select, **kw): + # MySQL supports: + # LIMIT + # LIMIT , + # and in server versions > 3.3: + # LIMIT OFFSET + # The latter is more readable for offsets but we're stuck with the + # former until we can refine dialects by server revision. 
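+        # Illustrative renderings of the branches below (the literal
+        # SQL fragments here are documentation sketches, not emitted
+        # by this comment):
+        #   ... LIMIT 10                           -- limit only
+        #   ... LIMIT 5, 10                        -- offset 5, limit 10
+        #   ... LIMIT 5, 18446744073709551615      -- offset with no limit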
+ + limit_clause, offset_clause = select._limit_clause, \ + select._offset_clause + + if limit_clause is None and offset_clause is None: + return '' + elif offset_clause is not None: + # As suggested by the MySQL docs, need to apply an + # artificial limit if one wasn't provided + # http://dev.mysql.com/doc/refman/5.0/en/select.html + if limit_clause is None: + # hardwire the upper limit. Currently + # needed by OurSQL with Python 3 + # (https://bugs.launchpad.net/oursql/+bug/686232), + # but also is consistent with the usage of the upper + # bound as part of MySQL's "syntax" for OFFSET with + # no LIMIT + return ' \n LIMIT %s, %s' % ( + self.process(offset_clause, **kw), + "18446744073709551615") + else: + return ' \n LIMIT %s, %s' % ( + self.process(offset_clause, **kw), + self.process(limit_clause, **kw)) + else: + # No offset provided, so just use the limit + return ' \n LIMIT %s' % (self.process(limit_clause, **kw),) + + def update_limit_clause(self, update_stmt): + limit = update_stmt.kwargs.get('%s_limit' % self.dialect.name, None) + if limit: + return "LIMIT %s" % limit + else: + return None + + def update_tables_clause(self, update_stmt, from_table, + extra_froms, **kw): + return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw) + for t in [from_table] + list(extra_froms)) + + def update_from_clause(self, update_stmt, from_table, + extra_froms, from_hints, **kw): + return None + + +class MySQLDDLCompiler(compiler.DDLCompiler): + def get_column_specification(self, column, **kw): + """Builds column DDL.""" + + colspec = [ + self.preparer.format_column(column), + self.dialect.type_compiler.process( + column.type, type_expression=column) + ] + + is_timestamp = isinstance(column.type, sqltypes.TIMESTAMP) + + if not column.nullable: + colspec.append('NOT NULL') + + # see: http://docs.sqlalchemy.org/en/latest/dialects/ + # mysql.html#mysql_timestamp_null + elif column.nullable and is_timestamp: + colspec.append('NULL') + + default = self.get_column_default_string(column) + if default is not None: + colspec.append('DEFAULT ' + default) + + if column.table is not None \ + and column is column.table._autoincrement_column and \ + column.server_default is None: + colspec.append('AUTO_INCREMENT') + + return ' '.join(colspec) + + def post_create_table(self, table): + """Build table-level CREATE options like ENGINE and COLLATE.""" + + table_opts = [] + + opts = dict( + ( + k[len(self.dialect.name) + 1:].upper(), + v + ) + for k, v in table.kwargs.items() + if k.startswith('%s_' % self.dialect.name) + ) + + for opt in topological.sort([ + ('DEFAULT_CHARSET', 'COLLATE'), + ('DEFAULT_CHARACTER_SET', 'COLLATE'), + ('PARTITION_BY', 'PARTITIONS'), # only for test consistency + ], opts): + arg = opts[opt] + if opt in _reflection._options_of_type_string: + arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''") + + if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY', + 'DEFAULT_CHARACTER_SET', 'CHARACTER_SET', + 'DEFAULT_CHARSET', + 'DEFAULT_COLLATE', 'PARTITION_BY'): + opt = opt.replace('_', ' ') + + joiner = '=' + if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET', + 'CHARACTER SET', 'COLLATE', + 'PARTITION BY', 'PARTITIONS'): + joiner = ' ' + + table_opts.append(joiner.join((opt, arg))) + return ' '.join(table_opts) + + def visit_create_index(self, create): + index = create.element + self._verify_index_table(index) + preparer = self.preparer + table = preparer.format_table(index.table) + columns = [self.sql_compiler.process(expr, include_table=False, + literal_binds=True) + for expr in 
index.expressions] + + name = self._prepared_index_name(index) + + text = "CREATE " + if index.unique: + text += "UNIQUE " + + index_prefix = index.kwargs.get('mysql_prefix', None) + if index_prefix: + text += index_prefix + ' ' + + text += "INDEX %s ON %s " % (name, table) + + length = index.dialect_options['mysql']['length'] + if length is not None: + + if isinstance(length, dict): + # length value can be a (column_name --> integer value) + # mapping specifying the prefix length for each column of the + # index + columns = ', '.join( + '%s(%d)' % (expr, length[col.name]) if col.name in length + else + ( + '%s(%d)' % (expr, length[expr]) if expr in length + else '%s' % expr + ) + for col, expr in zip(index.expressions, columns) + ) + else: + # or can be an integer value specifying the same + # prefix length for all columns of the index + columns = ', '.join( + '%s(%d)' % (col, length) + for col in columns + ) + else: + columns = ', '.join(columns) + text += '(%s)' % columns + + using = index.dialect_options['mysql']['using'] + if using is not None: + text += " USING %s" % (preparer.quote(using)) + + return text + + def visit_primary_key_constraint(self, constraint): + text = super(MySQLDDLCompiler, self).\ + visit_primary_key_constraint(constraint) + using = constraint.dialect_options['mysql']['using'] + if using: + text += " USING %s" % (self.preparer.quote(using)) + return text + + def visit_drop_index(self, drop): + index = drop.element + + return "\nDROP INDEX %s ON %s" % ( + self._prepared_index_name(index, + include_schema=False), + self.preparer.format_table(index.table)) + + def visit_drop_constraint(self, drop): + constraint = drop.element + if isinstance(constraint, sa_schema.ForeignKeyConstraint): + qual = "FOREIGN KEY " + const = self.preparer.format_constraint(constraint) + elif isinstance(constraint, sa_schema.PrimaryKeyConstraint): + qual = "PRIMARY KEY " + const = "" + elif isinstance(constraint, sa_schema.UniqueConstraint): + qual = "INDEX " + const = self.preparer.format_constraint(constraint) + else: + qual = "" + const = self.preparer.format_constraint(constraint) + return "ALTER TABLE %s DROP %s%s" % \ + (self.preparer.format_table(constraint.table), + qual, const) + + def define_constraint_match(self, constraint): + if constraint.match is not None: + raise exc.CompileError( + "MySQL ignores the 'MATCH' keyword while at the same time " + "causes ON UPDATE/ON DELETE clauses to be ignored.") + return "" + + +class MySQLTypeCompiler(compiler.GenericTypeCompiler): + def _extend_numeric(self, type_, spec): + "Extend a numeric-type declaration with MySQL specific extensions." + + if not self._mysql_type(type_): + return spec + + if type_.unsigned: + spec += ' UNSIGNED' + if type_.zerofill: + spec += ' ZEROFILL' + return spec + + def _extend_string(self, type_, defaults, spec): + """Extend a string-type declaration with standard SQL CHARACTER SET / + COLLATE annotations and MySQL specific extensions. + + """ + + def attr(name): + return getattr(type_, name, defaults.get(name)) + + if attr('charset'): + charset = 'CHARACTER SET %s' % attr('charset') + elif attr('ascii'): + charset = 'ASCII' + elif attr('unicode'): + charset = 'UNICODE' + else: + charset = None + + if attr('collation'): + collation = 'COLLATE %s' % type_.collation + elif attr('binary'): + collation = 'BINARY' + else: + collation = None + + if attr('national'): + # NATIONAL (aka NCHAR/NVARCHAR) trumps charsets. 
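+            # e.g. an NVARCHAR(30) carrying a utf8_bin collation would
+            # come out roughly as "NATIONAL VARCHAR(30) COLLATE utf8_bin"
+            # (an illustrative sketch of the join below).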
+ return ' '.join([c for c in ('NATIONAL', spec, collation) + if c is not None]) + return ' '.join([c for c in (spec, charset, collation) + if c is not None]) + + def _mysql_type(self, type_): + return isinstance(type_, (_StringType, _NumericType)) + + def visit_NUMERIC(self, type_, **kw): + if type_.precision is None: + return self._extend_numeric(type_, "NUMERIC") + elif type_.scale is None: + return self._extend_numeric(type_, + "NUMERIC(%(precision)s)" % + {'precision': type_.precision}) + else: + return self._extend_numeric(type_, + "NUMERIC(%(precision)s, %(scale)s)" % + {'precision': type_.precision, + 'scale': type_.scale}) + + def visit_DECIMAL(self, type_, **kw): + if type_.precision is None: + return self._extend_numeric(type_, "DECIMAL") + elif type_.scale is None: + return self._extend_numeric(type_, + "DECIMAL(%(precision)s)" % + {'precision': type_.precision}) + else: + return self._extend_numeric(type_, + "DECIMAL(%(precision)s, %(scale)s)" % + {'precision': type_.precision, + 'scale': type_.scale}) + + def visit_DOUBLE(self, type_, **kw): + if type_.precision is not None and type_.scale is not None: + return self._extend_numeric(type_, + "DOUBLE(%(precision)s, %(scale)s)" % + {'precision': type_.precision, + 'scale': type_.scale}) + else: + return self._extend_numeric(type_, 'DOUBLE') + + def visit_REAL(self, type_, **kw): + if type_.precision is not None and type_.scale is not None: + return self._extend_numeric(type_, + "REAL(%(precision)s, %(scale)s)" % + {'precision': type_.precision, + 'scale': type_.scale}) + else: + return self._extend_numeric(type_, 'REAL') + + def visit_FLOAT(self, type_, **kw): + if self._mysql_type(type_) and \ + type_.scale is not None and \ + type_.precision is not None: + return self._extend_numeric( + type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale)) + elif type_.precision is not None: + return self._extend_numeric(type_, + "FLOAT(%s)" % (type_.precision,)) + else: + return self._extend_numeric(type_, "FLOAT") + + def visit_INTEGER(self, type_, **kw): + if self._mysql_type(type_) and type_.display_width is not None: + return self._extend_numeric( + type_, "INTEGER(%(display_width)s)" % + {'display_width': type_.display_width}) + else: + return self._extend_numeric(type_, "INTEGER") + + def visit_BIGINT(self, type_, **kw): + if self._mysql_type(type_) and type_.display_width is not None: + return self._extend_numeric( + type_, "BIGINT(%(display_width)s)" % + {'display_width': type_.display_width}) + else: + return self._extend_numeric(type_, "BIGINT") + + def visit_MEDIUMINT(self, type_, **kw): + if self._mysql_type(type_) and type_.display_width is not None: + return self._extend_numeric( + type_, "MEDIUMINT(%(display_width)s)" % + {'display_width': type_.display_width}) + else: + return self._extend_numeric(type_, "MEDIUMINT") + + def visit_TINYINT(self, type_, **kw): + if self._mysql_type(type_) and type_.display_width is not None: + return self._extend_numeric(type_, + "TINYINT(%s)" % type_.display_width) + else: + return self._extend_numeric(type_, "TINYINT") + + def visit_SMALLINT(self, type_, **kw): + if self._mysql_type(type_) and type_.display_width is not None: + return self._extend_numeric(type_, + "SMALLINT(%(display_width)s)" % + {'display_width': type_.display_width} + ) + else: + return self._extend_numeric(type_, "SMALLINT") + + def visit_BIT(self, type_, **kw): + if type_.length is not None: + return "BIT(%s)" % type_.length + else: + return "BIT" + + def visit_DATETIME(self, type_, **kw): + if getattr(type_, 'fsp', 
None): + return "DATETIME(%d)" % type_.fsp + else: + return "DATETIME" + + def visit_DATE(self, type_, **kw): + return "DATE" + + def visit_TIME(self, type_, **kw): + if getattr(type_, 'fsp', None): + return "TIME(%d)" % type_.fsp + else: + return "TIME" + + def visit_TIMESTAMP(self, type_, **kw): + if getattr(type_, 'fsp', None): + return "TIMESTAMP(%d)" % type_.fsp + else: + return "TIMESTAMP" + + def visit_YEAR(self, type_, **kw): + if type_.display_width is None: + return "YEAR" + else: + return "YEAR(%s)" % type_.display_width + + def visit_TEXT(self, type_, **kw): + if type_.length: + return self._extend_string(type_, {}, "TEXT(%d)" % type_.length) + else: + return self._extend_string(type_, {}, "TEXT") + + def visit_TINYTEXT(self, type_, **kw): + return self._extend_string(type_, {}, "TINYTEXT") + + def visit_MEDIUMTEXT(self, type_, **kw): + return self._extend_string(type_, {}, "MEDIUMTEXT") + + def visit_LONGTEXT(self, type_, **kw): + return self._extend_string(type_, {}, "LONGTEXT") + + def visit_VARCHAR(self, type_, **kw): + if type_.length: + return self._extend_string( + type_, {}, "VARCHAR(%d)" % type_.length) + else: + raise exc.CompileError( + "VARCHAR requires a length on dialect %s" % + self.dialect.name) + + def visit_CHAR(self, type_, **kw): + if type_.length: + return self._extend_string(type_, {}, "CHAR(%(length)s)" % + {'length': type_.length}) + else: + return self._extend_string(type_, {}, "CHAR") + + def visit_NVARCHAR(self, type_, **kw): + # We'll actually generate the equiv. "NATIONAL VARCHAR" instead + # of "NVARCHAR". + if type_.length: + return self._extend_string( + type_, {'national': True}, + "VARCHAR(%(length)s)" % {'length': type_.length}) + else: + raise exc.CompileError( + "NVARCHAR requires a length on dialect %s" % + self.dialect.name) + + def visit_NCHAR(self, type_, **kw): + # We'll actually generate the equiv. + # "NATIONAL CHAR" instead of "NCHAR". 
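+        # e.g. NCHAR(20) should render as "NATIONAL CHAR(20)"
+        # (illustrative; produced via _extend_string with national=True).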
+ if type_.length: + return self._extend_string( + type_, {'national': True}, + "CHAR(%(length)s)" % {'length': type_.length}) + else: + return self._extend_string(type_, {'national': True}, "CHAR") + + def visit_VARBINARY(self, type_, **kw): + return "VARBINARY(%d)" % type_.length + + def visit_JSON(self, type_, **kw): + return "JSON" + + def visit_large_binary(self, type_, **kw): + return self.visit_BLOB(type_) + + def visit_enum(self, type_, **kw): + if not type_.native_enum: + return super(MySQLTypeCompiler, self).visit_enum(type_) + else: + return self._visit_enumerated_values("ENUM", type_, type_.enums) + + def visit_BLOB(self, type_, **kw): + if type_.length: + return "BLOB(%d)" % type_.length + else: + return "BLOB" + + def visit_TINYBLOB(self, type_, **kw): + return "TINYBLOB" + + def visit_MEDIUMBLOB(self, type_, **kw): + return "MEDIUMBLOB" + + def visit_LONGBLOB(self, type_, **kw): + return "LONGBLOB" + + def _visit_enumerated_values(self, name, type_, enumerated_values): + quoted_enums = [] + for e in enumerated_values: + quoted_enums.append("'%s'" % e.replace("'", "''")) + return self._extend_string(type_, {}, "%s(%s)" % ( + name, ",".join(quoted_enums)) + ) + + def visit_ENUM(self, type_, **kw): + return self._visit_enumerated_values("ENUM", type_, + type_._enumerated_values) + + def visit_SET(self, type_, **kw): + return self._visit_enumerated_values("SET", type_, + type_._enumerated_values) + + def visit_BOOLEAN(self, type, **kw): + return "BOOL" + + +class MySQLIdentifierPreparer(compiler.IdentifierPreparer): + + reserved_words = RESERVED_WORDS + + def __init__(self, dialect, server_ansiquotes=False, **kw): + if not server_ansiquotes: + quote = "`" + else: + quote = '"' + + super(MySQLIdentifierPreparer, self).__init__( + dialect, + initial_quote=quote, + escape_quote=quote) + + def _quote_free_identifiers(self, *ids): + """Unilaterally identifier-quote any number of strings.""" + + return tuple([self.quote_identifier(i) for i in ids if i is not None]) + + +@log.class_logger +class MySQLDialect(default.DefaultDialect): + """Details of the MySQL dialect. + Not used directly in application code. + """ + + name = 'mysql' + supports_alter = True + + # MySQL has no true "boolean" type; we + # allow for the "true" and "false" keywords, however + supports_native_boolean = False + + # identifiers are 64, however aliases can be 255... + max_identifier_length = 255 + max_index_name_length = 64 + + supports_native_enum = True + + supports_sane_rowcount = True + supports_sane_multi_rowcount = False + supports_multivalues_insert = True + + default_paramstyle = 'format' + colspecs = colspecs + + statement_compiler = MySQLCompiler + ddl_compiler = MySQLDDLCompiler + type_compiler = MySQLTypeCompiler + ischema_names = ischema_names + preparer = MySQLIdentifierPreparer + + # default SQL compilation settings - + # these are modified upon initialize(), + # i.e. 
first connect + _backslash_escapes = True + _server_ansiquotes = False + + construct_arguments = [ + (sa_schema.Table, { + "*": None + }), + (sql.Update, { + "limit": None + }), + (sa_schema.PrimaryKeyConstraint, { + "using": None + }), + (sa_schema.Index, { + "using": None, + "length": None, + "prefix": None, + }) + ] + + def __init__(self, isolation_level=None, json_serializer=None, + json_deserializer=None, **kwargs): + kwargs.pop('use_ansiquotes', None) # legacy + default.DefaultDialect.__init__(self, **kwargs) + self.isolation_level = isolation_level + self._json_serializer = json_serializer + self._json_deserializer = json_deserializer + + def on_connect(self): + if self.isolation_level is not None: + def connect(conn): + self.set_isolation_level(conn, self.isolation_level) + return connect + else: + return None + + _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED', + 'READ COMMITTED', 'REPEATABLE READ']) + + def set_isolation_level(self, connection, level): + level = level.replace('_', ' ') + + # adjust for ConnectionFairy being present + # allows attribute set e.g. "connection.autocommit = True" + # to work properly + if hasattr(connection, 'connection'): + connection = connection.connection + + self._set_isolation_level(connection, level) + + def _set_isolation_level(self, connection, level): + if level not in self._isolation_lookup: + raise exc.ArgumentError( + "Invalid value '%s' for isolation_level. " + "Valid isolation levels for %s are %s" % + (level, self.name, ", ".join(self._isolation_lookup)) + ) + cursor = connection.cursor() + cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level) + cursor.execute("COMMIT") + cursor.close() + + def get_isolation_level(self, connection): + cursor = connection.cursor() + cursor.execute('SELECT @@tx_isolation') + val = cursor.fetchone()[0] + cursor.close() + if util.py3k and isinstance(val, bytes): + val = val.decode() + return val.upper().replace("-", " ") + + def do_commit(self, dbapi_connection): + """Execute a COMMIT.""" + + # COMMIT/ROLLBACK were introduced in 3.23.15. + # Yes, we have at least one user who has to talk to these old + # versions! + # + # Ignore commit/rollback if support isn't present, otherwise even + # basic operations via autocommit fail. 
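+        # e.g. a pre-3.23.15 server answers COMMIT with error code 1064
+        # (syntax error), which the handler below swallows (illustrative
+        # of the fallback; not a path modern servers hit).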
+ try: + dbapi_connection.commit() + except Exception: + if self.server_version_info < (3, 23, 15): + args = sys.exc_info()[1].args + if args and args[0] == 1064: + return + raise + + def do_rollback(self, dbapi_connection): + """Execute a ROLLBACK.""" + + try: + dbapi_connection.rollback() + except Exception: + if self.server_version_info < (3, 23, 15): + args = sys.exc_info()[1].args + if args and args[0] == 1064: + return + raise + + def do_begin_twophase(self, connection, xid): + connection.execute(sql.text("XA BEGIN :xid"), xid=xid) + + def do_prepare_twophase(self, connection, xid): + connection.execute(sql.text("XA END :xid"), xid=xid) + connection.execute(sql.text("XA PREPARE :xid"), xid=xid) + + def do_rollback_twophase(self, connection, xid, is_prepared=True, + recover=False): + if not is_prepared: + connection.execute(sql.text("XA END :xid"), xid=xid) + connection.execute(sql.text("XA ROLLBACK :xid"), xid=xid) + + def do_commit_twophase(self, connection, xid, is_prepared=True, + recover=False): + if not is_prepared: + self.do_prepare_twophase(connection, xid) + connection.execute(sql.text("XA COMMIT :xid"), xid=xid) + + def do_recover_twophase(self, connection): + resultset = connection.execute("XA RECOVER") + return [row['data'][0:row['gtrid_length']] for row in resultset] + + def is_disconnect(self, e, connection, cursor): + if isinstance(e, (self.dbapi.OperationalError, + self.dbapi.ProgrammingError)): + return self._extract_error_code(e) in \ + (2006, 2013, 2014, 2045, 2055) + elif isinstance(e, self.dbapi.InterfaceError): + # if underlying connection is closed, + # this is the error you get + return "(0, '')" in str(e) + else: + return False + + def _compat_fetchall(self, rp, charset=None): + """Proxy result rows to smooth over MySQL-Python driver + inconsistencies.""" + + return [_DecodingRowProxy(row, charset) for row in rp.fetchall()] + + def _compat_fetchone(self, rp, charset=None): + """Proxy a result row to smooth over MySQL-Python driver + inconsistencies.""" + + return _DecodingRowProxy(rp.fetchone(), charset) + + def _compat_first(self, rp, charset=None): + """Proxy a result row to smooth over MySQL-Python driver + inconsistencies.""" + + return _DecodingRowProxy(rp.first(), charset) + + def _extract_error_code(self, exception): + raise NotImplementedError() + + def _get_default_schema_name(self, connection): + return connection.execute('SELECT DATABASE()').scalar() + + def has_table(self, connection, table_name, schema=None): + # SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly + # on macosx (and maybe win?) with multibyte table names. + # + # TODO: if this is not a problem on win, make the strategy swappable + # based on platform. DESCRIBE is slower. 
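+        #
+        # A sketch of the probe this method emits (names are made up):
+        #   DESCRIBE `some_schema`.`some_table`
+        # where MySQL error 1146 signals that the table does not exist.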
+ + # [ticket:726] + # full_name = self.identifier_preparer.format_table(table, + # use_schema=True) + + full_name = '.'.join(self.identifier_preparer._quote_free_identifiers( + schema, table_name)) + + st = "DESCRIBE %s" % full_name + rs = None + try: + try: + rs = connection.execution_options( + skip_user_error_events=True).execute(st) + have = rs.fetchone() is not None + rs.close() + return have + except exc.DBAPIError as e: + if self._extract_error_code(e.orig) == 1146: + return False + raise + finally: + if rs: + rs.close() + + def initialize(self, connection): + self._connection_charset = self._detect_charset(connection) + self._detect_ansiquotes(connection) + if self._server_ansiquotes: + # if ansiquotes == True, build a new IdentifierPreparer + # with the new setting + self.identifier_preparer = self.preparer( + self, server_ansiquotes=self._server_ansiquotes) + + default.DefaultDialect.initialize(self, connection) + + @property + def _is_mariadb(self): + return 'MariaDB' in self.server_version_info + + @property + def _supports_cast(self): + return self.server_version_info is None or \ + self.server_version_info >= (4, 0, 2) + + @reflection.cache + def get_schema_names(self, connection, **kw): + rp = connection.execute("SHOW schemas") + return [r[0] for r in rp] + + @reflection.cache + def get_table_names(self, connection, schema=None, **kw): + """Return a Unicode SHOW TABLES from a given schema.""" + if schema is not None: + current_schema = schema + else: + current_schema = self.default_schema_name + + charset = self._connection_charset + if self.server_version_info < (5, 0, 2): + rp = connection.execute( + "SHOW TABLES FROM %s" % + self.identifier_preparer.quote_identifier(current_schema)) + return [row[0] for + row in self._compat_fetchall(rp, charset=charset)] + else: + rp = connection.execute( + "SHOW FULL TABLES FROM %s" % + self.identifier_preparer.quote_identifier(current_schema)) + + return [row[0] + for row in self._compat_fetchall(rp, charset=charset) + if row[1] == 'BASE TABLE'] + + @reflection.cache + def get_view_names(self, connection, schema=None, **kw): + if self.server_version_info < (5, 0, 2): + raise NotImplementedError + if schema is None: + schema = self.default_schema_name + if self.server_version_info < (5, 0, 2): + return self.get_table_names(connection, schema) + charset = self._connection_charset + rp = connection.execute( + "SHOW FULL TABLES FROM %s" % + self.identifier_preparer.quote_identifier(schema)) + return [row[0] + for row in self._compat_fetchall(rp, charset=charset) + if row[1] in ('VIEW', 'SYSTEM VIEW')] + + @reflection.cache + def get_table_options(self, connection, table_name, schema=None, **kw): + + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw) + return parsed_state.table_options + + @reflection.cache + def get_columns(self, connection, table_name, schema=None, **kw): + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw) + return parsed_state.columns + + @reflection.cache + def get_pk_constraint(self, connection, table_name, schema=None, **kw): + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw) + for key in parsed_state.keys: + if key['type'] == 'PRIMARY': + # There can be only one. 
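+                # e.g. for "PRIMARY KEY (id)" this returns (illustrative)
+                # {'constrained_columns': ['id'], 'name': None}.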
+ cols = [s[0] for s in key['columns']] + return {'constrained_columns': cols, 'name': None} + return {'constrained_columns': [], 'name': None} + + @reflection.cache + def get_foreign_keys(self, connection, table_name, schema=None, **kw): + + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw) + default_schema = None + + fkeys = [] + + for spec in parsed_state.constraints: + # only FOREIGN KEYs + ref_name = spec['table'][-1] + ref_schema = len(spec['table']) > 1 and \ + spec['table'][-2] or schema + + if not ref_schema: + if default_schema is None: + default_schema = \ + connection.dialect.default_schema_name + if schema == default_schema: + ref_schema = schema + + loc_names = spec['local'] + ref_names = spec['foreign'] + + con_kw = {} + for opt in ('onupdate', 'ondelete'): + if spec.get(opt, False): + con_kw[opt] = spec[opt] + + fkey_d = { + 'name': spec['name'], + 'constrained_columns': loc_names, + 'referred_schema': ref_schema, + 'referred_table': ref_name, + 'referred_columns': ref_names, + 'options': con_kw + } + fkeys.append(fkey_d) + return fkeys + + @reflection.cache + def get_indexes(self, connection, table_name, schema=None, **kw): + + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw) + + indexes = [] + for spec in parsed_state.keys: + unique = False + flavor = spec['type'] + if flavor == 'PRIMARY': + continue + if flavor == 'UNIQUE': + unique = True + elif flavor in (None, 'FULLTEXT', 'SPATIAL'): + pass + else: + self.logger.info( + "Converting unknown KEY type %s to a plain KEY", flavor) + pass + index_d = {} + index_d['name'] = spec['name'] + index_d['column_names'] = [s[0] for s in spec['columns']] + index_d['unique'] = unique + if flavor: + index_d['type'] = flavor + indexes.append(index_d) + return indexes + + @reflection.cache + def get_unique_constraints(self, connection, table_name, + schema=None, **kw): + parsed_state = self._parsed_state_or_create( + connection, table_name, schema, **kw) + + return [ + { + 'name': key['name'], + 'column_names': [col[0] for col in key['columns']], + 'duplicates_index': key['name'], + } + for key in parsed_state.keys + if key['type'] == 'UNIQUE' + ] + + @reflection.cache + def get_view_definition(self, connection, view_name, schema=None, **kw): + + charset = self._connection_charset + full_name = '.'.join(self.identifier_preparer._quote_free_identifiers( + schema, view_name)) + sql = self._show_create_table(connection, None, charset, + full_name=full_name) + return sql + + def _parsed_state_or_create(self, connection, table_name, + schema=None, **kw): + return self._setup_parser( + connection, + table_name, + schema, + info_cache=kw.get('info_cache', None) + ) + + @util.memoized_property + def _tabledef_parser(self): + """return the MySQLTableDefinitionParser, generate if needed. + + The deferred creation ensures that the dialect has + retrieved server version information first. 
+
+        """
+        if (self.server_version_info < (4, 1) and self._server_ansiquotes):
+            # ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1
+            preparer = self.preparer(self, server_ansiquotes=False)
+        else:
+            preparer = self.identifier_preparer
+        return _reflection.MySQLTableDefinitionParser(self, preparer)
+
+    @reflection.cache
+    def _setup_parser(self, connection, table_name, schema=None, **kw):
+        charset = self._connection_charset
+        parser = self._tabledef_parser
+        full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
+            schema, table_name))
+        sql = self._show_create_table(connection, None, charset,
+                                      full_name=full_name)
+        if re.match(r'^CREATE (?:ALGORITHM)?.* VIEW', sql):
+            # Adapt views to something table-like.
+            columns = self._describe_table(connection, None, charset,
+                                           full_name=full_name)
+            sql = parser._describe_to_create(table_name, columns)
+        return parser.parse(sql, charset)
+
+    def _detect_charset(self, connection):
+        raise NotImplementedError()
+
+    def _detect_casing(self, connection):
+        """Sniff out identifier case sensitivity.
+
+        Cached per-connection.  This value can not change without a server
+        restart.
+
+        """
+        # http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html
+
+        charset = self._connection_charset
+        row = self._compat_first(connection.execute(
+            "SHOW VARIABLES LIKE 'lower_case_table_names'"),
+            charset=charset)
+        if not row:
+            cs = 0
+        else:
+            # 4.0.15 returns OFF or ON according to [ticket:489]
+            # 3.23 doesn't, 4.0.27 doesn't..
+            if row[1] == 'OFF':
+                cs = 0
+            elif row[1] == 'ON':
+                cs = 1
+            else:
+                cs = int(row[1])
+        return cs
+
+    def _detect_collations(self, connection):
+        """Pull the active COLLATIONS list from the server.
+
+        Cached per-connection.
+        """
+
+        collations = {}
+        if self.server_version_info < (4, 1, 0):
+            pass
+        else:
+            charset = self._connection_charset
+            rs = connection.execute('SHOW COLLATION')
+            for row in self._compat_fetchall(rs, charset):
+                collations[row[0]] = row[1]
+        return collations
+
+    def _detect_ansiquotes(self, connection):
+        """Detect and adjust for the ANSI_QUOTES sql mode."""
+
+        row = self._compat_first(
+            connection.execute("SHOW VARIABLES LIKE 'sql_mode'"),
+            charset=self._connection_charset)
+
+        if not row:
+            mode = ''
+        else:
+            mode = row[1] or ''
+            # 4.0
+            if mode.isdigit():
+                mode_no = int(mode)
+                mode = (mode_no | 4 == mode_no) and 'ANSI_QUOTES' or ''
+
+        self._server_ansiquotes = 'ANSI_QUOTES' in mode
+
+        # as of MySQL 5.0.1
+        self._backslash_escapes = 'NO_BACKSLASH_ESCAPES' not in mode
+
+    def _show_create_table(self, connection, table, charset=None,
+                           full_name=None):
+        """Run SHOW CREATE TABLE for a ``Table``."""
+
+        if full_name is None:
+            full_name = self.identifier_preparer.format_table(table)
+        st = "SHOW CREATE TABLE %s" % full_name
+
+        rp = None
+        try:
+            rp = connection.execution_options(
+                skip_user_error_events=True).execute(st)
+        except exc.DBAPIError as e:
+            if self._extract_error_code(e.orig) == 1146:
+                raise exc.NoSuchTableError(full_name)
+            else:
+                raise
+        row = self._compat_first(rp, charset=charset)
+        if not row:
+            raise exc.NoSuchTableError(full_name)
+        return row[1].strip()
+
+    def _describe_table(self, connection, table, charset=None,
+                        full_name=None):
+        """Run DESCRIBE for a ``Table`` and return processed rows."""
+
+        if full_name is None:
+            full_name = self.identifier_preparer.format_table(table)
+        st = "DESCRIBE %s" % full_name
+
+        rp, rows = None, None
+        try:
+            try:
+                rp = connection.execution_options(
+                    skip_user_error_events=True).execute(st)
+            
except exc.DBAPIError as e: + if self._extract_error_code(e.orig) == 1146: + raise exc.NoSuchTableError(full_name) + else: + raise + rows = self._compat_fetchall(rp, charset=charset) + finally: + if rp: + rp.close() + return rows + + + +class _DecodingRowProxy(object): + """Return unicode-decoded values based on type inspection. + + Smooth over data type issues (esp. with alpha driver versions) and + normalize strings as Unicode regardless of user-configured driver + encoding settings. + + """ + + # Some MySQL-python versions can return some columns as + # sets.Set(['value']) (seriously) but thankfully that doesn't + # seem to come up in DDL queries. + + _encoding_compat = { + 'koi8r': 'koi8_r', + 'koi8u': 'koi8_u', + 'utf16': 'utf-16-be', # MySQL's uft16 is always bigendian + 'utf8mb4': 'utf8', # real utf8 + 'eucjpms': 'ujis', + } + + def __init__(self, rowproxy, charset): + self.rowproxy = rowproxy + self.charset = self._encoding_compat.get(charset, charset) + + def __getitem__(self, index): + item = self.rowproxy[index] + if isinstance(item, _array): + item = item.tostring() + + if self.charset and isinstance(item, util.binary_type): + return item.decode(self.charset) + else: + return item + + def __getattr__(self, attr): + item = getattr(self.rowproxy, attr) + if isinstance(item, _array): + item = item.tostring() + if self.charset and isinstance(item, util.binary_type): + return item.decode(self.charset) + else: + return item + diff --git a/app/lib/sqlalchemy/dialects/mysql/cymysql.py b/app/lib/sqlalchemy/dialects/mysql/cymysql.py new file mode 100644 index 0000000..a5ddb1a --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mysql/cymysql.py @@ -0,0 +1,87 @@ +# mysql/cymysql.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" + +.. dialect:: mysql+cymysql + :name: CyMySQL + :dbapi: cymysql + :connectstring: mysql+cymysql://:@/\ +[?] + :url: https://github.com/nakagami/CyMySQL + +""" +import re + +from .mysqldb import MySQLDialect_mysqldb +from .base import (BIT, MySQLDialect) +from ... import util + + +class _cymysqlBIT(BIT): + def result_processor(self, dialect, coltype): + """Convert a MySQL's 64 bit, variable length binary string to a long. 
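+
+        E.g. the two bytes ``\x01\x02`` decode as ``(1 << 8) | 2 == 258``
+        (an illustrative value, not taken from the driver docs).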
+ """ + + def process(value): + if value is not None: + v = 0 + for i in util.iterbytes(value): + v = v << 8 | i + return v + return value + return process + + +class MySQLDialect_cymysql(MySQLDialect_mysqldb): + driver = 'cymysql' + + description_encoding = None + supports_sane_rowcount = True + supports_sane_multi_rowcount = False + supports_unicode_statements = True + + colspecs = util.update_copy( + MySQLDialect.colspecs, + { + BIT: _cymysqlBIT, + } + ) + + @classmethod + def dbapi(cls): + return __import__('cymysql') + + def _get_server_version_info(self, connection): + dbapi_con = connection.connection + version = [] + r = re.compile(r'[.\-]') + for n in r.split(dbapi_con.server_version): + try: + version.append(int(n)) + except ValueError: + version.append(n) + return tuple(version) + + def _detect_charset(self, connection): + return connection.connection.charset + + def _extract_error_code(self, exception): + return exception.errno + + def is_disconnect(self, e, connection, cursor): + if isinstance(e, self.dbapi.OperationalError): + return self._extract_error_code(e) in \ + (2006, 2013, 2014, 2045, 2055) + elif isinstance(e, self.dbapi.InterfaceError): + # if underlying connection is closed, + # this is the error you get + return True + else: + return False + +dialect = MySQLDialect_cymysql diff --git a/app/lib/sqlalchemy/dialects/mysql/enumerated.py b/app/lib/sqlalchemy/dialects/mysql/enumerated.py new file mode 100644 index 0000000..495bee5 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mysql/enumerated.py @@ -0,0 +1,311 @@ +# mysql/enumerated.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +import re + +from .types import _StringType +from ... import exc, sql, util +from ... import types as sqltypes + + +class _EnumeratedValues(_StringType): + def _init_values(self, values, kw): + self.quoting = kw.pop('quoting', 'auto') + + if self.quoting == 'auto' and len(values): + # What quoting character are we using? + q = None + for e in values: + if len(e) == 0: + self.quoting = 'unquoted' + break + elif q is None: + q = e[0] + + if len(e) == 1 or e[0] != q or e[-1] != q: + self.quoting = 'unquoted' + break + else: + self.quoting = 'quoted' + + if self.quoting == 'quoted': + util.warn_deprecated( + 'Manually quoting %s value literals is deprecated. Supply ' + 'unquoted values and use the quoting= option in cases of ' + 'ambiguity.' % self.__class__.__name__) + + values = self._strip_values(values) + + self._enumerated_values = values + length = max([len(v) for v in values] + [0]) + return values, length + + @classmethod + def _strip_values(cls, values): + strip_values = [] + for a in values: + if a[0:1] == '"' or a[0:1] == "'": + # strip enclosing quotes and unquote interior + a = a[1:-1].replace(a[0] * 2, a[0]) + strip_values.append(a) + return strip_values + + +class ENUM(sqltypes.Enum, _EnumeratedValues): + """MySQL ENUM type.""" + + __visit_name__ = 'ENUM' + + def __init__(self, *enums, **kw): + """Construct an ENUM. + + E.g.:: + + Column('myenum', ENUM("foo", "bar", "baz")) + + :param enums: The range of valid values for this ENUM. Values will be + quoted when generating the schema according to the quoting flag (see + below). This object may also be a PEP-435-compliant enumerated + type. + + .. versionadded: 1.1 added support for PEP-435-compliant enumerated + types. + + :param strict: This flag has no effect. + + .. 
versionchanged:: The MySQL ENUM type as well as the base Enum + type now validates all Python data values. + + :param charset: Optional, a column-level character set for this string + value. Takes precedence to 'ascii' or 'unicode' short-hand. + + :param collation: Optional, a column-level collation for this string + value. Takes precedence to 'binary' short-hand. + + :param ascii: Defaults to False: short-hand for the ``latin1`` + character set, generates ASCII in schema. + + :param unicode: Defaults to False: short-hand for the ``ucs2`` + character set, generates UNICODE in schema. + + :param binary: Defaults to False: short-hand, pick the binary + collation type that matches the column's character set. Generates + BINARY in schema. This does not affect the type of data stored, + only the collation of character data. + + :param quoting: Defaults to 'auto': automatically determine enum value + quoting. If all enum values are surrounded by the same quoting + character, then use 'quoted' mode. Otherwise, use 'unquoted' mode. + + 'quoted': values in enums are already quoted, they will be used + directly when generating the schema - this usage is deprecated. + + 'unquoted': values in enums are not quoted, they will be escaped and + surrounded by single quotes when generating the schema. + + Previous versions of this type always required manually quoted + values to be supplied; future versions will always quote the string + literals for you. This is a transitional option. + + """ + + kw.pop('strict', None) + validate_strings = kw.pop("validate_strings", False) + sqltypes.Enum.__init__( + self, validate_strings=validate_strings, *enums) + kw.pop('metadata', None) + kw.pop('schema', None) + kw.pop('name', None) + kw.pop('quote', None) + kw.pop('native_enum', None) + kw.pop('inherit_schema', None) + kw.pop('_create_events', None) + _StringType.__init__(self, length=self.length, **kw) + + def _setup_for_values(self, values, objects, kw): + values, length = self._init_values(values, kw) + return sqltypes.Enum._setup_for_values(self, values, objects, kw) + + def _object_value_for_elem(self, elem): + # mysql sends back a blank string for any value that + # was persisted that was not in the enums; that is, it does no + # validation on the incoming data, it "truncates" it to be + # the blank string. Return it straight. + if elem == "": + return elem + else: + return super(ENUM, self)._object_value_for_elem(elem) + + def __repr__(self): + return util.generic_repr( + self, to_inspect=[ENUM, _StringType, sqltypes.Enum]) + + def adapt(self, cls, **kw): + return sqltypes.Enum.adapt(self, cls, **kw) + + +class SET(_EnumeratedValues): + """MySQL SET type.""" + + __visit_name__ = 'SET' + + def __init__(self, *values, **kw): + """Construct a SET. + + E.g.:: + + Column('myset', SET("foo", "bar", "baz")) + + + The list of potential values is required in the case that this + set will be used to generate DDL for a table, or if the + :paramref:`.SET.retrieve_as_bitwise` flag is set to True. + + :param values: The range of valid values for this SET. + + :param convert_unicode: Same flag as that of + :paramref:`.String.convert_unicode`. + + :param collation: same as that of :paramref:`.String.collation` + + :param charset: same as that of :paramref:`.VARCHAR.charset`. + + :param ascii: same as that of :paramref:`.VARCHAR.ascii`. + + :param unicode: same as that of :paramref:`.VARCHAR.unicode`. + + :param binary: same as that of :paramref:`.VARCHAR.binary`. 
+ + :param quoting: Defaults to 'auto': automatically determine set value + quoting. If all values are surrounded by the same quoting + character, then use 'quoted' mode. Otherwise, use 'unquoted' mode. + + 'quoted': values in enums are already quoted, they will be used + directly when generating the schema - this usage is deprecated. + + 'unquoted': values in enums are not quoted, they will be escaped and + surrounded by single quotes when generating the schema. + + Previous versions of this type always required manually quoted + values to be supplied; future versions will always quote the string + literals for you. This is a transitional option. + + .. versionadded:: 0.9.0 + + :param retrieve_as_bitwise: if True, the data for the set type will be + persisted and selected using an integer value, where a set is coerced + into a bitwise mask for persistence. MySQL allows this mode which + has the advantage of being able to store values unambiguously, + such as the blank string ``''``. The datatype will appear + as the expression ``col + 0`` in a SELECT statement, so that the + value is coerced into an integer value in result sets. + This flag is required if one wishes + to persist a set that can store the blank string ``''`` as a value. + + .. warning:: + + When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is + essential that the list of set values is expressed in the + **exact same order** as exists on the MySQL database. + + .. versionadded:: 1.0.0 + + + """ + self.retrieve_as_bitwise = kw.pop('retrieve_as_bitwise', False) + values, length = self._init_values(values, kw) + self.values = tuple(values) + if not self.retrieve_as_bitwise and '' in values: + raise exc.ArgumentError( + "Can't use the blank value '' in a SET without " + "setting retrieve_as_bitwise=True") + if self.retrieve_as_bitwise: + self._bitmap = dict( + (value, 2 ** idx) + for idx, value in enumerate(self.values) + ) + self._bitmap.update( + (2 ** idx, value) + for idx, value in enumerate(self.values) + ) + kw.setdefault('length', length) + super(SET, self).__init__(**kw) + + def column_expression(self, colexpr): + if self.retrieve_as_bitwise: + return sql.type_coerce( + sql.type_coerce(colexpr, sqltypes.Integer) + 0, + self + ) + else: + return colexpr + + def result_processor(self, dialect, coltype): + if self.retrieve_as_bitwise: + def process(value): + if value is not None: + value = int(value) + + return set( + util.map_bits(self._bitmap.__getitem__, value) + ) + else: + return None + else: + super_convert = super(SET, self).result_processor(dialect, coltype) + + def process(value): + if isinstance(value, util.string_types): + # MySQLdb returns a string, let's parse + if super_convert: + value = super_convert(value) + return set(re.findall(r'[^,]+', value)) + else: + # mysql-connector-python does a naive + # split(",") which throws in an empty string + if value is not None: + value.discard('') + return value + return process + + def bind_processor(self, dialect): + super_convert = super(SET, self).bind_processor(dialect) + if self.retrieve_as_bitwise: + def process(value): + if value is None: + return None + elif isinstance(value, util.int_types + util.string_types): + if super_convert: + return super_convert(value) + else: + return value + else: + int_value = 0 + for v in value: + int_value |= self._bitmap[v] + return int_value + else: + + def process(value): + # accept strings and int (actually bitflag) values directly + if value is not None and not isinstance( + value, util.int_types + util.string_types): 
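+                    # e.g. the set {'a', 'b'} is flattened to "a,b" here
+                    # (ordering is illustrative; Python sets are unordered).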
+ value = ",".join(value) + + if super_convert: + return super_convert(value) + else: + return value + return process + + def adapt(self, impltype, **kw): + kw['retrieve_as_bitwise'] = self.retrieve_as_bitwise + return util.constructor_copy( + self, impltype, + *self.values, + **kw + ) diff --git a/app/lib/sqlalchemy/dialects/mysql/gaerdbms.py b/app/lib/sqlalchemy/dialects/mysql/gaerdbms.py new file mode 100644 index 0000000..1c64823 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mysql/gaerdbms.py @@ -0,0 +1,102 @@ +# mysql/gaerdbms.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +""" +.. dialect:: mysql+gaerdbms + :name: Google Cloud SQL + :dbapi: rdbms + :connectstring: mysql+gaerdbms:///?instance= + :url: https://developers.google.com/appengine/docs/python/cloud-sql/\ +developers-guide + + This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with + minimal changes. + + .. versionadded:: 0.7.8 + + .. deprecated:: 1.0 This dialect is **no longer necessary** for + Google Cloud SQL; the MySQLdb dialect can be used directly. + Cloud SQL now recommends creating connections via the + mysql dialect using the URL format + + ``mysql+mysqldb://root@/?unix_socket=/cloudsql/:`` + + +Pooling +------- + +Google App Engine connections appear to be randomly recycled, +so the dialect does not pool connections. The :class:`.NullPool` +implementation is installed within the :class:`.Engine` by +default. + +""" + +import os + +from .mysqldb import MySQLDialect_mysqldb +from ...pool import NullPool +import re +from sqlalchemy.util import warn_deprecated + + +def _is_dev_environment(): + return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/') + + +class MySQLDialect_gaerdbms(MySQLDialect_mysqldb): + + @classmethod + def dbapi(cls): + + warn_deprecated( + "Google Cloud SQL now recommends creating connections via the " + "MySQLdb dialect directly, using the URL format " + "mysql+mysqldb://root@/?unix_socket=/cloudsql/" + ":" + ) + + # from django: + # http://code.google.com/p/googleappengine/source/ + # browse/trunk/python/google/storage/speckle/ + # python/django/backend/base.py#118 + # see also [ticket:2649] + # see also http://stackoverflow.com/q/14224679/34549 + from google.appengine.api import apiproxy_stub_map + + if _is_dev_environment(): + from google.appengine.api import rdbms_mysqldb + return rdbms_mysqldb + elif apiproxy_stub_map.apiproxy.GetStub('rdbms'): + from google.storage.speckle.python.api import rdbms_apiproxy + return rdbms_apiproxy + else: + from google.storage.speckle.python.api import rdbms_googleapi + return rdbms_googleapi + + @classmethod + def get_pool_class(cls, url): + # Cloud SQL connections die at any moment + return NullPool + + def create_connect_args(self, url): + opts = url.translate_connect_args() + if not _is_dev_environment(): + # 'dsn' and 'instance' are because we are skipping + # the traditional google.api.rdbms wrapper + opts['dsn'] = '' + opts['instance'] = url.query['instance'] + return [], opts + + def _extract_error_code(self, exception): + match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match(str(exception)) + # The rdbms api will wrap then re-raise some types of errors + # making this regex return no matches. 
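+        # e.g. "1045: Access denied" or "(1045L, ...)" both yield 1045,
+        # while a wrapped message with no leading code yields None
+        # (illustrative inputs, not from the rdbms api docs).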
+ code = match.group(1) or match.group(2) if match else None + if code: + return int(code) + +dialect = MySQLDialect_gaerdbms diff --git a/app/lib/sqlalchemy/dialects/mysql/json.py b/app/lib/sqlalchemy/dialects/mysql/json.py new file mode 100644 index 0000000..d7b8666 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mysql/json.py @@ -0,0 +1,79 @@ +# mysql/json.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from __future__ import absolute_import + +import json + +from ...sql import elements +from ... import types as sqltypes +from ... import util + + +class JSON(sqltypes.JSON): + """MySQL JSON type. + + MySQL supports JSON as of version 5.7. Note that MariaDB does **not** + support JSON at the time of this writing. + + The :class:`.mysql.JSON` type supports persistence of JSON values + as well as the core index operations provided by :class:`.types.JSON` + datatype, by adapting the operations to render the ``JSON_EXTRACT`` + function at the database level. + + .. versionadded:: 1.1 + + """ + + pass + + +class _FormatTypeMixin(object): + def _format_value(self, value): + raise NotImplementedError() + + def bind_processor(self, dialect): + super_proc = self.string_bind_processor(dialect) + + def process(value): + value = self._format_value(value) + if super_proc: + value = super_proc(value) + return value + + return process + + def literal_processor(self, dialect): + super_proc = self.string_literal_processor(dialect) + + def process(value): + value = self._format_value(value) + if super_proc: + value = super_proc(value) + return value + + return process + + +class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType): + + def _format_value(self, value): + if isinstance(value, int): + value = "$[%s]" % value + else: + value = '$."%s"' % value + return value + + +class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType): + def _format_value(self, value): + return "$%s" % ( + "".join([ + "[%s]" % elem if isinstance(elem, int) + else '."%s"' % elem for elem in value + ]) + ) diff --git a/app/lib/sqlalchemy/dialects/mysql/mysqlconnector.py b/app/lib/sqlalchemy/dialects/mysql/mysqlconnector.py new file mode 100644 index 0000000..ac77ebc --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mysql/mysqlconnector.py @@ -0,0 +1,203 @@ +# mysql/mysqlconnector.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: mysql+mysqlconnector + :name: MySQL Connector/Python + :dbapi: myconnpy + :connectstring: mysql+mysqlconnector://:@\ +[:]/ + :url: http://dev.mysql.com/downloads/connector/python/ + + +Unicode +------- + +Please see :ref:`mysql_unicode` for current recommendations on unicode +handling. + +""" + +from .base import (MySQLDialect, MySQLExecutionContext, + MySQLCompiler, MySQLIdentifierPreparer, + BIT) + +from ... 
import util +import re + + +class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext): + + def get_lastrowid(self): + return self.cursor.lastrowid + + +class MySQLCompiler_mysqlconnector(MySQLCompiler): + def visit_mod_binary(self, binary, operator, **kw): + if self.dialect._mysqlconnector_double_percents: + return self.process(binary.left, **kw) + " %% " + \ + self.process(binary.right, **kw) + else: + return self.process(binary.left, **kw) + " % " + \ + self.process(binary.right, **kw) + + def post_process_text(self, text): + if self.dialect._mysqlconnector_double_percents: + return text.replace('%', '%%') + else: + return text + + def escape_literal_column(self, text): + if self.dialect._mysqlconnector_double_percents: + return text.replace('%', '%%') + else: + return text + + +class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer): + + def _escape_identifier(self, value): + value = value.replace(self.escape_quote, self.escape_to_quote) + if self.dialect._mysqlconnector_double_percents: + return value.replace("%", "%%") + else: + return value + + +class _myconnpyBIT(BIT): + def result_processor(self, dialect, coltype): + """MySQL-connector already converts mysql bits, so.""" + + return None + + +class MySQLDialect_mysqlconnector(MySQLDialect): + driver = 'mysqlconnector' + + supports_unicode_binds = True + + supports_sane_rowcount = True + supports_sane_multi_rowcount = True + + supports_native_decimal = True + + default_paramstyle = 'format' + execution_ctx_cls = MySQLExecutionContext_mysqlconnector + statement_compiler = MySQLCompiler_mysqlconnector + + preparer = MySQLIdentifierPreparer_mysqlconnector + + colspecs = util.update_copy( + MySQLDialect.colspecs, + { + BIT: _myconnpyBIT, + } + ) + + @util.memoized_property + def supports_unicode_statements(self): + return util.py3k or self._mysqlconnector_version_info > (2, 0) + + @classmethod + def dbapi(cls): + from mysql import connector + return connector + + def create_connect_args(self, url): + opts = url.translate_connect_args(username='user') + + opts.update(url.query) + + util.coerce_kw_type(opts, 'allow_local_infile', bool) + util.coerce_kw_type(opts, 'autocommit', bool) + util.coerce_kw_type(opts, 'buffered', bool) + util.coerce_kw_type(opts, 'compress', bool) + util.coerce_kw_type(opts, 'connection_timeout', int) + util.coerce_kw_type(opts, 'connect_timeout', int) + util.coerce_kw_type(opts, 'consume_results', bool) + util.coerce_kw_type(opts, 'force_ipv6', bool) + util.coerce_kw_type(opts, 'get_warnings', bool) + util.coerce_kw_type(opts, 'pool_reset_session', bool) + util.coerce_kw_type(opts, 'pool_size', int) + util.coerce_kw_type(opts, 'raise_on_warnings', bool) + util.coerce_kw_type(opts, 'raw', bool) + util.coerce_kw_type(opts, 'ssl_verify_cert', bool) + util.coerce_kw_type(opts, 'use_pure', bool) + util.coerce_kw_type(opts, 'use_unicode', bool) + + # unfortunately, MySQL/connector python refuses to release a + # cursor without reading fully, so non-buffered isn't an option + opts.setdefault('buffered', True) + + # FOUND_ROWS must be set in ClientFlag to enable + # supports_sane_rowcount. 
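+        # e.g. a hypothetical URL such as
+        #   mysql+mysqlconnector://scott:tiger@localhost/test?buffered=true
+        # reaches this point with opts['buffered'] already coerced to a
+        # bool by the coerce_kw_type() calls above.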
+ if self.dbapi is not None: + try: + from mysql.connector.constants import ClientFlag + client_flags = opts.get( + 'client_flags', ClientFlag.get_default()) + client_flags |= ClientFlag.FOUND_ROWS + opts['client_flags'] = client_flags + except Exception: + pass + return [[], opts] + + @util.memoized_property + def _mysqlconnector_version_info(self): + if self.dbapi and hasattr(self.dbapi, '__version__'): + m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', + self.dbapi.__version__) + if m: + return tuple( + int(x) + for x in m.group(1, 2, 3) + if x is not None) + + @util.memoized_property + def _mysqlconnector_double_percents(self): + return not util.py3k and self._mysqlconnector_version_info < (2, 0) + + def _get_server_version_info(self, connection): + dbapi_con = connection.connection + version = dbapi_con.get_server_version() + return tuple(version) + + def _detect_charset(self, connection): + return connection.connection.charset + + def _extract_error_code(self, exception): + return exception.errno + + def is_disconnect(self, e, connection, cursor): + errnos = (2006, 2013, 2014, 2045, 2055, 2048) + exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError) + if isinstance(e, exceptions): + return e.errno in errnos or \ + "MySQL Connection not available." in str(e) + else: + return False + + def _compat_fetchall(self, rp, charset=None): + return rp.fetchall() + + def _compat_fetchone(self, rp, charset=None): + return rp.fetchone() + + _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED', + 'READ COMMITTED', 'REPEATABLE READ', + 'AUTOCOMMIT']) + + def _set_isolation_level(self, connection, level): + if level == 'AUTOCOMMIT': + connection.autocommit = True + else: + connection.autocommit = False + super(MySQLDialect_mysqlconnector, self)._set_isolation_level( + connection, level) + + +dialect = MySQLDialect_mysqlconnector diff --git a/app/lib/sqlalchemy/dialects/mysql/mysqldb.py b/app/lib/sqlalchemy/dialects/mysql/mysqldb.py new file mode 100644 index 0000000..6af8601 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mysql/mysqldb.py @@ -0,0 +1,228 @@ +# mysql/mysqldb.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" + +.. dialect:: mysql+mysqldb + :name: MySQL-Python + :dbapi: mysqldb + :connectstring: mysql+mysqldb://:@[:]/ + :url: http://sourceforge.net/projects/mysql-python + +.. _mysqldb_unicode: + +Unicode +------- + +Please see :ref:`mysql_unicode` for current recommendations on unicode +handling. + +Py3K Support +------------ + +Currently, MySQLdb only runs on Python 2 and development has been stopped. +`mysqlclient`_ is fork of MySQLdb and provides Python 3 support as well +as some bugfixes. + +.. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python + +Using MySQLdb with Google Cloud SQL +----------------------------------- + +Google Cloud SQL now recommends use of the MySQLdb dialect. Connect +using a URL like the following:: + + mysql+mysqldb://root@/?unix_socket=/cloudsql/: + +Server Side Cursors +------------------- + +The mysqldb dialect supports server-side cursors. See :ref:`mysql_ss_cursors`. + +""" + +from .base import (MySQLDialect, MySQLExecutionContext, + MySQLCompiler, MySQLIdentifierPreparer) +from .base import TEXT +from ... import sql +from ... 
import util +import re + + +class MySQLExecutionContext_mysqldb(MySQLExecutionContext): + + @property + def rowcount(self): + if hasattr(self, '_rowcount'): + return self._rowcount + else: + return self.cursor.rowcount + + +class MySQLCompiler_mysqldb(MySQLCompiler): + def visit_mod_binary(self, binary, operator, **kw): + return self.process(binary.left, **kw) + " %% " + \ + self.process(binary.right, **kw) + + def post_process_text(self, text): + return text.replace('%', '%%') + + +class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer): + + def _escape_identifier(self, value): + value = value.replace(self.escape_quote, self.escape_to_quote) + return value.replace("%", "%%") + + +class MySQLDialect_mysqldb(MySQLDialect): + driver = 'mysqldb' + supports_unicode_statements = True + supports_sane_rowcount = True + supports_sane_multi_rowcount = True + + supports_native_decimal = True + + default_paramstyle = 'format' + execution_ctx_cls = MySQLExecutionContext_mysqldb + statement_compiler = MySQLCompiler_mysqldb + preparer = MySQLIdentifierPreparer_mysqldb + + def __init__(self, server_side_cursors=False, **kwargs): + super(MySQLDialect_mysqldb, self).__init__(**kwargs) + self.server_side_cursors = server_side_cursors + + @util.langhelpers.memoized_property + def supports_server_side_cursors(self): + try: + cursors = __import__('MySQLdb.cursors').cursors + self._sscursor = cursors.SSCursor + return True + except (ImportError, AttributeError): + return False + + @classmethod + def dbapi(cls): + return __import__('MySQLdb') + + def do_executemany(self, cursor, statement, parameters, context=None): + rowcount = cursor.executemany(statement, parameters) + if context is not None: + context._rowcount = rowcount + + def _check_unicode_returns(self, connection): + # work around issue fixed in + # https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8 + # specific issue w/ the utf8_bin collation and unicode returns + + has_utf8_bin = self.server_version_info > (5, ) and \ + connection.scalar( + "show collation where %s = 'utf8' and %s = 'utf8_bin'" + % ( + self.identifier_preparer.quote("Charset"), + self.identifier_preparer.quote("Collation") + )) + if has_utf8_bin: + additional_tests = [ + sql.collate(sql.cast( + sql.literal_column( + "'test collated returns'"), + TEXT(charset='utf8')), "utf8_bin") + ] + else: + additional_tests = [] + return super(MySQLDialect_mysqldb, self)._check_unicode_returns( + connection, additional_tests) + + def create_connect_args(self, url): + opts = url.translate_connect_args(database='db', username='user', + password='passwd') + opts.update(url.query) + + util.coerce_kw_type(opts, 'compress', bool) + util.coerce_kw_type(opts, 'connect_timeout', int) + util.coerce_kw_type(opts, 'read_timeout', int) + util.coerce_kw_type(opts, 'client_flag', int) + util.coerce_kw_type(opts, 'local_infile', int) + # Note: using either of the below will cause all strings to be + # returned as Unicode, both in raw SQL operations and with column + # types like String and MSString. + util.coerce_kw_type(opts, 'use_unicode', bool) + util.coerce_kw_type(opts, 'charset', str) + + # Rich values 'cursorclass' and 'conv' are not supported via + # query string. 
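+        # The ssl_* URL arguments handled below are folded into the single
+        # 'ssl' dictionary accepted by MySQLdb's connect(), e.g.
+        # ssl_ca=/path/to/ca.pem becomes ssl={'ca': '/path/to/ca.pem'}.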
+ + ssl = {} + keys = ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher'] + for key in keys: + if key in opts: + ssl[key[4:]] = opts[key] + util.coerce_kw_type(ssl, key[4:], str) + del opts[key] + if ssl: + opts['ssl'] = ssl + + # FOUND_ROWS must be set in CLIENT_FLAGS to enable + # supports_sane_rowcount. + client_flag = opts.get('client_flag', 0) + if self.dbapi is not None: + try: + CLIENT_FLAGS = __import__( + self.dbapi.__name__ + '.constants.CLIENT' + ).constants.CLIENT + client_flag |= CLIENT_FLAGS.FOUND_ROWS + except (AttributeError, ImportError): + self.supports_sane_rowcount = False + opts['client_flag'] = client_flag + return [[], opts] + + def _get_server_version_info(self, connection): + dbapi_con = connection.connection + version = [] + r = re.compile(r'[.\-]') + for n in r.split(dbapi_con.get_server_info()): + try: + version.append(int(n)) + except ValueError: + version.append(n) + return tuple(version) + + def _extract_error_code(self, exception): + return exception.args[0] + + def _detect_charset(self, connection): + """Sniff out the character set in use for connection results.""" + + try: + # note: the SQL here would be + # "SHOW VARIABLES LIKE 'character_set%%'" + cset_name = connection.connection.character_set_name + except AttributeError: + util.warn( + "No 'character_set_name' can be detected with " + "this MySQL-Python version; " + "please upgrade to a recent version of MySQL-Python. " + "Assuming latin1.") + return 'latin1' + else: + return cset_name() + + _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED', + 'READ COMMITTED', 'REPEATABLE READ', + 'AUTOCOMMIT']) + + def _set_isolation_level(self, connection, level): + if level == 'AUTOCOMMIT': + connection.autocommit(True) + else: + connection.autocommit(False) + super(MySQLDialect_mysqldb, self)._set_isolation_level(connection, + level) + + +dialect = MySQLDialect_mysqldb diff --git a/app/lib/sqlalchemy/dialects/mysql/oursql.py b/app/lib/sqlalchemy/dialects/mysql/oursql.py new file mode 100644 index 0000000..f7f90e9 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mysql/oursql.py @@ -0,0 +1,254 @@ +# mysql/oursql.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" + +.. dialect:: mysql+oursql + :name: OurSQL + :dbapi: oursql + :connectstring: mysql+oursql://:@[:]/ + :url: http://packages.python.org/oursql/ + +Unicode +------- + +Please see :ref:`mysql_unicode` for current recommendations on unicode +handling. + + +""" + +import re + +from .base import (BIT, MySQLDialect, MySQLExecutionContext) +from ... 
import types as sqltypes, util + + +class _oursqlBIT(BIT): + def result_processor(self, dialect, coltype): + """oursql already converts mysql bits, so.""" + + return None + + +class MySQLExecutionContext_oursql(MySQLExecutionContext): + + @property + def plain_query(self): + return self.execution_options.get('_oursql_plain_query', False) + + +class MySQLDialect_oursql(MySQLDialect): + driver = 'oursql' + + if util.py2k: + supports_unicode_binds = True + supports_unicode_statements = True + + supports_native_decimal = True + + supports_sane_rowcount = True + supports_sane_multi_rowcount = True + execution_ctx_cls = MySQLExecutionContext_oursql + + colspecs = util.update_copy( + MySQLDialect.colspecs, + { + sqltypes.Time: sqltypes.Time, + BIT: _oursqlBIT, + } + ) + + @classmethod + def dbapi(cls): + return __import__('oursql') + + def do_execute(self, cursor, statement, parameters, context=None): + """Provide an implementation of + *cursor.execute(statement, parameters)*.""" + + if context and context.plain_query: + cursor.execute(statement, plain_query=True) + else: + cursor.execute(statement, parameters) + + def do_begin(self, connection): + connection.cursor().execute('BEGIN', plain_query=True) + + def _xa_query(self, connection, query, xid): + if util.py2k: + arg = connection.connection._escape_string(xid) + else: + charset = self._connection_charset + arg = connection.connection._escape_string( + xid.encode(charset)).decode(charset) + arg = "'%s'" % arg + connection.execution_options( + _oursql_plain_query=True).execute(query % arg) + + # Because mysql is bad, these methods have to be + # reimplemented to use _PlainQuery. Basically, some queries + # refuse to return any data if they're run through + # the parameterized query API, or refuse to be parameterized + # in the first place. + def do_begin_twophase(self, connection, xid): + self._xa_query(connection, 'XA BEGIN %s', xid) + + def do_prepare_twophase(self, connection, xid): + self._xa_query(connection, 'XA END %s', xid) + self._xa_query(connection, 'XA PREPARE %s', xid) + + def do_rollback_twophase(self, connection, xid, is_prepared=True, + recover=False): + if not is_prepared: + self._xa_query(connection, 'XA END %s', xid) + self._xa_query(connection, 'XA ROLLBACK %s', xid) + + def do_commit_twophase(self, connection, xid, is_prepared=True, + recover=False): + if not is_prepared: + self.do_prepare_twophase(connection, xid) + self._xa_query(connection, 'XA COMMIT %s', xid) + + # Q: why didn't we need all these "plain_query" overrides earlier ? + # am i on a newer/older version of OurSQL ? 
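+    #
+    # Each reflection override below re-invokes the generic MySQLDialect
+    # implementation against a connection flagged with
+    # _oursql_plain_query=True, so the underlying SHOW / DESCRIBE
+    # statements bypass oursql's parameterized-statement API.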
+ def has_table(self, connection, table_name, schema=None): + return MySQLDialect.has_table( + self, + connection.connect().execution_options(_oursql_plain_query=True), + table_name, + schema + ) + + def get_table_options(self, connection, table_name, schema=None, **kw): + return MySQLDialect.get_table_options( + self, + connection.connect().execution_options(_oursql_plain_query=True), + table_name, + schema=schema, + **kw + ) + + def get_columns(self, connection, table_name, schema=None, **kw): + return MySQLDialect.get_columns( + self, + connection.connect().execution_options(_oursql_plain_query=True), + table_name, + schema=schema, + **kw + ) + + def get_view_names(self, connection, schema=None, **kw): + return MySQLDialect.get_view_names( + self, + connection.connect().execution_options(_oursql_plain_query=True), + schema=schema, + **kw + ) + + def get_table_names(self, connection, schema=None, **kw): + return MySQLDialect.get_table_names( + self, + connection.connect().execution_options(_oursql_plain_query=True), + schema + ) + + def get_schema_names(self, connection, **kw): + return MySQLDialect.get_schema_names( + self, + connection.connect().execution_options(_oursql_plain_query=True), + **kw + ) + + def initialize(self, connection): + return MySQLDialect.initialize( + self, + connection.execution_options(_oursql_plain_query=True) + ) + + def _show_create_table(self, connection, table, charset=None, + full_name=None): + return MySQLDialect._show_create_table( + self, + connection.contextual_connect(close_with_result=True). + execution_options(_oursql_plain_query=True), + table, charset, full_name + ) + + def is_disconnect(self, e, connection, cursor): + if isinstance(e, self.dbapi.ProgrammingError): + return e.errno is None and 'cursor' not in e.args[1] \ + and e.args[1].endswith('closed') + else: + return e.errno in (2006, 2013, 2014, 2045, 2055) + + def create_connect_args(self, url): + opts = url.translate_connect_args(database='db', username='user', + password='passwd') + opts.update(url.query) + + util.coerce_kw_type(opts, 'port', int) + util.coerce_kw_type(opts, 'compress', bool) + util.coerce_kw_type(opts, 'autoping', bool) + util.coerce_kw_type(opts, 'raise_on_warnings', bool) + + util.coerce_kw_type(opts, 'default_charset', bool) + if opts.pop('default_charset', False): + opts['charset'] = None + else: + util.coerce_kw_type(opts, 'charset', str) + opts['use_unicode'] = opts.get('use_unicode', True) + util.coerce_kw_type(opts, 'use_unicode', bool) + + # FOUND_ROWS must be set in CLIENT_FLAGS to enable + # supports_sane_rowcount. 
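+        # oursql exposes this via its 'found_rows' connect argument rather
+        # than a raw client flag, so it is simply defaulted on below.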
+ opts.setdefault('found_rows', True) + + ssl = {} + for key in ['ssl_ca', 'ssl_key', 'ssl_cert', + 'ssl_capath', 'ssl_cipher']: + if key in opts: + ssl[key[4:]] = opts[key] + util.coerce_kw_type(ssl, key[4:], str) + del opts[key] + if ssl: + opts['ssl'] = ssl + + return [[], opts] + + def _get_server_version_info(self, connection): + dbapi_con = connection.connection + version = [] + r = re.compile(r'[.\-]') + for n in r.split(dbapi_con.server_info): + try: + version.append(int(n)) + except ValueError: + version.append(n) + return tuple(version) + + def _extract_error_code(self, exception): + return exception.errno + + def _detect_charset(self, connection): + """Sniff out the character set in use for connection results.""" + + return connection.connection.charset + + def _compat_fetchall(self, rp, charset=None): + """oursql isn't super-broken like MySQLdb, yaaay.""" + return rp.fetchall() + + def _compat_fetchone(self, rp, charset=None): + """oursql isn't super-broken like MySQLdb, yaaay.""" + return rp.fetchone() + + def _compat_first(self, rp, charset=None): + return rp.first() + + +dialect = MySQLDialect_oursql diff --git a/app/lib/sqlalchemy/dialects/mysql/pymysql.py b/app/lib/sqlalchemy/dialects/mysql/pymysql.py new file mode 100644 index 0000000..b787bc2 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mysql/pymysql.py @@ -0,0 +1,70 @@ +# mysql/pymysql.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" + +.. dialect:: mysql+pymysql + :name: PyMySQL + :dbapi: pymysql + :connectstring: mysql+pymysql://:@/\ +[?] + :url: http://www.pymysql.org/ + +Unicode +------- + +Please see :ref:`mysql_unicode` for current recommendations on unicode +handling. + +MySQL-Python Compatibility +-------------------------- + +The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver, +and targets 100% compatibility. Most behavioral notes for MySQL-python apply +to the pymysql driver as well. + +""" + +from .mysqldb import MySQLDialect_mysqldb +from ...util import langhelpers, py3k + + +class MySQLDialect_pymysql(MySQLDialect_mysqldb): + driver = 'pymysql' + + description_encoding = None + + # generally, these two values should be both True + # or both False. PyMySQL unicode tests pass all the way back + # to 0.4 either way. 
See [ticket:3337] + supports_unicode_statements = True + supports_unicode_binds = True + + def __init__(self, server_side_cursors=False, **kwargs): + super(MySQLDialect_pymysql, self).__init__(**kwargs) + self.server_side_cursors = server_side_cursors + + @langhelpers.memoized_property + def supports_server_side_cursors(self): + try: + cursors = __import__('pymysql.cursors').cursors + self._sscursor = cursors.SSCursor + return True + except (ImportError, AttributeError): + return False + + @classmethod + def dbapi(cls): + return __import__('pymysql') + + if py3k: + def _extract_error_code(self, exception): + if isinstance(exception.args[0], Exception): + exception = exception.args[0] + return exception.args[0] + +dialect = MySQLDialect_pymysql diff --git a/app/lib/sqlalchemy/dialects/mysql/pyodbc.py b/app/lib/sqlalchemy/dialects/mysql/pyodbc.py new file mode 100644 index 0000000..2ec6edf --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mysql/pyodbc.py @@ -0,0 +1,79 @@ +# mysql/pyodbc.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" + + +.. dialect:: mysql+pyodbc + :name: PyODBC + :dbapi: pyodbc + :connectstring: mysql+pyodbc://:@ + :url: http://pypi.python.org/pypi/pyodbc/ + + .. note:: The PyODBC for MySQL dialect is not well supported, and + is subject to unresolved character encoding issues + which exist within the current ODBC drivers available. + (see http://code.google.com/p/pyodbc/issues/detail?id=25). + Other dialects for MySQL are recommended. + +""" + +from .base import MySQLDialect, MySQLExecutionContext +from ...connectors.pyodbc import PyODBCConnector +from ... import util +import re + + +class MySQLExecutionContext_pyodbc(MySQLExecutionContext): + + def get_lastrowid(self): + cursor = self.create_cursor() + cursor.execute("SELECT LAST_INSERT_ID()") + lastrowid = cursor.fetchone()[0] + cursor.close() + return lastrowid + + +class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect): + supports_unicode_statements = False + execution_ctx_cls = MySQLExecutionContext_pyodbc + + pyodbc_driver_name = "MySQL" + + def __init__(self, **kw): + # deal with http://code.google.com/p/pyodbc/issues/detail?id=25 + kw.setdefault('convert_unicode', True) + super(MySQLDialect_pyodbc, self).__init__(**kw) + + def _detect_charset(self, connection): + """Sniff out the character set in use for connection results.""" + + # Prefer 'character_set_results' for the current connection over the + # value in the driver. SET NAMES or individual variable SETs will + # change the charset without updating the driver's view of the world. + # + # If it's decided that issuing that sort of SQL leaves you SOL, then + # this can prefer the driver value. + rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'") + opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)]) + for key in ('character_set_connection', 'character_set'): + if opts.get(key, None): + return opts[key] + + util.warn("Could not detect the connection character set. 
" + "Assuming latin1.") + return 'latin1' + + def _extract_error_code(self, exception): + m = re.compile(r"\((\d+)\)").search(str(exception.args)) + c = m.group(1) + if c: + return int(c) + else: + return None + +dialect = MySQLDialect_pyodbc diff --git a/app/lib/sqlalchemy/dialects/mysql/reflection.py b/app/lib/sqlalchemy/dialects/mysql/reflection.py new file mode 100644 index 0000000..f5f09b8 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mysql/reflection.py @@ -0,0 +1,450 @@ +# mysql/reflection.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +import re +from ... import log, util +from ... import types as sqltypes +from .enumerated import _EnumeratedValues, SET +from .types import DATETIME, TIME, TIMESTAMP + + +class ReflectedState(object): + """Stores raw information about a SHOW CREATE TABLE statement.""" + + def __init__(self): + self.columns = [] + self.table_options = {} + self.table_name = None + self.keys = [] + self.constraints = [] + + +@log.class_logger +class MySQLTableDefinitionParser(object): + """Parses the results of a SHOW CREATE TABLE statement.""" + + def __init__(self, dialect, preparer): + self.dialect = dialect + self.preparer = preparer + self._prep_regexes() + + def parse(self, show_create, charset): + state = ReflectedState() + state.charset = charset + for line in re.split(r'\r?\n', show_create): + if line.startswith(' ' + self.preparer.initial_quote): + self._parse_column(line, state) + # a regular table options line + elif line.startswith(') '): + self._parse_table_options(line, state) + # an ANSI-mode table options line + elif line == ')': + pass + elif line.startswith('CREATE '): + self._parse_table_name(line, state) + # Not present in real reflection, but may be if + # loading from a file. + elif not line: + pass + else: + type_, spec = self._parse_constraints(line) + if type_ is None: + util.warn("Unknown schema content: %r" % line) + elif type_ == 'key': + state.keys.append(spec) + elif type_ == 'constraint': + state.constraints.append(spec) + else: + pass + return state + + def _parse_constraints(self, line): + """Parse a KEY or CONSTRAINT line. + + :param line: A line of SHOW CREATE TABLE output + """ + + # KEY + m = self._re_key.match(line) + if m: + spec = m.groupdict() + # convert columns into name, length pairs + spec['columns'] = self._parse_keyexprs(spec['columns']) + return 'key', spec + + # CONSTRAINT + m = self._re_constraint.match(line) + if m: + spec = m.groupdict() + spec['table'] = \ + self.preparer.unformat_identifiers(spec['table']) + spec['local'] = [c[0] + for c in self._parse_keyexprs(spec['local'])] + spec['foreign'] = [c[0] + for c in self._parse_keyexprs(spec['foreign'])] + return 'constraint', spec + + # PARTITION and SUBPARTITION + m = self._re_partition.match(line) + if m: + # Punt! + return 'partition', line + + # No match. + return (None, line) + + def _parse_table_name(self, line, state): + """Extract the table name. + + :param line: The first line of SHOW CREATE TABLE + """ + + regex, cleanup = self._pr_name + m = regex.match(line) + if m: + state.table_name = cleanup(m.group('name')) + + def _parse_table_options(self, line, state): + """Build a dictionary of all reflected table-level options. + + :param line: The final line of SHOW CREATE TABLE output. 
+ """ + + options = {} + + if not line or line == ')': + pass + + else: + rest_of_line = line[:] + for regex, cleanup in self._pr_options: + m = regex.search(rest_of_line) + if not m: + continue + directive, value = m.group('directive'), m.group('val') + if cleanup: + value = cleanup(value) + options[directive.lower()] = value + rest_of_line = regex.sub('', rest_of_line) + + for nope in ('auto_increment', 'data directory', 'index directory'): + options.pop(nope, None) + + for opt, val in options.items(): + state.table_options['%s_%s' % (self.dialect.name, opt)] = val + + def _parse_column(self, line, state): + """Extract column details. + + Falls back to a 'minimal support' variant if full parse fails. + + :param line: Any column-bearing line from SHOW CREATE TABLE + """ + + spec = None + m = self._re_column.match(line) + if m: + spec = m.groupdict() + spec['full'] = True + else: + m = self._re_column_loose.match(line) + if m: + spec = m.groupdict() + spec['full'] = False + if not spec: + util.warn("Unknown column definition %r" % line) + return + if not spec['full']: + util.warn("Incomplete reflection of column definition %r" % line) + + name, type_, args = spec['name'], spec['coltype'], spec['arg'] + + try: + col_type = self.dialect.ischema_names[type_] + except KeyError: + util.warn("Did not recognize type '%s' of column '%s'" % + (type_, name)) + col_type = sqltypes.NullType + + # Column type positional arguments eg. varchar(32) + if args is None or args == '': + type_args = [] + elif args[0] == "'" and args[-1] == "'": + type_args = self._re_csv_str.findall(args) + else: + type_args = [int(v) for v in self._re_csv_int.findall(args)] + + # Column type keyword options + type_kw = {} + + if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)): + if type_args: + type_kw['fsp'] = type_args.pop(0) + + for kw in ('unsigned', 'zerofill'): + if spec.get(kw, False): + type_kw[kw] = True + for kw in ('charset', 'collate'): + if spec.get(kw, False): + type_kw[kw] = spec[kw] + if issubclass(col_type, _EnumeratedValues): + type_args = _EnumeratedValues._strip_values(type_args) + + if issubclass(col_type, SET) and '' in type_args: + type_kw['retrieve_as_bitwise'] = True + + type_instance = col_type(*type_args, **type_kw) + + col_kw = {} + + # NOT NULL + col_kw['nullable'] = True + # this can be "NULL" in the case of TIMESTAMP + if spec.get('notnull', False) == 'NOT NULL': + col_kw['nullable'] = False + + # AUTO_INCREMENT + if spec.get('autoincr', False): + col_kw['autoincrement'] = True + elif issubclass(col_type, sqltypes.Integer): + col_kw['autoincrement'] = False + + # DEFAULT + default = spec.get('default', None) + + if default == 'NULL': + # eliminates the need to deal with this later. + default = None + + col_d = dict(name=name, type=type_instance, default=default) + col_d.update(col_kw) + state.columns.append(col_d) + + def _describe_to_create(self, table_name, columns): + """Re-format DESCRIBE output as a SHOW CREATE TABLE string. + + DESCRIBE is a much simpler reflection and is sufficient for + reflecting views for runtime use. This method formats DDL + for columns only- keys are omitted. + + :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples. + SHOW FULL COLUMNS FROM rows must be rearranged for use with + this function. 
+ """ + + buffer = [] + for row in columns: + (name, col_type, nullable, default, extra) = \ + [row[i] for i in (0, 1, 2, 4, 5)] + + line = [' '] + line.append(self.preparer.quote_identifier(name)) + line.append(col_type) + if not nullable: + line.append('NOT NULL') + if default: + if 'auto_increment' in default: + pass + elif (col_type.startswith('timestamp') and + default.startswith('C')): + line.append('DEFAULT') + line.append(default) + elif default == 'NULL': + line.append('DEFAULT') + line.append(default) + else: + line.append('DEFAULT') + line.append("'%s'" % default.replace("'", "''")) + if extra: + line.append(extra) + + buffer.append(' '.join(line)) + + return ''.join([('CREATE TABLE %s (\n' % + self.preparer.quote_identifier(table_name)), + ',\n'.join(buffer), + '\n) ']) + + def _parse_keyexprs(self, identifiers): + """Unpack '"col"(2),"col" ASC'-ish strings into components.""" + + return self._re_keyexprs.findall(identifiers) + + def _prep_regexes(self): + """Pre-compile regular expressions.""" + + self._re_columns = [] + self._pr_options = [] + + _final = self.preparer.final_quote + + quotes = dict(zip(('iq', 'fq', 'esc_fq'), + [re.escape(s) for s in + (self.preparer.initial_quote, + _final, + self.preparer._escape_identifier(_final))])) + + self._pr_name = _pr_compile( + r'^CREATE (?:\w+ +)?TABLE +' + r'%(iq)s(?P(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes, + self.preparer._unescape_identifier) + + # `col`,`col2`(32),`col3`(15) DESC + # + # Note: ASC and DESC aren't reflected, so we'll punt... + self._re_keyexprs = _re_compile( + r'(?:' + r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)' + r'(?:\((\d+)\))?(?=\,|$))+' % quotes) + + # 'foo' or 'foo','bar' or 'fo,o','ba''a''r' + self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27') + + # 123 or 123,456 + self._re_csv_int = _re_compile(r'\d+') + + # `colname` [type opts] + # (NOT NULL | NULL) + # DEFAULT ('value' | CURRENT_TIMESTAMP...) + # COMMENT 'comment' + # COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT) + # STORAGE (DISK|MEMORY) + self._re_column = _re_compile( + r' ' + r'%(iq)s(?P(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +' + r'(?P\w+)' + r'(?:\((?P(?:\d+|\d+,\d+|' + r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?' + r'(?: +(?PUNSIGNED))?' + r'(?: +(?PZEROFILL))?' + r'(?: +CHARACTER SET +(?P[\w_]+))?' + r'(?: +COLLATE +(?P[\w_]+))?' + r'(?: +(?P(?:NOT )?NULL))?' + r'(?: +DEFAULT +(?P' + r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+' + r'(?: +ON UPDATE \w+)?)' + r'))?' + r'(?: +(?PAUTO_INCREMENT))?' + r'(?: +COMMENT +(P(?:\x27\x27|[^\x27])+))?' + r'(?: +COLUMN_FORMAT +(?P\w+))?' + r'(?: +STORAGE +(?P\w+))?' + r'(?: +(?P.*))?' + r',?$' + % quotes + ) + + # Fallback, try to parse as little as possible + self._re_column_loose = _re_compile( + r' ' + r'%(iq)s(?P(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +' + r'(?P\w+)' + r'(?:\((?P(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?' + r'.*?(?P(?:NOT )NULL)?' + % quotes + ) + + # (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))? + # (`col` (ASC|DESC)?, `col` (ASC|DESC)?) + # KEY_BLOCK_SIZE size | WITH PARSER name + self._re_key = _re_compile( + r' ' + r'(?:(?P\S+) )?KEY' + r'(?: +%(iq)s(?P(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?' + r'(?: +USING +(?P\S+))?' + r' +\((?P.+?)\)' + r'(?: +USING +(?P\S+))?' + r'(?: +KEY_BLOCK_SIZE *[ =]? *(?P\S+))?' + r'(?: +WITH PARSER +(?P\S+))?' + r'(?: +COMMENT +(?P(\x27\x27|\x27([^\x27])*?\x27)+))?' 
+            r',?$'
+            % quotes
+        )
+
+        # CONSTRAINT `name` FOREIGN KEY (`local_col`)
+        # REFERENCES `remote` (`remote_col`)
+        # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
+        # ON DELETE CASCADE ON UPDATE RESTRICT
+        #
+        # unique constraints come back as KEYs
+        kw = quotes.copy()
+        kw['on'] = 'RESTRICT|CASCADE|SET NULL|NOACTION'
+        self._re_constraint = _re_compile(
+            r'  '
+            r'CONSTRAINT +'
+            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
+            r'FOREIGN KEY +'
+            r'\((?P<local>[^\)]+?)\) REFERENCES +'
+            r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s'
+            r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
+            r'\((?P<foreign>[^\)]+?)\)'
+            r'(?: +(?P<match>MATCH \w+))?'
+            r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
+            r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
+            % kw
+        )
+
+        # PARTITION
+        #
+        # punt!
+        self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
+
+        # Table-level options (COLLATE, ENGINE, etc.)
+        # Do the string options first, since they have quoted
+        # strings we need to get rid of.
+        for option in _options_of_type_string:
+            self._add_option_string(option)
+
+        for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
+                       'AVG_ROW_LENGTH', 'CHARACTER SET',
+                       'DEFAULT CHARSET', 'CHECKSUM',
+                       'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
+                       'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
+                       'KEY_BLOCK_SIZE'):
+            self._add_option_word(option)
+
+        self._add_option_regex('UNION', r'\([^\)]+\)')
+        self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
+        self._add_option_regex(
+            'RAID_TYPE',
+            r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
+
+    _optional_equals = r'(?:\s*(?:=\s*)|\s+)'
+
+    def _add_option_string(self, directive):
+        regex = (r'(?P<directive>%s)%s'
+                 r"'(?P<val>(?:[^']|'')*?)'(?!')" %
+                 (re.escape(directive), self._optional_equals))
+        self._pr_options.append(_pr_compile(
+            regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
+        ))
+
+    def _add_option_word(self, directive):
+        regex = (r'(?P<directive>%s)%s'
+                 r'(?P<val>\w+)' %
+                 (re.escape(directive), self._optional_equals))
+        self._pr_options.append(_pr_compile(regex))
+
+    def _add_option_regex(self, directive, regex):
+        regex = (r'(?P<directive>%s)%s'
+                 r'(?P<val>%s)' %
+                 (re.escape(directive), self._optional_equals, regex))
+        self._pr_options.append(_pr_compile(regex))
+
+_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
+                           'PASSWORD', 'CONNECTION')
+
+
+def _pr_compile(regex, cleanup=None):
+    """Prepare a 2-tuple of compiled regex and callable."""
+
+    return (_re_compile(regex), cleanup)
+
+
+def _re_compile(regex):
+    """Compile a string to regex, I and UNICODE."""
+
+    return re.compile(regex, re.I | re.UNICODE)
diff --git a/app/lib/sqlalchemy/dialects/mysql/types.py b/app/lib/sqlalchemy/dialects/mysql/types.py
new file mode 100644
index 0000000..cf80d79
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/types.py
@@ -0,0 +1,766 @@
+# mysql/types.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import datetime
+from ... import exc, util
+from ... import types as sqltypes
+
+
+class _NumericType(object):
+    """Base for MySQL numeric types.
+
+    This is the base both for NUMERIC as well as INTEGER, hence
+    it's a mixin.
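+
+    It supplies the MySQL-specific ``unsigned`` and ``zerofill``
+    keyword arguments shared by the numeric types defined below.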
+ + """ + + def __init__(self, unsigned=False, zerofill=False, **kw): + self.unsigned = unsigned + self.zerofill = zerofill + super(_NumericType, self).__init__(**kw) + + def __repr__(self): + return util.generic_repr(self, + to_inspect=[_NumericType, sqltypes.Numeric]) + + +class _FloatType(_NumericType, sqltypes.Float): + def __init__(self, precision=None, scale=None, asdecimal=True, **kw): + if isinstance(self, (REAL, DOUBLE)) and \ + ( + (precision is None and scale is not None) or + (precision is not None and scale is None) + ): + raise exc.ArgumentError( + "You must specify both precision and scale or omit " + "both altogether.") + super(_FloatType, self).__init__( + precision=precision, asdecimal=asdecimal, **kw) + self.scale = scale + + def __repr__(self): + return util.generic_repr(self, to_inspect=[_FloatType, + _NumericType, + sqltypes.Float]) + + +class _IntegerType(_NumericType, sqltypes.Integer): + def __init__(self, display_width=None, **kw): + self.display_width = display_width + super(_IntegerType, self).__init__(**kw) + + def __repr__(self): + return util.generic_repr(self, to_inspect=[_IntegerType, + _NumericType, + sqltypes.Integer]) + + +class _StringType(sqltypes.String): + """Base for MySQL string types.""" + + def __init__(self, charset=None, collation=None, + ascii=False, binary=False, unicode=False, + national=False, **kw): + self.charset = charset + + # allow collate= or collation= + kw.setdefault('collation', kw.pop('collate', collation)) + + self.ascii = ascii + self.unicode = unicode + self.binary = binary + self.national = national + super(_StringType, self).__init__(**kw) + + def __repr__(self): + return util.generic_repr(self, + to_inspect=[_StringType, sqltypes.String]) + + +class _MatchType(sqltypes.Float, sqltypes.MatchType): + def __init__(self, **kw): + # TODO: float arguments? + sqltypes.Float.__init__(self) + sqltypes.MatchType.__init__(self) + + + +class NUMERIC(_NumericType, sqltypes.NUMERIC): + """MySQL NUMERIC type.""" + + __visit_name__ = 'NUMERIC' + + def __init__(self, precision=None, scale=None, asdecimal=True, **kw): + """Construct a NUMERIC. + + :param precision: Total digits in this number. If scale and precision + are both None, values are stored to limits allowed by the server. + + :param scale: The number of digits after the decimal point. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super(NUMERIC, self).__init__(precision=precision, + scale=scale, asdecimal=asdecimal, **kw) + + +class DECIMAL(_NumericType, sqltypes.DECIMAL): + """MySQL DECIMAL type.""" + + __visit_name__ = 'DECIMAL' + + def __init__(self, precision=None, scale=None, asdecimal=True, **kw): + """Construct a DECIMAL. + + :param precision: Total digits in this number. If scale and precision + are both None, values are stored to limits allowed by the server. + + :param scale: The number of digits after the decimal point. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. 
+ + """ + super(DECIMAL, self).__init__(precision=precision, scale=scale, + asdecimal=asdecimal, **kw) + + +class DOUBLE(_FloatType): + """MySQL DOUBLE type.""" + + __visit_name__ = 'DOUBLE' + + def __init__(self, precision=None, scale=None, asdecimal=True, **kw): + """Construct a DOUBLE. + + .. note:: + + The :class:`.DOUBLE` type by default converts from float + to Decimal, using a truncation that defaults to 10 digits. + Specify either ``scale=n`` or ``decimal_return_scale=n`` in order + to change this scale, or ``asdecimal=False`` to return values + directly as Python floating points. + + :param precision: Total digits in this number. If scale and precision + are both None, values are stored to limits allowed by the server. + + :param scale: The number of digits after the decimal point. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super(DOUBLE, self).__init__(precision=precision, scale=scale, + asdecimal=asdecimal, **kw) + + +class REAL(_FloatType, sqltypes.REAL): + """MySQL REAL type.""" + + __visit_name__ = 'REAL' + + def __init__(self, precision=None, scale=None, asdecimal=True, **kw): + """Construct a REAL. + + .. note:: + + The :class:`.REAL` type by default converts from float + to Decimal, using a truncation that defaults to 10 digits. + Specify either ``scale=n`` or ``decimal_return_scale=n`` in order + to change this scale, or ``asdecimal=False`` to return values + directly as Python floating points. + + :param precision: Total digits in this number. If scale and precision + are both None, values are stored to limits allowed by the server. + + :param scale: The number of digits after the decimal point. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super(REAL, self).__init__(precision=precision, scale=scale, + asdecimal=asdecimal, **kw) + + +class FLOAT(_FloatType, sqltypes.FLOAT): + """MySQL FLOAT type.""" + + __visit_name__ = 'FLOAT' + + def __init__(self, precision=None, scale=None, asdecimal=False, **kw): + """Construct a FLOAT. + + :param precision: Total digits in this number. If scale and precision + are both None, values are stored to limits allowed by the server. + + :param scale: The number of digits after the decimal point. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super(FLOAT, self).__init__(precision=precision, scale=scale, + asdecimal=asdecimal, **kw) + + def bind_processor(self, dialect): + return None + + +class INTEGER(_IntegerType, sqltypes.INTEGER): + """MySQL INTEGER type.""" + + __visit_name__ = 'INTEGER' + + def __init__(self, display_width=None, **kw): + """Construct an INTEGER. + + :param display_width: Optional, maximum display width for this number. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. 
Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super(INTEGER, self).__init__(display_width=display_width, **kw) + + +class BIGINT(_IntegerType, sqltypes.BIGINT): + """MySQL BIGINTEGER type.""" + + __visit_name__ = 'BIGINT' + + def __init__(self, display_width=None, **kw): + """Construct a BIGINTEGER. + + :param display_width: Optional, maximum display width for this number. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super(BIGINT, self).__init__(display_width=display_width, **kw) + + +class MEDIUMINT(_IntegerType): + """MySQL MEDIUMINTEGER type.""" + + __visit_name__ = 'MEDIUMINT' + + def __init__(self, display_width=None, **kw): + """Construct a MEDIUMINTEGER + + :param display_width: Optional, maximum display width for this number. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super(MEDIUMINT, self).__init__(display_width=display_width, **kw) + + +class TINYINT(_IntegerType): + """MySQL TINYINT type.""" + + __visit_name__ = 'TINYINT' + + def __init__(self, display_width=None, **kw): + """Construct a TINYINT. + + :param display_width: Optional, maximum display width for this number. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super(TINYINT, self).__init__(display_width=display_width, **kw) + + +class SMALLINT(_IntegerType, sqltypes.SMALLINT): + """MySQL SMALLINTEGER type.""" + + __visit_name__ = 'SMALLINT' + + def __init__(self, display_width=None, **kw): + """Construct a SMALLINTEGER. + + :param display_width: Optional, maximum display width for this number. + + :param unsigned: a boolean, optional. + + :param zerofill: Optional. If true, values will be stored as strings + left-padded with zeros. Note that this does not effect the values + returned by the underlying database API, which continue to be + numeric. + + """ + super(SMALLINT, self).__init__(display_width=display_width, **kw) + + +class BIT(sqltypes.TypeEngine): + """MySQL BIT type. + + This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater + for MyISAM, MEMORY, InnoDB and BDB. For older versions, use a + MSTinyInteger() type. + + """ + + __visit_name__ = 'BIT' + + def __init__(self, length=None): + """Construct a BIT. + + :param length: Optional, number of bits. + + """ + self.length = length + + def result_processor(self, dialect, coltype): + """Convert a MySQL's 64 bit, variable length binary string to a long. + + TODO: this is MySQL-db, pyodbc specific. OurSQL and mysqlconnector + already do this, so this logic should be moved to those dialects. + + """ + + def process(value): + if value is not None: + v = 0 + for i in value: + if not isinstance(i, int): + i = ord(i) # convert byte to int on Python 2 + v = v << 8 | i + return v + return value + return process + + +class TIME(sqltypes.TIME): + """MySQL TIME type. 
""" + + __visit_name__ = 'TIME' + + def __init__(self, timezone=False, fsp=None): + """Construct a MySQL TIME type. + + :param timezone: not used by the MySQL dialect. + :param fsp: fractional seconds precision value. + MySQL 5.6 supports storage of fractional seconds; + this parameter will be used when emitting DDL + for the TIME type. + + .. note:: + + DBAPI driver support for fractional seconds may + be limited; current support includes + MySQL Connector/Python. + + .. versionadded:: 0.8 The MySQL-specific TIME + type as well as fractional seconds support. + + """ + super(TIME, self).__init__(timezone=timezone) + self.fsp = fsp + + def result_processor(self, dialect, coltype): + time = datetime.time + + def process(value): + # convert from a timedelta value + if value is not None: + microseconds = value.microseconds + seconds = value.seconds + minutes = seconds // 60 + return time(minutes // 60, + minutes % 60, + seconds - minutes * 60, + microsecond=microseconds) + else: + return None + return process + + +class TIMESTAMP(sqltypes.TIMESTAMP): + """MySQL TIMESTAMP type. + + """ + + __visit_name__ = 'TIMESTAMP' + + def __init__(self, timezone=False, fsp=None): + """Construct a MySQL TIMESTAMP type. + + :param timezone: not used by the MySQL dialect. + :param fsp: fractional seconds precision value. + MySQL 5.6.4 supports storage of fractional seconds; + this parameter will be used when emitting DDL + for the TIMESTAMP type. + + .. note:: + + DBAPI driver support for fractional seconds may + be limited; current support includes + MySQL Connector/Python. + + .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.TIMESTAMP` + with fractional seconds support. + + """ + super(TIMESTAMP, self).__init__(timezone=timezone) + self.fsp = fsp + + +class DATETIME(sqltypes.DATETIME): + """MySQL DATETIME type. + + """ + + __visit_name__ = 'DATETIME' + + def __init__(self, timezone=False, fsp=None): + """Construct a MySQL DATETIME type. + + :param timezone: not used by the MySQL dialect. + :param fsp: fractional seconds precision value. + MySQL 5.6.4 supports storage of fractional seconds; + this parameter will be used when emitting DDL + for the DATETIME type. + + .. note:: + + DBAPI driver support for fractional seconds may + be limited; current support includes + MySQL Connector/Python. + + .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.DATETIME` + with fractional seconds support. + + """ + super(DATETIME, self).__init__(timezone=timezone) + self.fsp = fsp + + +class YEAR(sqltypes.TypeEngine): + """MySQL YEAR type, for single byte storage of years 1901-2155.""" + + __visit_name__ = 'YEAR' + + def __init__(self, display_width=None): + self.display_width = display_width + + +class TEXT(_StringType, sqltypes.TEXT): + """MySQL TEXT type, for text up to 2^16 characters.""" + + __visit_name__ = 'TEXT' + + def __init__(self, length=None, **kw): + """Construct a TEXT. + + :param length: Optional, if provided the server may optimize storage + by substituting the smallest TEXT type sufficient to store + ``length`` characters. + + :param charset: Optional, a column-level character set for this string + value. Takes precedence to 'ascii' or 'unicode' short-hand. + + :param collation: Optional, a column-level collation for this string + value. Takes precedence to 'binary' short-hand. + + :param ascii: Defaults to False: short-hand for the ``latin1`` + character set, generates ASCII in schema. 
+ + :param unicode: Defaults to False: short-hand for the ``ucs2`` + character set, generates UNICODE in schema. + + :param national: Optional. If true, use the server's configured + national character set. + + :param binary: Defaults to False: short-hand, pick the binary + collation type that matches the column's character set. Generates + BINARY in schema. This does not affect the type of data stored, + only the collation of character data. + + """ + super(TEXT, self).__init__(length=length, **kw) + + +class TINYTEXT(_StringType): + """MySQL TINYTEXT type, for text up to 2^8 characters.""" + + __visit_name__ = 'TINYTEXT' + + def __init__(self, **kwargs): + """Construct a TINYTEXT. + + :param charset: Optional, a column-level character set for this string + value. Takes precedence to 'ascii' or 'unicode' short-hand. + + :param collation: Optional, a column-level collation for this string + value. Takes precedence to 'binary' short-hand. + + :param ascii: Defaults to False: short-hand for the ``latin1`` + character set, generates ASCII in schema. + + :param unicode: Defaults to False: short-hand for the ``ucs2`` + character set, generates UNICODE in schema. + + :param national: Optional. If true, use the server's configured + national character set. + + :param binary: Defaults to False: short-hand, pick the binary + collation type that matches the column's character set. Generates + BINARY in schema. This does not affect the type of data stored, + only the collation of character data. + + """ + super(TINYTEXT, self).__init__(**kwargs) + + +class MEDIUMTEXT(_StringType): + """MySQL MEDIUMTEXT type, for text up to 2^24 characters.""" + + __visit_name__ = 'MEDIUMTEXT' + + def __init__(self, **kwargs): + """Construct a MEDIUMTEXT. + + :param charset: Optional, a column-level character set for this string + value. Takes precedence to 'ascii' or 'unicode' short-hand. + + :param collation: Optional, a column-level collation for this string + value. Takes precedence to 'binary' short-hand. + + :param ascii: Defaults to False: short-hand for the ``latin1`` + character set, generates ASCII in schema. + + :param unicode: Defaults to False: short-hand for the ``ucs2`` + character set, generates UNICODE in schema. + + :param national: Optional. If true, use the server's configured + national character set. + + :param binary: Defaults to False: short-hand, pick the binary + collation type that matches the column's character set. Generates + BINARY in schema. This does not affect the type of data stored, + only the collation of character data. + + """ + super(MEDIUMTEXT, self).__init__(**kwargs) + + +class LONGTEXT(_StringType): + """MySQL LONGTEXT type, for text up to 2^32 characters.""" + + __visit_name__ = 'LONGTEXT' + + def __init__(self, **kwargs): + """Construct a LONGTEXT. + + :param charset: Optional, a column-level character set for this string + value. Takes precedence to 'ascii' or 'unicode' short-hand. + + :param collation: Optional, a column-level collation for this string + value. Takes precedence to 'binary' short-hand. + + :param ascii: Defaults to False: short-hand for the ``latin1`` + character set, generates ASCII in schema. + + :param unicode: Defaults to False: short-hand for the ``ucs2`` + character set, generates UNICODE in schema. + + :param national: Optional. If true, use the server's configured + national character set. + + :param binary: Defaults to False: short-hand, pick the binary + collation type that matches the column's character set. Generates + BINARY in schema. 
This does not affect the type of data stored, + only the collation of character data. + + """ + super(LONGTEXT, self).__init__(**kwargs) + + +class VARCHAR(_StringType, sqltypes.VARCHAR): + """MySQL VARCHAR type, for variable-length character data.""" + + __visit_name__ = 'VARCHAR' + + def __init__(self, length=None, **kwargs): + """Construct a VARCHAR. + + :param charset: Optional, a column-level character set for this string + value. Takes precedence to 'ascii' or 'unicode' short-hand. + + :param collation: Optional, a column-level collation for this string + value. Takes precedence to 'binary' short-hand. + + :param ascii: Defaults to False: short-hand for the ``latin1`` + character set, generates ASCII in schema. + + :param unicode: Defaults to False: short-hand for the ``ucs2`` + character set, generates UNICODE in schema. + + :param national: Optional. If true, use the server's configured + national character set. + + :param binary: Defaults to False: short-hand, pick the binary + collation type that matches the column's character set. Generates + BINARY in schema. This does not affect the type of data stored, + only the collation of character data. + + """ + super(VARCHAR, self).__init__(length=length, **kwargs) + + +class CHAR(_StringType, sqltypes.CHAR): + """MySQL CHAR type, for fixed-length character data.""" + + __visit_name__ = 'CHAR' + + def __init__(self, length=None, **kwargs): + """Construct a CHAR. + + :param length: Maximum data length, in characters. + + :param binary: Optional, use the default binary collation for the + national character set. This does not affect the type of data + stored, use a BINARY type for binary data. + + :param collation: Optional, request a particular collation. Must be + compatible with the national character set. + + """ + super(CHAR, self).__init__(length=length, **kwargs) + + @classmethod + def _adapt_string_for_cast(self, type_): + # copy the given string type into a CHAR + # for the purposes of rendering a CAST expression + type_ = sqltypes.to_instance(type_) + if isinstance(type_, sqltypes.CHAR): + return type_ + elif isinstance(type_, _StringType): + return CHAR( + length=type_.length, + charset=type_.charset, + collation=type_.collation, + ascii=type_.ascii, + binary=type_.binary, + unicode=type_.unicode, + national=False # not supported in CAST + ) + else: + return CHAR(length=type_.length) + + +class NVARCHAR(_StringType, sqltypes.NVARCHAR): + """MySQL NVARCHAR type. + + For variable-length character data in the server's configured national + character set. + """ + + __visit_name__ = 'NVARCHAR' + + def __init__(self, length=None, **kwargs): + """Construct an NVARCHAR. + + :param length: Maximum data length, in characters. + + :param binary: Optional, use the default binary collation for the + national character set. This does not affect the type of data + stored, use a BINARY type for binary data. + + :param collation: Optional, request a particular collation. Must be + compatible with the national character set. + + """ + kwargs['national'] = True + super(NVARCHAR, self).__init__(length=length, **kwargs) + + +class NCHAR(_StringType, sqltypes.NCHAR): + """MySQL NCHAR type. + + For fixed-length character data in the server's configured national + character set. + """ + + __visit_name__ = 'NCHAR' + + def __init__(self, length=None, **kwargs): + """Construct an NCHAR. + + :param length: Maximum data length, in characters. + + :param binary: Optional, use the default binary collation for the + national character set. 
This does not affect the type of data + stored, use a BINARY type for binary data. + + :param collation: Optional, request a particular collation. Must be + compatible with the national character set. + + """ + kwargs['national'] = True + super(NCHAR, self).__init__(length=length, **kwargs) + + +class TINYBLOB(sqltypes._Binary): + """MySQL TINYBLOB type, for binary data up to 2^8 bytes.""" + + __visit_name__ = 'TINYBLOB' + + +class MEDIUMBLOB(sqltypes._Binary): + """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes.""" + + __visit_name__ = 'MEDIUMBLOB' + + +class LONGBLOB(sqltypes._Binary): + """MySQL LONGBLOB type, for binary data up to 2^32 bytes.""" + + __visit_name__ = 'LONGBLOB' diff --git a/app/lib/sqlalchemy/dialects/mysql/zxjdbc.py b/app/lib/sqlalchemy/dialects/mysql/zxjdbc.py new file mode 100644 index 0000000..9c92be4 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/mysql/zxjdbc.py @@ -0,0 +1,117 @@ +# mysql/zxjdbc.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" + +.. dialect:: mysql+zxjdbc + :name: zxjdbc for Jython + :dbapi: zxjdbc + :connectstring: mysql+zxjdbc://:@[:]/\ + + :driverurl: http://dev.mysql.com/downloads/connector/j/ + + .. note:: Jython is not supported by current versions of SQLAlchemy. The + zxjdbc dialect should be considered as experimental. + +Character Sets +-------------- + +SQLAlchemy zxjdbc dialects pass unicode straight through to the +zxjdbc/JDBC layer. To allow multiple character sets to be sent from the +MySQL Connector/J JDBC driver, by default SQLAlchemy sets its +``characterEncoding`` connection property to ``UTF-8``. It may be +overridden via a ``create_engine`` URL parameter. + +""" +import re + +from ... import types as sqltypes, util +from ...connectors.zxJDBC import ZxJDBCConnector +from .base import BIT, MySQLDialect, MySQLExecutionContext + + +class _ZxJDBCBit(BIT): + def result_processor(self, dialect, coltype): + """Converts boolean or byte arrays from MySQL Connector/J to longs.""" + def process(value): + if value is None: + return value + if isinstance(value, bool): + return int(value) + v = 0 + for i in value: + v = v << 8 | (i & 0xff) + value = v + return value + return process + + +class MySQLExecutionContext_zxjdbc(MySQLExecutionContext): + def get_lastrowid(self): + cursor = self.create_cursor() + cursor.execute("SELECT LAST_INSERT_ID()") + lastrowid = cursor.fetchone()[0] + cursor.close() + return lastrowid + + +class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect): + jdbc_db_name = 'mysql' + jdbc_driver_name = 'com.mysql.jdbc.Driver' + + execution_ctx_cls = MySQLExecutionContext_zxjdbc + + colspecs = util.update_copy( + MySQLDialect.colspecs, + { + sqltypes.Time: sqltypes.Time, + BIT: _ZxJDBCBit + } + ) + + def _detect_charset(self, connection): + """Sniff out the character set in use for connection results.""" + # Prefer 'character_set_results' for the current connection over the + # value in the driver. SET NAMES or individual variable SETs will + # change the charset without updating the driver's view of the world. + # + # If it's decided that issuing that sort of SQL leaves you SOL, then + # this can prefer the driver value. 
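+        #
+        # The SHOW VARIABLES query below returns (variable_name, value)
+        # rows such as ('character_set_connection', 'utf8'), which are
+        # folded into a dict and probed in priority order.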
+        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
+        opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs))
+        for key in ('character_set_connection', 'character_set'):
+            if opts.get(key, None):
+                return opts[key]
+
+        util.warn("Could not detect the connection character set. "
+                  "Assuming latin1.")
+        return 'latin1'
+
+    def _driver_kwargs(self):
+        """return kw arg dict to be sent to connect()."""
+        return dict(characterEncoding='UTF-8', yearIsDateType='false')
+
+    def _extract_error_code(self, exception):
+        # e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
+        # [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
+        m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args))
+        if m:
+            return int(m.group(1))
+
+    def _get_server_version_info(self, connection):
+        dbapi_con = connection.connection
+        version = []
+        r = re.compile(r'[.\-]')
+        for n in r.split(dbapi_con.dbversion):
+            try:
+                version.append(int(n))
+            except ValueError:
+                version.append(n)
+        return tuple(version)
+
+dialect = MySQLDialect_zxjdbc
diff --git a/app/lib/sqlalchemy/dialects/oracle/__init__.py b/app/lib/sqlalchemy/dialects/oracle/__init__.py
new file mode 100644
index 0000000..210fe50
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/oracle/__init__.py
@@ -0,0 +1,24 @@
+# oracle/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from sqlalchemy.dialects.oracle import base, cx_oracle, zxjdbc
+
+base.dialect = cx_oracle.dialect
+
+from sqlalchemy.dialects.oracle.base import \
+    VARCHAR, NVARCHAR, CHAR, DATE, NUMBER,\
+    BLOB, BFILE, CLOB, NCLOB, TIMESTAMP, RAW,\
+    FLOAT, DOUBLE_PRECISION, LONG, dialect, INTERVAL,\
+    VARCHAR2, NVARCHAR2, ROWID
+
+
+__all__ = (
+    'VARCHAR', 'NVARCHAR', 'CHAR', 'DATE', 'NUMBER',
+    'BLOB', 'BFILE', 'CLOB', 'NCLOB', 'TIMESTAMP', 'RAW',
+    'FLOAT', 'DOUBLE_PRECISION', 'LONG', 'dialect', 'INTERVAL',
+    'VARCHAR2', 'NVARCHAR2', 'ROWID'
+)
diff --git a/app/lib/sqlalchemy/dialects/oracle/base.py b/app/lib/sqlalchemy/dialects/oracle/base.py
new file mode 100644
index 0000000..7c23e9c
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/oracle/base.py
@@ -0,0 +1,1602 @@
+# oracle/base.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: oracle
+    :name: Oracle
+
+    Oracle versions 8 through current (11g at the time of this writing) are
+    supported.
+
+Connect Arguments
+-----------------
+
+The dialect supports several :func:`~sqlalchemy.create_engine()` arguments
+which affect the behavior of the dialect regardless of driver in use.
+
+* ``use_ansi`` - Use ANSI JOIN constructs (see the section on Oracle 8).
+  Defaults to ``True``.  If ``False``, Oracle-8 compatible constructs are used
+  for joins.
+
+* ``optimize_limits`` - defaults to ``False``.  see the section on
+  LIMIT/OFFSET.
+
+* ``use_binds_for_limits`` - defaults to ``True``.  see the section on
+  LIMIT/OFFSET.
+
+Auto Increment Behavior
+-----------------------
+
+SQLAlchemy Table objects which include integer primary keys are usually
+assumed to have "autoincrementing" behavior, meaning they can generate their
+own primary key values upon INSERT.  Since Oracle has no "autoincrement"
+feature, SQLAlchemy relies upon sequences to produce these values.  With the
+Oracle dialect, *a sequence must always be explicitly specified to enable
+autoincrement*.  This diverges from the majority of documentation
+examples, which assume the usage of an autoincrement-capable database.  To
+specify sequences, use the sqlalchemy.schema.Sequence object which is passed
+to a Column construct::
+
+  t = Table('mytable', metadata,
+        Column('id', Integer, Sequence('id_seq'), primary_key=True),
+        Column(...), ...
+  )
+
+This step is also required when using table reflection, i.e. autoload=True::
+
+  t = Table('mytable', metadata,
+        Column('id', Integer, Sequence('id_seq'), primary_key=True),
+        autoload=True
+  )
+
+Identifier Casing
+-----------------
+
+In Oracle, the data dictionary represents all case insensitive identifier
+names using UPPERCASE text.  SQLAlchemy on the other hand considers an
+all-lower case identifier name to be case insensitive.  The Oracle dialect
+converts all case insensitive identifiers to and from those two formats during
+schema level communication, such as reflection of tables and indexes.  Using
+an UPPERCASE name on the SQLAlchemy side indicates a case sensitive
+identifier, and SQLAlchemy will quote the name - this will cause mismatches
+against data dictionary data received from Oracle, so unless identifier names
+have been truly created as case sensitive (i.e. using quoted names), all
+lowercase names should be used on the SQLAlchemy side.
+
+
+LIMIT/OFFSET Support
+--------------------
+
+Oracle has no support for the LIMIT or OFFSET keywords.  SQLAlchemy uses
+a wrapped subquery approach in conjunction with ROWNUM.  The exact methodology
+is taken from
+http://www.oracle.com/technetwork/issue-archive/2006/06-sep/o56asktom-086197.html .
+
+There are two options which affect its behavior:
+
+* the "FIRST_ROWS()" optimization keyword is not used by default.  To enable
+  the usage of this optimization directive, specify ``optimize_limits=True``
+  to :func:`.create_engine`.
+* the values passed for the limit/offset are sent as bound parameters.  Some
+  users have observed that Oracle produces a poor query plan when the values
+  are sent as binds and not rendered literally.  To render the limit/offset
+  values literally within the SQL statement, specify
+  ``use_binds_for_limits=False`` to :func:`.create_engine`.
+
+Some users have reported better performance when the entirely different
+approach of a window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to
+provide LIMIT/OFFSET (note that the majority of users don't observe this).
+To suit this case the method used for LIMIT/OFFSET can be replaced entirely.
+See the recipe at
+http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault
+which installs a select compiler that overrides the generation of limit/offset
+with a window function.
+
+.. _oracle_returning:
+
+RETURNING Support
+-----------------
+
+The Oracle database supports a limited form of RETURNING, in order to retrieve
+result sets of matched rows from INSERT, UPDATE and DELETE statements.
+Oracle's RETURNING..INTO syntax only supports one row being returned, as it
+relies upon OUT parameters in order to function.  In addition, supported
+DBAPIs have further limitations (see :ref:`cx_oracle_returning`).
+
+SQLAlchemy's "implicit returning" feature, which employs RETURNING within an
+INSERT and sometimes an UPDATE statement in order to fetch newly generated
+primary key values and other SQL defaults and expressions, is normally enabled
+on the Oracle backend.  By default, "implicit returning" typically only
By default, "implicit returning" typically only +fetches the value of a single ``nextval(some_seq)`` expression embedded into +an INSERT in order to increment a sequence within an INSERT statement and get +the value back at the same time. To disable this feature across the board, +specify ``implicit_returning=False`` to :func:`.create_engine`:: + + engine = create_engine("oracle://scott:tiger@dsn", + implicit_returning=False) + +Implicit returning can also be disabled on a table-by-table basis as a table +option:: + + # Core Table + my_table = Table("my_table", metadata, ..., implicit_returning=False) + + + # declarative + class MyClass(Base): + __tablename__ = 'my_table' + __table_args__ = {"implicit_returning": False} + +.. seealso:: + + :ref:`cx_oracle_returning` - additional cx_oracle-specific restrictions on + implicit returning. + +ON UPDATE CASCADE +----------------- + +Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based +solution is available at +http://asktom.oracle.com/tkyte/update_cascade/index.html . + +When using the SQLAlchemy ORM, the ORM has limited ability to manually issue +cascading updates - specify ForeignKey objects using the +"deferrable=True, initially='deferred'" keyword arguments, +and specify "passive_updates=False" on each relationship(). + +Oracle 8 Compatibility +---------------------- + +When Oracle 8 is detected, the dialect internally configures itself to the +following behaviors: + +* the use_ansi flag is set to False. This has the effect of converting all + JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN + makes use of Oracle's (+) operator. + +* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when + the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are + issued instead. This because these types don't seem to work correctly on + Oracle 8 even though they are available. The + :class:`~sqlalchemy.types.NVARCHAR` and + :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate + NVARCHAR2 and NCLOB. + +* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy + encodes all Python unicode objects to "string" before passing in as bind + parameters. + +Synonym/DBLINK Reflection +------------------------- + +When using reflection with Table objects, the dialect can optionally search +for tables indicated by synonyms, either in local or remote schemas or +accessed over DBLINK, by passing the flag ``oracle_resolve_synonyms=True`` as +a keyword argument to the :class:`.Table` construct:: + + some_table = Table('some_table', autoload=True, + autoload_with=some_engine, + oracle_resolve_synonyms=True) + +When this flag is set, the given name (such as ``some_table`` above) will +be searched not just in the ``ALL_TABLES`` view, but also within the +``ALL_SYNONYMS`` view to see if this name is actually a synonym to another +name. If the synonym is located and refers to a DBLINK, the oracle dialect +knows how to locate the table's information using DBLINK syntax(e.g. +``@dblink``). + +``oracle_resolve_synonyms`` is accepted wherever reflection arguments are +accepted, including methods such as :meth:`.MetaData.reflect` and +:meth:`.Inspector.get_columns`. + +If synonyms are not in use, this flag should be left disabled. + +Table names with SYSTEM/SYSAUX tablespaces +------------------------------------------- + +The :meth:`.Inspector.get_table_names` and +:meth:`.Inspector.get_temp_table_names` +methods each return a list of table names for the current engine. 
+are also part of the reflection which occurs within an operation such as
+:meth:`.MetaData.reflect`.  By default, these operations exclude the ``SYSTEM``
+and ``SYSAUX`` tablespaces from the operation.  In order to change this, the
+default list of tablespaces excluded can be changed at the engine level using
+the ``exclude_tablespaces`` parameter::
+
+    # exclude SYSAUX and SOME_TABLESPACE, but not SYSTEM
+    e = create_engine(
+        "oracle://scott:tiger@xe",
+        exclude_tablespaces=["SYSAUX", "SOME_TABLESPACE"])
+
+.. versionadded:: 1.1
+
+DateTime Compatibility
+----------------------
+
+Oracle has no datatype known as ``DATETIME``; it instead has only ``DATE``,
+which can actually store a date and time value.  For this reason, the Oracle
+dialect provides a type :class:`.oracle.DATE` which is a subclass of
+:class:`.DateTime`.  This type has no special behavior, and is only
+present as a "marker" for this type; additionally, when a database column
+is reflected and the type is reported as ``DATE``, the time-supporting
+:class:`.oracle.DATE` type is used.
+
+.. versionchanged:: 0.9.4 Added :class:`.oracle.DATE` to subclass
+   :class:`.DateTime`.  This is a change as previous versions
+   would reflect a ``DATE`` column as :class:`.types.DATE`, which subclasses
+   :class:`.Date`.  The only significance here is for schemes that are
+   examining the type of column for use in special Python translations or
+   for migrating schemas to other database backends.
+
+.. _oracle_table_options:
+
+Oracle Table Options
+-------------------------
+
+The CREATE TABLE phrase supports the following options with Oracle
+in conjunction with the :class:`.Table` construct:
+
+
+* ``ON COMMIT``::
+
+    Table(
+        "some_table", metadata, ...,
+        prefixes=['GLOBAL TEMPORARY'], oracle_on_commit='PRESERVE ROWS')
+
+.. versionadded:: 1.0.0
+
+* ``COMPRESS``::
+
+    Table('mytable', metadata, Column('data', String(32)),
+          oracle_compress=True)
+
+    Table('mytable', metadata, Column('data', String(32)),
+          oracle_compress=6)
+
+  The ``oracle_compress`` parameter accepts either an integer compression
+  level, or ``True`` to use the default compression level.
+
+.. versionadded:: 1.0.0
+
+.. _oracle_index_options:
+
+Oracle Specific Index Options
+-----------------------------
+
+Bitmap Indexes
+~~~~~~~~~~~~~~
+
+You can specify the ``oracle_bitmap`` parameter to create a bitmap index
+instead of a B-tree index::
+
+    Index('my_index', my_table.c.data, oracle_bitmap=True)
+
+Bitmap indexes cannot be unique and cannot be compressed.  SQLAlchemy will not
+check for such limitations, only the database will.
+
+.. versionadded:: 1.0.0
+
+Index compression
+~~~~~~~~~~~~~~~~~
+
+Oracle has a more efficient storage mode for indexes containing lots of
+repeated values.  Use the ``oracle_compress`` parameter to turn on key
+compression::
+
+    Index('my_index', my_table.c.data, oracle_compress=True)
+
+    Index('my_index', my_table.c.data1, my_table.c.data2, unique=True,
+          oracle_compress=1)
+
+The ``oracle_compress`` parameter accepts either an integer specifying the
+number of prefix columns to compress, or ``True`` to use the default (all
+columns for non-unique indexes, all but the last column for unique indexes).
+
+.. 
versionadded:: 1.0.0 + +""" + +import re + +from sqlalchemy import util, sql +from sqlalchemy.engine import default, reflection +from sqlalchemy.sql import compiler, visitors, expression, util as sql_util +from sqlalchemy.sql import operators as sql_operators +from sqlalchemy.sql.elements import quoted_name +from sqlalchemy import types as sqltypes, schema as sa_schema +from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, \ + BLOB, CLOB, TIMESTAMP, FLOAT + +RESERVED_WORDS = \ + set('SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN ' + 'DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED ' + 'ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE ' + 'ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE ' + 'BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES ' + 'AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS ' + 'NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER ' + 'CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR ' + 'DECIMAL UNION PUBLIC AND START UID COMMENT CURRENT LEVEL'.split()) + +NO_ARG_FNS = set('UID CURRENT_DATE SYSDATE USER ' + 'CURRENT_TIME CURRENT_TIMESTAMP'.split()) + + +class RAW(sqltypes._Binary): + __visit_name__ = 'RAW' +OracleRaw = RAW + + +class NCLOB(sqltypes.Text): + __visit_name__ = 'NCLOB' + + +class VARCHAR2(VARCHAR): + __visit_name__ = 'VARCHAR2' + +NVARCHAR2 = NVARCHAR + + +class NUMBER(sqltypes.Numeric, sqltypes.Integer): + __visit_name__ = 'NUMBER' + + def __init__(self, precision=None, scale=None, asdecimal=None): + if asdecimal is None: + asdecimal = bool(scale and scale > 0) + + super(NUMBER, self).__init__( + precision=precision, scale=scale, asdecimal=asdecimal) + + def adapt(self, impltype): + ret = super(NUMBER, self).adapt(impltype) + # leave a hint for the DBAPI handler + ret._is_oracle_number = True + return ret + + @property + def _type_affinity(self): + if bool(self.scale and self.scale > 0): + return sqltypes.Numeric + else: + return sqltypes.Integer + + +class DOUBLE_PRECISION(sqltypes.Numeric): + __visit_name__ = 'DOUBLE_PRECISION' + + def __init__(self, precision=None, scale=None, asdecimal=None): + if asdecimal is None: + asdecimal = False + + super(DOUBLE_PRECISION, self).__init__( + precision=precision, scale=scale, asdecimal=asdecimal) + + +class BFILE(sqltypes.LargeBinary): + __visit_name__ = 'BFILE' + + +class LONG(sqltypes.Text): + __visit_name__ = 'LONG' + + +class DATE(sqltypes.DateTime): + """Provide the oracle DATE type. + + This type has no special Python behavior, except that it subclasses + :class:`.types.DateTime`; this is to suit the fact that the Oracle + ``DATE`` type supports a time value. + + .. versionadded:: 0.9.4 + + """ + __visit_name__ = 'DATE' + + def _compare_type_affinity(self, other): + return other._type_affinity in (sqltypes.DateTime, sqltypes.Date) + + +class INTERVAL(sqltypes.TypeEngine): + __visit_name__ = 'INTERVAL' + + def __init__(self, + day_precision=None, + second_precision=None): + """Construct an INTERVAL. + + Note that only DAY TO SECOND intervals are currently supported. + This is due to a lack of support for YEAR TO MONTH intervals + within available DBAPIs (cx_oracle and zxjdbc). + + :param day_precision: the day precision value. this is the number of + digits to store for the day field. Defaults to "2" + :param second_precision: the second precision value. this is the + number of digits to store for the fractional seconds field. + Defaults to "6". 
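+
+        For example, ``INTERVAL(day_precision=2, second_precision=6)``
+        renders in DDL as ``INTERVAL DAY(2) TO SECOND(6)``.  A minimal
+        sketch (the table and column names here are illustrative only)::
+
+            from sqlalchemy import Column, MetaData, Table
+            from sqlalchemy.dialects.oracle import INTERVAL
+
+            t = Table('events', MetaData(),
+                      Column('duration',
+                             INTERVAL(day_precision=2, second_precision=6)))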
+ + """ + self.day_precision = day_precision + self.second_precision = second_precision + + @classmethod + def _adapt_from_generic_interval(cls, interval): + return INTERVAL(day_precision=interval.day_precision, + second_precision=interval.second_precision) + + @property + def _type_affinity(self): + return sqltypes.Interval + + +class ROWID(sqltypes.TypeEngine): + """Oracle ROWID type. + + When used in a cast() or similar, generates ROWID. + + """ + __visit_name__ = 'ROWID' + + +class _OracleBoolean(sqltypes.Boolean): + def get_dbapi_type(self, dbapi): + return dbapi.NUMBER + +colspecs = { + sqltypes.Boolean: _OracleBoolean, + sqltypes.Interval: INTERVAL, + sqltypes.DateTime: DATE +} + +ischema_names = { + 'VARCHAR2': VARCHAR, + 'NVARCHAR2': NVARCHAR, + 'CHAR': CHAR, + 'DATE': DATE, + 'NUMBER': NUMBER, + 'BLOB': BLOB, + 'BFILE': BFILE, + 'CLOB': CLOB, + 'NCLOB': NCLOB, + 'TIMESTAMP': TIMESTAMP, + 'TIMESTAMP WITH TIME ZONE': TIMESTAMP, + 'INTERVAL DAY TO SECOND': INTERVAL, + 'RAW': RAW, + 'FLOAT': FLOAT, + 'DOUBLE PRECISION': DOUBLE_PRECISION, + 'LONG': LONG, +} + + +class OracleTypeCompiler(compiler.GenericTypeCompiler): + # Note: + # Oracle DATE == DATETIME + # Oracle does not allow milliseconds in DATE + # Oracle does not support TIME columns + + def visit_datetime(self, type_, **kw): + return self.visit_DATE(type_, **kw) + + def visit_float(self, type_, **kw): + return self.visit_FLOAT(type_, **kw) + + def visit_unicode(self, type_, **kw): + if self.dialect._supports_nchar: + return self.visit_NVARCHAR2(type_, **kw) + else: + return self.visit_VARCHAR2(type_, **kw) + + def visit_INTERVAL(self, type_, **kw): + return "INTERVAL DAY%s TO SECOND%s" % ( + type_.day_precision is not None and + "(%d)" % type_.day_precision or + "", + type_.second_precision is not None and + "(%d)" % type_.second_precision or + "", + ) + + def visit_LONG(self, type_, **kw): + return "LONG" + + def visit_TIMESTAMP(self, type_, **kw): + if type_.timezone: + return "TIMESTAMP WITH TIME ZONE" + else: + return "TIMESTAMP" + + def visit_DOUBLE_PRECISION(self, type_, **kw): + return self._generate_numeric(type_, "DOUBLE PRECISION", **kw) + + def visit_NUMBER(self, type_, **kw): + return self._generate_numeric(type_, "NUMBER", **kw) + + def _generate_numeric(self, type_, name, precision=None, scale=None, **kw): + if precision is None: + precision = type_.precision + + if scale is None: + scale = getattr(type_, 'scale', None) + + if precision is None: + return name + elif scale is None: + n = "%(name)s(%(precision)s)" + return n % {'name': name, 'precision': precision} + else: + n = "%(name)s(%(precision)s, %(scale)s)" + return n % {'name': name, 'precision': precision, 'scale': scale} + + def visit_string(self, type_, **kw): + return self.visit_VARCHAR2(type_, **kw) + + def visit_VARCHAR2(self, type_, **kw): + return self._visit_varchar(type_, '', '2') + + def visit_NVARCHAR2(self, type_, **kw): + return self._visit_varchar(type_, 'N', '2') + visit_NVARCHAR = visit_NVARCHAR2 + + def visit_VARCHAR(self, type_, **kw): + return self._visit_varchar(type_, '', '') + + def _visit_varchar(self, type_, n, num): + if not type_.length: + return "%(n)sVARCHAR%(two)s" % {'two': num, 'n': n} + elif not n and self.dialect._supports_char_length: + varchar = "VARCHAR%(two)s(%(length)s CHAR)" + return varchar % {'length': type_.length, 'two': num} + else: + varchar = "%(n)sVARCHAR%(two)s(%(length)s)" + return varchar % {'length': type_.length, 'two': num, 'n': n} + + def visit_text(self, type_, **kw): + return self.visit_CLOB(type_, 
**kw) + + def visit_unicode_text(self, type_, **kw): + if self.dialect._supports_nchar: + return self.visit_NCLOB(type_, **kw) + else: + return self.visit_CLOB(type_, **kw) + + def visit_large_binary(self, type_, **kw): + return self.visit_BLOB(type_, **kw) + + def visit_big_integer(self, type_, **kw): + return self.visit_NUMBER(type_, precision=19, **kw) + + def visit_boolean(self, type_, **kw): + return self.visit_SMALLINT(type_, **kw) + + def visit_RAW(self, type_, **kw): + if type_.length: + return "RAW(%(length)s)" % {'length': type_.length} + else: + return "RAW" + + def visit_ROWID(self, type_, **kw): + return "ROWID" + + +class OracleCompiler(compiler.SQLCompiler): + """Oracle compiler modifies the lexical structure of Select + statements to work under non-ANSI configured Oracle databases, if + the use_ansi flag is False. + """ + + compound_keywords = util.update_copy( + compiler.SQLCompiler.compound_keywords, + { + expression.CompoundSelect.EXCEPT: 'MINUS' + } + ) + + def __init__(self, *args, **kwargs): + self.__wheres = {} + self._quoted_bind_names = {} + super(OracleCompiler, self).__init__(*args, **kwargs) + + def visit_mod_binary(self, binary, operator, **kw): + return "mod(%s, %s)" % (self.process(binary.left, **kw), + self.process(binary.right, **kw)) + + def visit_now_func(self, fn, **kw): + return "CURRENT_TIMESTAMP" + + def visit_char_length_func(self, fn, **kw): + return "LENGTH" + self.function_argspec(fn, **kw) + + def visit_match_op_binary(self, binary, operator, **kw): + return "CONTAINS (%s, %s)" % (self.process(binary.left), + self.process(binary.right)) + + def visit_true(self, expr, **kw): + return '1' + + def visit_false(self, expr, **kw): + return '0' + + def get_cte_preamble(self, recursive): + return "WITH" + + def get_select_hint_text(self, byfroms): + return " ".join( + "/*+ %s */" % text for table, text in byfroms.items() + ) + + def function_argspec(self, fn, **kw): + if len(fn.clauses) > 0 or fn.name.upper() not in NO_ARG_FNS: + return compiler.SQLCompiler.function_argspec(self, fn, **kw) + else: + return "" + + def default_from(self): + """Called when a ``SELECT`` statement has no froms, + and no ``FROM`` clause is to be appended. + + The Oracle compiler tacks a "FROM DUAL" to the statement. 
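+
+        For example, a from-less statement such as
+        ``select([func.current_timestamp()])`` renders along the lines of
+        ``SELECT CURRENT_TIMESTAMP ... FROM DUAL`` (an illustrative
+        sketch; the exact column labeling varies).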
+ """ + + return " FROM DUAL" + + def visit_join(self, join, **kwargs): + if self.dialect.use_ansi: + return compiler.SQLCompiler.visit_join(self, join, **kwargs) + else: + kwargs['asfrom'] = True + if isinstance(join.right, expression.FromGrouping): + right = join.right.element + else: + right = join.right + return self.process(join.left, **kwargs) + \ + ", " + self.process(right, **kwargs) + + def _get_nonansi_join_whereclause(self, froms): + clauses = [] + + def visit_join(join): + if join.isouter: + def visit_binary(binary): + if binary.operator == sql_operators.eq: + if join.right.is_derived_from(binary.left.table): + binary.left = _OuterJoinColumn(binary.left) + elif join.right.is_derived_from(binary.right.table): + binary.right = _OuterJoinColumn(binary.right) + clauses.append(visitors.cloned_traverse( + join.onclause, {}, {'binary': visit_binary})) + else: + clauses.append(join.onclause) + + for j in join.left, join.right: + if isinstance(j, expression.Join): + visit_join(j) + elif isinstance(j, expression.FromGrouping): + visit_join(j.element) + + for f in froms: + if isinstance(f, expression.Join): + visit_join(f) + + if not clauses: + return None + else: + return sql.and_(*clauses) + + def visit_outer_join_column(self, vc, **kw): + return self.process(vc.column, **kw) + "(+)" + + def visit_sequence(self, seq): + return (self.dialect.identifier_preparer.format_sequence(seq) + + ".nextval") + + def get_render_as_alias_suffix(self, alias_name_text): + """Oracle doesn't like ``FROM table AS alias``""" + + return " " + alias_name_text + + def returning_clause(self, stmt, returning_cols): + columns = [] + binds = [] + for i, column in enumerate( + expression._select_iterables(returning_cols)): + if column.type._has_column_expression: + col_expr = column.type.column_expression(column) + else: + col_expr = column + outparam = sql.outparam("ret_%d" % i, type_=column.type) + self.binds[outparam.key] = outparam + binds.append( + self.bindparam_string(self._truncate_bindparam(outparam))) + columns.append( + self.process(col_expr, within_columns_clause=False)) + + self._add_to_result_map( + outparam.key, outparam.key, + (column, getattr(column, 'name', None), + getattr(column, 'key', None)), + column.type + ) + + return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds) + + def _TODO_visit_compound_select(self, select): + """Need to determine how to get ``LIMIT``/``OFFSET`` into a + ``UNION`` for Oracle. + """ + pass + + def visit_select(self, select, **kwargs): + """Look for ``LIMIT`` and OFFSET in a select statement, and if + so tries to wrap it in a subquery with ``rownum`` criterion. + """ + + if not getattr(select, '_oracle_visit', None): + if not self.dialect.use_ansi: + froms = self._display_froms_for_select( + select, kwargs.get('asfrom', False)) + whereclause = self._get_nonansi_join_whereclause(froms) + if whereclause is not None: + select = select.where(whereclause) + select._oracle_visit = True + + limit_clause = select._limit_clause + offset_clause = select._offset_clause + if limit_clause is not None or offset_clause is not None: + # See http://www.oracle.com/technology/oramag/oracle/06-sep/\ + # o56asktom.html + # + # Generalized form of an Oracle pagination query: + # select ... from ( + # select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from + # ( select distinct ... where ... order by ... 
+ # ) where ROWNUM <= :limit+:offset + # ) where ora_rn > :offset + # Outer select and "ROWNUM as ora_rn" can be dropped if + # limit=0 + + kwargs['select_wraps_for'] = select + select = select._generate() + select._oracle_visit = True + + # Wrap the middle select and add the hint + limitselect = sql.select([c for c in select.c]) + if limit_clause is not None and \ + self.dialect.optimize_limits and \ + select._simple_int_limit: + limitselect = limitselect.prefix_with( + "/*+ FIRST_ROWS(%d) */" % + select._limit) + + limitselect._oracle_visit = True + limitselect._is_wrapper = True + + # add expressions to accommodate FOR UPDATE OF + for_update = select._for_update_arg + if for_update is not None and for_update.of: + for_update = for_update._clone() + for_update._copy_internals() + + for elem in for_update.of: + select.append_column(elem) + + adapter = sql_util.ClauseAdapter(select) + for_update.of = [ + adapter.traverse(elem) + for elem in for_update.of] + + # If needed, add the limiting clause + if limit_clause is not None: + if not self.dialect.use_binds_for_limits: + # use simple int limits, will raise an exception + # if the limit isn't specified this way + max_row = select._limit + + if offset_clause is not None: + max_row += select._offset + max_row = sql.literal_column("%d" % max_row) + else: + max_row = limit_clause + if offset_clause is not None: + max_row = max_row + offset_clause + limitselect.append_whereclause( + sql.literal_column("ROWNUM") <= max_row) + + # If needed, add the ora_rn, and wrap again with offset. + if offset_clause is None: + limitselect._for_update_arg = for_update + select = limitselect + else: + limitselect = limitselect.column( + sql.literal_column("ROWNUM").label("ora_rn")) + limitselect._oracle_visit = True + limitselect._is_wrapper = True + + offsetselect = sql.select( + [c for c in limitselect.c if c.key != 'ora_rn']) + offsetselect._oracle_visit = True + offsetselect._is_wrapper = True + + if for_update is not None and for_update.of: + for elem in for_update.of: + if limitselect.corresponding_column(elem) is None: + limitselect.append_column(elem) + + if not self.dialect.use_binds_for_limits: + offset_clause = sql.literal_column( + "%d" % select._offset) + offsetselect.append_whereclause( + sql.literal_column("ora_rn") > offset_clause) + + offsetselect._for_update_arg = for_update + select = offsetselect + + return compiler.SQLCompiler.visit_select(self, select, **kwargs) + + def limit_clause(self, select, **kw): + return "" + + def for_update_clause(self, select, **kw): + if self.is_subquery(): + return "" + + tmp = ' FOR UPDATE' + + if select._for_update_arg.of: + tmp += ' OF ' + ', '.join( + self.process(elem, **kw) for elem in + select._for_update_arg.of + ) + + if select._for_update_arg.nowait: + tmp += " NOWAIT" + if select._for_update_arg.skip_locked: + tmp += " SKIP LOCKED" + + return tmp + + +class OracleDDLCompiler(compiler.DDLCompiler): + + def define_constraint_cascades(self, constraint): + text = "" + if constraint.ondelete is not None: + text += " ON DELETE %s" % constraint.ondelete + + # oracle has no ON UPDATE CASCADE - + # its only available via triggers + # http://asktom.oracle.com/tkyte/update_cascade/index.html + if constraint.onupdate is not None: + util.warn( + "Oracle does not contain native UPDATE CASCADE " + "functionality - onupdates will not be rendered for foreign " + "keys. 
Consider using deferrable=True, initially='deferred' " + "or triggers.") + + return text + + def visit_create_index(self, create): + index = create.element + self._verify_index_table(index) + preparer = self.preparer + text = "CREATE " + if index.unique: + text += "UNIQUE " + if index.dialect_options['oracle']['bitmap']: + text += "BITMAP " + text += "INDEX %s ON %s (%s)" % ( + self._prepared_index_name(index, include_schema=True), + preparer.format_table(index.table, use_schema=True), + ', '.join( + self.sql_compiler.process( + expr, + include_table=False, literal_binds=True) + for expr in index.expressions) + ) + if index.dialect_options['oracle']['compress'] is not False: + if index.dialect_options['oracle']['compress'] is True: + text += " COMPRESS" + else: + text += " COMPRESS %d" % ( + index.dialect_options['oracle']['compress'] + ) + return text + + def post_create_table(self, table): + table_opts = [] + opts = table.dialect_options['oracle'] + + if opts['on_commit']: + on_commit_options = opts['on_commit'].replace("_", " ").upper() + table_opts.append('\n ON COMMIT %s' % on_commit_options) + + if opts['compress']: + if opts['compress'] is True: + table_opts.append("\n COMPRESS") + else: + table_opts.append("\n COMPRESS FOR %s" % ( + opts['compress'] + )) + + return ''.join(table_opts) + + +class OracleIdentifierPreparer(compiler.IdentifierPreparer): + + reserved_words = set([x.lower() for x in RESERVED_WORDS]) + illegal_initial_characters = set( + (str(dig) for dig in range(0, 10))).union(["_", "$"]) + + def _bindparam_requires_quotes(self, value): + """Return True if the given identifier requires quoting.""" + lc_value = value.lower() + return (lc_value in self.reserved_words + or value[0] in self.illegal_initial_characters + or not self.legal_characters.match(util.text_type(value)) + ) + + def format_savepoint(self, savepoint): + name = savepoint.ident.lstrip('_') + return super( + OracleIdentifierPreparer, self).format_savepoint(savepoint, name) + + +class OracleExecutionContext(default.DefaultExecutionContext): + def fire_sequence(self, seq, type_): + return self._execute_scalar( + "SELECT " + + self.dialect.identifier_preparer.format_sequence(seq) + + ".nextval FROM DUAL", type_) + + +class OracleDialect(default.DefaultDialect): + name = 'oracle' + supports_alter = True + supports_unicode_statements = False + supports_unicode_binds = False + max_identifier_length = 30 + supports_sane_rowcount = True + supports_sane_multi_rowcount = False + + supports_simple_order_by_label = False + + supports_sequences = True + sequences_optional = False + postfetch_lastrowid = False + + default_paramstyle = 'named' + colspecs = colspecs + ischema_names = ischema_names + requires_name_normalize = True + + supports_default_values = False + supports_empty_insert = False + + statement_compiler = OracleCompiler + ddl_compiler = OracleDDLCompiler + type_compiler = OracleTypeCompiler + preparer = OracleIdentifierPreparer + execution_ctx_cls = OracleExecutionContext + + reflection_options = ('oracle_resolve_synonyms', ) + + construct_arguments = [ + (sa_schema.Table, { + "resolve_synonyms": False, + "on_commit": None, + "compress": False + }), + (sa_schema.Index, { + "bitmap": False, + "compress": False + }) + ] + + def __init__(self, + use_ansi=True, + optimize_limits=False, + use_binds_for_limits=True, + exclude_tablespaces=('SYSTEM', 'SYSAUX', ), + **kwargs): + default.DefaultDialect.__init__(self, **kwargs) + self.use_ansi = use_ansi + self.optimize_limits = optimize_limits + 
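# When use_binds_for_limits is False, the limit/offset values are
+        # rendered as inline literals rather than bound parameters; see
+        # "LIMIT/OFFSET Support" in the module docstring above.
+        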
self.use_binds_for_limits = use_binds_for_limits + self.exclude_tablespaces = exclude_tablespaces + + def initialize(self, connection): + super(OracleDialect, self).initialize(connection) + self.implicit_returning = self.__dict__.get( + 'implicit_returning', + self.server_version_info > (10, ) + ) + + if self._is_oracle_8: + self.colspecs = self.colspecs.copy() + self.colspecs.pop(sqltypes.Interval) + self.use_ansi = False + + @property + def _is_oracle_8(self): + return self.server_version_info and \ + self.server_version_info < (9, ) + + @property + def _supports_table_compression(self): + return self.server_version_info and \ + self.server_version_info >= (10, 1, ) + + @property + def _supports_table_compress_for(self): + return self.server_version_info and \ + self.server_version_info >= (11, ) + + @property + def _supports_char_length(self): + return not self._is_oracle_8 + + @property + def _supports_nchar(self): + return not self._is_oracle_8 + + def do_release_savepoint(self, connection, name): + # Oracle does not support RELEASE SAVEPOINT + pass + + def has_table(self, connection, table_name, schema=None): + if not schema: + schema = self.default_schema_name + cursor = connection.execute( + sql.text("SELECT table_name FROM all_tables " + "WHERE table_name = :name AND owner = :schema_name"), + name=self.denormalize_name(table_name), + schema_name=self.denormalize_name(schema)) + return cursor.first() is not None + + def has_sequence(self, connection, sequence_name, schema=None): + if not schema: + schema = self.default_schema_name + cursor = connection.execute( + sql.text("SELECT sequence_name FROM all_sequences " + "WHERE sequence_name = :name AND " + "sequence_owner = :schema_name"), + name=self.denormalize_name(sequence_name), + schema_name=self.denormalize_name(schema)) + return cursor.first() is not None + + def normalize_name(self, name): + if name is None: + return None + if util.py2k: + if isinstance(name, str): + name = name.decode(self.encoding) + if name.upper() == name and not \ + self.identifier_preparer._requires_quotes(name.lower()): + return name.lower() + elif name.lower() == name: + return quoted_name(name, quote=True) + else: + return name + + def denormalize_name(self, name): + if name is None: + return None + elif name.lower() == name and not \ + self.identifier_preparer._requires_quotes(name.lower()): + name = name.upper() + if util.py2k: + if not self.supports_unicode_binds: + name = name.encode(self.encoding) + else: + name = unicode(name) + return name + + def _get_default_schema_name(self, connection): + return self.normalize_name( + connection.execute('SELECT USER FROM DUAL').scalar()) + + def _resolve_synonym(self, connection, desired_owner=None, + desired_synonym=None, desired_table=None): + """search for a local synonym matching the given desired owner/name. + + if desired_owner is None, attempts to locate a distinct owner. + + returns the actual name, owner, dblink name, and synonym name if + found. 
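+
+        A rough sketch of a call and its return value (all names here
+        are illustrative only)::
+
+            self._resolve_synonym(conn, desired_owner='SCOTT',
+                                  desired_synonym='MY_SYN')
+            # -> ('MY_TABLE', 'SCOTT', None, 'MY_SYN')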
+ """ + + q = "SELECT owner, table_owner, table_name, db_link, "\ + "synonym_name FROM all_synonyms WHERE " + clauses = [] + params = {} + if desired_synonym: + clauses.append("synonym_name = :synonym_name") + params['synonym_name'] = desired_synonym + if desired_owner: + clauses.append("owner = :desired_owner") + params['desired_owner'] = desired_owner + if desired_table: + clauses.append("table_name = :tname") + params['tname'] = desired_table + + q += " AND ".join(clauses) + + result = connection.execute(sql.text(q), **params) + if desired_owner: + row = result.first() + if row: + return (row['table_name'], row['table_owner'], + row['db_link'], row['synonym_name']) + else: + return None, None, None, None + else: + rows = result.fetchall() + if len(rows) > 1: + raise AssertionError( + "There are multiple tables visible to the schema, you " + "must specify owner") + elif len(rows) == 1: + row = rows[0] + return (row['table_name'], row['table_owner'], + row['db_link'], row['synonym_name']) + else: + return None, None, None, None + + @reflection.cache + def _prepare_reflection_args(self, connection, table_name, schema=None, + resolve_synonyms=False, dblink='', **kw): + + if resolve_synonyms: + actual_name, owner, dblink, synonym = self._resolve_synonym( + connection, + desired_owner=self.denormalize_name(schema), + desired_synonym=self.denormalize_name(table_name) + ) + else: + actual_name, owner, dblink, synonym = None, None, None, None + if not actual_name: + actual_name = self.denormalize_name(table_name) + + if dblink: + # using user_db_links here since all_db_links appears + # to have more restricted permissions. + # http://docs.oracle.com/cd/B28359_01/server.111/b28310/ds_admin005.htm + # will need to hear from more users if we are doing + # the right thing here. 
See [ticket:2619] + owner = connection.scalar( + sql.text("SELECT username FROM user_db_links " + "WHERE db_link=:link"), link=dblink) + dblink = "@" + dblink + elif not owner: + owner = self.denormalize_name(schema or self.default_schema_name) + + return (actual_name, owner, dblink or '', synonym) + + @reflection.cache + def get_schema_names(self, connection, **kw): + s = "SELECT username FROM all_users ORDER BY username" + cursor = connection.execute(s,) + return [self.normalize_name(row[0]) for row in cursor] + + @reflection.cache + def get_table_names(self, connection, schema=None, **kw): + schema = self.denormalize_name(schema or self.default_schema_name) + + # note that table_names() isn't loading DBLINKed or synonym'ed tables + if schema is None: + schema = self.default_schema_name + + sql_str = "SELECT table_name FROM all_tables WHERE " + if self.exclude_tablespaces: + sql_str += ( + "nvl(tablespace_name, 'no tablespace') " + "NOT IN (%s) AND " % ( + ', '.join(["'%s'" % ts for ts in self.exclude_tablespaces]) + ) + ) + sql_str += ( + "OWNER = :owner " + "AND IOT_NAME IS NULL " + "AND DURATION IS NULL") + + cursor = connection.execute(sql.text(sql_str), owner=schema) + return [self.normalize_name(row[0]) for row in cursor] + + @reflection.cache + def get_temp_table_names(self, connection, **kw): + schema = self.denormalize_name(self.default_schema_name) + + sql_str = "SELECT table_name FROM all_tables WHERE " + if self.exclude_tablespaces: + sql_str += ( + "nvl(tablespace_name, 'no tablespace') " + "NOT IN (%s) AND " % ( + ', '.join(["'%s'" % ts for ts in self.exclude_tablespaces]) + ) + ) + sql_str += ( + "OWNER = :owner " + "AND IOT_NAME IS NULL " + "AND DURATION IS NOT NULL") + + cursor = connection.execute(sql.text(sql_str), owner=schema) + return [self.normalize_name(row[0]) for row in cursor] + + @reflection.cache + def get_view_names(self, connection, schema=None, **kw): + schema = self.denormalize_name(schema or self.default_schema_name) + s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner") + cursor = connection.execute(s, owner=self.denormalize_name(schema)) + return [self.normalize_name(row[0]) for row in cursor] + + @reflection.cache + def get_table_options(self, connection, table_name, schema=None, **kw): + options = {} + + resolve_synonyms = kw.get('oracle_resolve_synonyms', False) + dblink = kw.get('dblink', '') + info_cache = kw.get('info_cache') + + (table_name, schema, dblink, synonym) = \ + self._prepare_reflection_args(connection, table_name, schema, + resolve_synonyms, dblink, + info_cache=info_cache) + + params = {"table_name": table_name} + + columns = ["table_name"] + if self._supports_table_compression: + columns.append("compression") + if self._supports_table_compress_for: + columns.append("compress_for") + + text = "SELECT %(columns)s "\ + "FROM ALL_TABLES%(dblink)s "\ + "WHERE table_name = :table_name" + + if schema is not None: + params['owner'] = schema + text += " AND owner = :owner " + text = text % {'dblink': dblink, 'columns': ", ".join(columns)} + + result = connection.execute(sql.text(text), **params) + + enabled = dict(DISABLED=False, ENABLED=True) + + row = result.first() + if row: + if "compression" in row and enabled.get(row.compression, False): + if "compress_for" in row: + options['oracle_compress'] = row.compress_for + else: + options['oracle_compress'] = True + + return options + + @reflection.cache + def get_columns(self, connection, table_name, schema=None, **kw): + """ + + kw arguments can be: + + oracle_resolve_synonyms 
+ + dblink + + """ + + resolve_synonyms = kw.get('oracle_resolve_synonyms', False) + dblink = kw.get('dblink', '') + info_cache = kw.get('info_cache') + + (table_name, schema, dblink, synonym) = \ + self._prepare_reflection_args(connection, table_name, schema, + resolve_synonyms, dblink, + info_cache=info_cache) + columns = [] + if self._supports_char_length: + char_length_col = 'char_length' + else: + char_length_col = 'data_length' + + params = {"table_name": table_name} + text = "SELECT column_name, data_type, %(char_length_col)s, "\ + "data_precision, data_scale, "\ + "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "\ + "WHERE table_name = :table_name" + if schema is not None: + params['owner'] = schema + text += " AND owner = :owner " + text += " ORDER BY column_id" + text = text % {'dblink': dblink, 'char_length_col': char_length_col} + + c = connection.execute(sql.text(text), **params) + + for row in c: + (colname, orig_colname, coltype, length, precision, scale, nullable, default) = \ + (self.normalize_name(row[0]), row[0], row[1], row[ + 2], row[3], row[4], row[5] == 'Y', row[6]) + + if coltype == 'NUMBER': + coltype = NUMBER(precision, scale) + elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'): + coltype = self.ischema_names.get(coltype)(length) + elif 'WITH TIME ZONE' in coltype: + coltype = TIMESTAMP(timezone=True) + else: + coltype = re.sub(r'\(\d+\)', '', coltype) + try: + coltype = self.ischema_names[coltype] + except KeyError: + util.warn("Did not recognize type '%s' of column '%s'" % + (coltype, colname)) + coltype = sqltypes.NULLTYPE + + cdict = { + 'name': colname, + 'type': coltype, + 'nullable': nullable, + 'default': default, + 'autoincrement': 'auto', + } + if orig_colname.lower() == orig_colname: + cdict['quote'] = True + + columns.append(cdict) + return columns + + @reflection.cache + def get_indexes(self, connection, table_name, schema=None, + resolve_synonyms=False, dblink='', **kw): + + info_cache = kw.get('info_cache') + (table_name, schema, dblink, synonym) = \ + self._prepare_reflection_args(connection, table_name, schema, + resolve_synonyms, dblink, + info_cache=info_cache) + indexes = [] + + params = {'table_name': table_name} + text = \ + "SELECT a.index_name, a.column_name, "\ + "\nb.index_type, b.uniqueness, b.compression, b.prefix_length "\ + "\nFROM ALL_IND_COLUMNS%(dblink)s a, "\ + "\nALL_INDEXES%(dblink)s b "\ + "\nWHERE "\ + "\na.index_name = b.index_name "\ + "\nAND a.table_owner = b.table_owner "\ + "\nAND a.table_name = b.table_name "\ + "\nAND a.table_name = :table_name " + + if schema is not None: + params['schema'] = schema + text += "AND a.table_owner = :schema " + + text += "ORDER BY a.index_name, a.column_position" + + text = text % {'dblink': dblink} + + q = sql.text(text) + rp = connection.execute(q, **params) + indexes = [] + last_index_name = None + pk_constraint = self.get_pk_constraint( + connection, table_name, schema, resolve_synonyms=resolve_synonyms, + dblink=dblink, info_cache=kw.get('info_cache')) + pkeys = pk_constraint['constrained_columns'] + uniqueness = dict(NONUNIQUE=False, UNIQUE=True) + enabled = dict(DISABLED=False, ENABLED=True) + + oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE) + + def upper_name_set(names): + return set([i.upper() for i in names]) + + pk_names = upper_name_set(pkeys) + + def remove_if_primary_key(index): + # don't include the primary key index + if index is not None and \ + upper_name_set(index['column_names']) == pk_names: + indexes.pop() + + index = None + for rset in rp: + if 
rset.index_name != last_index_name: + remove_if_primary_key(index) + index = dict(name=self.normalize_name(rset.index_name), + column_names=[], dialect_options={}) + indexes.append(index) + index['unique'] = uniqueness.get(rset.uniqueness, False) + + if rset.index_type in ('BITMAP', 'FUNCTION-BASED BITMAP'): + index['dialect_options']['oracle_bitmap'] = True + if enabled.get(rset.compression, False): + index['dialect_options']['oracle_compress'] = rset.prefix_length + + # filter out Oracle SYS_NC names. could also do an outer join + # to the all_tab_columns table and check for real col names there. + if not oracle_sys_col.match(rset.column_name): + index['column_names'].append( + self.normalize_name(rset.column_name)) + last_index_name = rset.index_name + remove_if_primary_key(index) + return indexes + + @reflection.cache + def _get_constraint_data(self, connection, table_name, schema=None, + dblink='', **kw): + + params = {'table_name': table_name} + + text = \ + "SELECT"\ + "\nac.constraint_name,"\ + "\nac.constraint_type,"\ + "\nloc.column_name AS local_column,"\ + "\nrem.table_name AS remote_table,"\ + "\nrem.column_name AS remote_column,"\ + "\nrem.owner AS remote_owner,"\ + "\nloc.position as loc_pos,"\ + "\nrem.position as rem_pos"\ + "\nFROM all_constraints%(dblink)s ac,"\ + "\nall_cons_columns%(dblink)s loc,"\ + "\nall_cons_columns%(dblink)s rem"\ + "\nWHERE ac.table_name = :table_name"\ + "\nAND ac.constraint_type IN ('R','P')" + + if schema is not None: + params['owner'] = schema + text += "\nAND ac.owner = :owner" + + text += \ + "\nAND ac.owner = loc.owner"\ + "\nAND ac.constraint_name = loc.constraint_name"\ + "\nAND ac.r_owner = rem.owner(+)"\ + "\nAND ac.r_constraint_name = rem.constraint_name(+)"\ + "\nAND (rem.position IS NULL or loc.position=rem.position)"\ + "\nORDER BY ac.constraint_name, loc.position" + + text = text % {'dblink': dblink} + rp = connection.execute(sql.text(text), **params) + constraint_data = rp.fetchall() + return constraint_data + + @reflection.cache + def get_pk_constraint(self, connection, table_name, schema=None, **kw): + resolve_synonyms = kw.get('oracle_resolve_synonyms', False) + dblink = kw.get('dblink', '') + info_cache = kw.get('info_cache') + + (table_name, schema, dblink, synonym) = \ + self._prepare_reflection_args(connection, table_name, schema, + resolve_synonyms, dblink, + info_cache=info_cache) + pkeys = [] + constraint_name = None + constraint_data = self._get_constraint_data( + connection, table_name, schema, dblink, + info_cache=kw.get('info_cache')) + + for row in constraint_data: + (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \ + row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]]) + if cons_type == 'P': + if constraint_name is None: + constraint_name = self.normalize_name(cons_name) + pkeys.append(local_column) + return {'constrained_columns': pkeys, 'name': constraint_name} + + @reflection.cache + def get_foreign_keys(self, connection, table_name, schema=None, **kw): + """ + + kw arguments can be: + + oracle_resolve_synonyms + + dblink + + """ + + requested_schema = schema # to check later on + resolve_synonyms = kw.get('oracle_resolve_synonyms', False) + dblink = kw.get('dblink', '') + info_cache = kw.get('info_cache') + + (table_name, schema, dblink, synonym) = \ + self._prepare_reflection_args(connection, table_name, schema, + resolve_synonyms, dblink, + info_cache=info_cache) + + constraint_data = self._get_constraint_data( + connection, table_name, schema, dblink, + 
info_cache=kw.get('info_cache')) + + def fkey_rec(): + return { + 'name': None, + 'constrained_columns': [], + 'referred_schema': None, + 'referred_table': None, + 'referred_columns': [] + } + + fkeys = util.defaultdict(fkey_rec) + + for row in constraint_data: + (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \ + row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]]) + + if cons_type == 'R': + if remote_table is None: + # ticket 363 + util.warn( + ("Got 'None' querying 'table_name' from " + "all_cons_columns%(dblink)s - does the user have " + "proper rights to the table?") % {'dblink': dblink}) + continue + + rec = fkeys[cons_name] + rec['name'] = cons_name + local_cols, remote_cols = rec[ + 'constrained_columns'], rec['referred_columns'] + + if not rec['referred_table']: + if resolve_synonyms: + ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \ + self._resolve_synonym( + connection, + desired_owner=self.denormalize_name( + remote_owner), + desired_table=self.denormalize_name( + remote_table) + ) + if ref_synonym: + remote_table = self.normalize_name(ref_synonym) + remote_owner = self.normalize_name( + ref_remote_owner) + + rec['referred_table'] = remote_table + + if requested_schema is not None or \ + self.denormalize_name(remote_owner) != schema: + rec['referred_schema'] = remote_owner + + local_cols.append(local_column) + remote_cols.append(remote_column) + + return list(fkeys.values()) + + @reflection.cache + def get_view_definition(self, connection, view_name, schema=None, + resolve_synonyms=False, dblink='', **kw): + info_cache = kw.get('info_cache') + (view_name, schema, dblink, synonym) = \ + self._prepare_reflection_args(connection, view_name, schema, + resolve_synonyms, dblink, + info_cache=info_cache) + + params = {'view_name': view_name} + text = "SELECT text FROM all_views WHERE view_name=:view_name" + + if schema is not None: + text += " AND owner = :schema" + params['schema'] = schema + + rp = connection.execute(sql.text(text), **params).scalar() + if rp: + if util.py2k: + rp = rp.decode(self.encoding) + return rp + else: + return None + + +class _OuterJoinColumn(sql.ClauseElement): + __visit_name__ = 'outer_join_column' + + def __init__(self, column): + self.column = column diff --git a/app/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/app/lib/sqlalchemy/dialects/oracle/cx_oracle.py new file mode 100644 index 0000000..f85324f --- /dev/null +++ b/app/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -0,0 +1,1020 @@ +# oracle/cx_oracle.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" + +.. dialect:: oracle+cx_oracle + :name: cx-Oracle + :dbapi: cx_oracle + :connectstring: oracle+cx_oracle://user:pass@host:port/dbname\ +[?key=value&key=value...] + :url: http://cx-oracle.sourceforge.net/ + +Additional Connect Arguments +---------------------------- + +When connecting with ``dbname`` present, the host, port, and dbname tokens are +converted to a TNS name using +the cx_oracle ``makedsn()`` function. Otherwise, the host token is taken +directly as a TNS name. + +Additional arguments which may be specified either as query string arguments +on the URL, or as keyword arguments to :func:`.create_engine()` are: + +* ``allow_twophase`` - enable two-phase transactions. Defaults to ``True``. + +* ``arraysize`` - set the cx_oracle.arraysize value on cursors, defaulted + to 50. 
This setting is significant with cx_Oracle as the contents of LOB
+  objects are only readable within a "live" row (e.g. within a batch of
+  50 rows).
+
+* ``auto_convert_lobs`` - defaults to True; see :ref:`cx_oracle_lob`.
+
+* ``auto_setinputsizes`` - the cx_oracle.setinputsizes() call is issued for
+  all bind parameters.  This is required for LOB datatypes but can be
+  disabled to reduce overhead.  Defaults to ``True``.  Specific types
+  can be excluded from this process using the ``exclude_setinputsizes``
+  parameter.
+
+* ``coerce_to_unicode`` - see :ref:`cx_oracle_unicode` for detail.
+
+* ``coerce_to_decimal`` - see :ref:`cx_oracle_numeric` for detail.
+
+* ``exclude_setinputsizes`` - a tuple or list of string DBAPI type names to
+  be excluded from the "auto setinputsizes" feature.  The type names here
+  must match DBAPI types that are found in the "cx_Oracle" module namespace,
+  such as cx_Oracle.UNICODE, cx_Oracle.NCLOB, etc.  Defaults to
+  ``(STRING, UNICODE)``.
+
+  .. versionadded:: 0.8 specific DBAPI types can be excluded from the
+     auto_setinputsizes feature via the exclude_setinputsizes attribute.
+
+* ``mode`` - This is given the string value of SYSDBA or SYSOPER, or
+  alternatively an integer value.  This value is only available as a URL query
+  string argument.
+
+* ``threaded`` - enable multithreaded access to cx_oracle connections.
+  Defaults to ``True``.  Note that this is the opposite default of the
+  cx_Oracle DBAPI itself.
+
+* ``service_name`` - An option to use connection string (DSN) with
+  ``SERVICE_NAME`` instead of ``SID``.  It can't be passed when a ``database``
+  part is given.
+  E.g. ``oracle+cx_oracle://scott:tiger@host:1521/?service_name=hr``
+  is a valid URL.  This value is only available as a URL query string
+  argument.
+
+  .. versionadded:: 1.0.0
+
+.. _cx_oracle_unicode:
+
+Unicode
+-------
+
+The cx_Oracle DBAPI as of version 5 fully supports unicode, and has the
+ability to return string results as Python unicode objects natively.
+
+When used in Python 3, cx_Oracle returns all strings as Python unicode objects
+(that is, plain ``str`` in Python 3).  In Python 2, it will return as Python
+unicode those column values that are of type ``NVARCHAR`` or ``NCLOB``.  For
+column values that are of type ``VARCHAR`` or other non-unicode string types,
+it will return values as Python strings (e.g. bytestrings).
+
+The cx_Oracle SQLAlchemy dialect presents two different options for the use
+case of returning ``VARCHAR`` column values as Python unicode objects under
+Python 2:
+
+* the cx_Oracle DBAPI has the ability to coerce all string results to Python
+  unicode objects unconditionally using output type handlers.  This has
+  the advantage that the unicode conversion is global to all statements
+  at the cx_Oracle driver level, meaning it works with raw textual SQL
+  statements that have no typing information associated.  However, this system
+  has been observed to incur significant performance overhead, not only
+  because it takes effect for all string values unconditionally, but also
+  because cx_Oracle under Python 2 seems to use a pure-Python function call in
+  order to do the decode operation, which under cPython can be orders of
+  magnitude slower than doing it using C functions alone.
+
+* SQLAlchemy has unicode-decoding services built in, and when using
+  SQLAlchemy's C extensions, these functions do not use any Python function
+  calls and are very fast. 
The disadvantage to this approach is that the + unicode conversion only takes effect for statements where the + :class:`.Unicode` type or :class:`.String` type with + ``convert_unicode=True`` is explicitly associated with the result column. + This is the case for any ORM or Core query or SQL expression as well as for + a :func:`.text` construct that specifies output column types, so in the vast + majority of cases this is not an issue. However, when sending a completely + raw string to :meth:`.Connection.execute`, this typing information isn't + present, unless the string is handled within a :func:`.text` construct that + adds typing information. + +As of version 0.9.2 of SQLAlchemy, the default approach is to use SQLAlchemy's +typing system. This keeps cx_Oracle's expensive Python 2 approach +disabled unless the user explicitly wants it. Under Python 3, SQLAlchemy +detects that cx_Oracle is returning unicode objects natively and cx_Oracle's +system is used. + +To re-enable cx_Oracle's output type handler under Python 2, the +``coerce_to_unicode=True`` flag (new in 0.9.4) can be passed to +:func:`.create_engine`:: + + engine = create_engine("oracle+cx_oracle://dsn", coerce_to_unicode=True) + +Alternatively, to run a pure string SQL statement and get ``VARCHAR`` results +as Python unicode under Python 2 without using cx_Oracle's native handlers, +the :func:`.text` feature can be used:: + + from sqlalchemy import text, Unicode + result = conn.execute( + text("select username from user").columns(username=Unicode)) + +.. versionchanged:: 0.9.2 cx_Oracle's outputtypehandlers are no longer used + for unicode results of non-unicode datatypes in Python 2, after they were + identified as a major performance bottleneck. SQLAlchemy's own unicode + facilities are used instead. + +.. versionadded:: 0.9.4 Added the ``coerce_to_unicode`` flag, to re-enable + cx_Oracle's outputtypehandler and revert to pre-0.9.2 behavior. + +.. _cx_oracle_returning: + +RETURNING Support +----------------- + +The cx_oracle DBAPI supports a limited subset of Oracle's already limited +RETURNING support. Typically, results can only be guaranteed for at most one +column being returned; this is the typical case when SQLAlchemy uses RETURNING +to get just the value of a primary-key-associated sequence value. +Additional column expressions will cause problems in a non-determinative way, +due to cx_oracle's lack of support for the OCI_DATA_AT_EXEC API which is +required for more complex RETURNING scenarios. + +For this reason, stability may be enhanced by disabling RETURNING support +completely; SQLAlchemy otherwise will use RETURNING to fetch newly +sequence-generated primary keys. As illustrated in :ref:`oracle_returning`:: + + engine = create_engine("oracle://scott:tiger@dsn", + implicit_returning=False) + +.. seealso:: + + http://docs.oracle.com/cd/B10501_01/appdev.920/a96584/oci05bnd.htm#420693 + - OCI documentation for RETURNING + + http://sourceforge.net/mailarchive/message.php?msg_id=31338136 + - cx_oracle developer commentary + +.. _cx_oracle_lob: + +LOB Objects +----------- + +cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy +converts these to strings so that the interface of the Binary type is +consistent with that of other backends, and so that the linkage to a live +cursor is not needed in scenarios like result.fetchmany() and +result.fetchall(). This means that by default, LOB objects are fully fetched +unconditionally by SQLAlchemy, and the linkage to a live cursor is broken. 
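+
+For example, with the default setting in place, a textual LOB column comes
+back as a plain string once rows are fetched (a sketch; the table and
+column names are illustrative)::
+
+    rows = conn.execute("SELECT body FROM documents").fetchall()
+    # each rows[n][0] is already a str, not a cx_oracle.LOB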
+
+To disable this processing, pass ``auto_convert_lobs=False`` to
+:func:`.create_engine()`.
+
+Two Phase Transaction Support
+-----------------------------
+
+Two Phase transactions are implemented using XA transactions, and are known
+to work in a rudimentary fashion with recent versions of cx_Oracle
+as of SQLAlchemy 0.8.0b2, 0.7.10.  However, the mechanism is not yet
+considered to be robust and should still be regarded as experimental.
+
+In particular, the cx_Oracle DBAPI as recently as 5.1.2 has a bug regarding
+two phase which prevents
+a particular DBAPI connection from being consistently usable in both
+prepared transactions as well as traditional DBAPI usage patterns; therefore
+once a particular connection is used via :meth:`.Connection.begin_twophase`,
+all subsequent usages of the underlying DBAPI connection must be within
+the context of prepared transactions.
+
+The default behavior of :class:`.Engine` is to maintain a pool of DBAPI
+connections.  Therefore, due to the above glitch, a DBAPI connection that has
+been used in a two-phase operation, and is then returned to the pool, will
+not be usable in a non-two-phase context.  To avoid this situation,
+the application can make one of several choices:
+
+* Disable connection pooling using :class:`.NullPool`
+
+* Ensure that the particular :class:`.Engine` in use is only used
+  for two-phase operations.  An :class:`.Engine` bound to an ORM
+  :class:`.Session` which includes ``twophase=True`` will consistently
+  use the two-phase transaction style.
+
+* For ad-hoc two-phase operations without disabling pooling, the DBAPI
+  connection in use can be evicted from the connection pool using the
+  :meth:`.Connection.detach` method.
+
+.. versionchanged:: 0.8.0b2,0.7.10
+   Support for cx_oracle prepared transactions has been implemented
+   and tested.
+
+.. _cx_oracle_numeric:
+
+Precision Numerics
+------------------
+
+The SQLAlchemy dialect goes through a lot of steps to ensure
+that decimal numbers are sent and received with full accuracy.
+An "outputtypehandler" callable is associated with each
+cx_oracle connection object which detects numeric types and
+receives them as string values, rather than as Python ``float``
+objects; each string is then passed to the Python ``Decimal``
+constructor.  The :class:`.Numeric` and
+:class:`.Float` types under the cx_oracle dialect are aware of
+this behavior, and will coerce the ``Decimal`` to ``float`` if
+the ``asdecimal`` flag is ``False`` (default on :class:`.Float`,
+optional on :class:`.Numeric`).
+
+Because the handler coerces to ``Decimal`` in all cases first,
+the feature can detract significantly from performance.
+If precision numerics aren't required, the decimal handling
+can be disabled by passing the flag ``coerce_to_decimal=False``
+to :func:`.create_engine`::
+
+    engine = create_engine("oracle+cx_oracle://dsn", coerce_to_decimal=False)
+
+.. versionadded:: 0.7.6
+    Added the ``coerce_to_decimal`` flag.
+
+Another way to improve performance is to use the
+`cdecimal `_ library;
+see :class:`.Numeric` for additional notes.
+
+The handler attempts to use the "precision" and "scale"
+attributes of the result set column to best determine if
+subsequent incoming values should be received as ``Decimal`` as
+opposed to int (in which case no processing is added).  There are
+several scenarios where OCI_ does not provide unambiguous data
+as to the numeric type, including some situations where
+individual rows may return a combination of floating point and
+integer values.
+Certain values for "precision" and "scale" have
+been observed to determine this scenario.  When it occurs, the
+outputtypehandler receives the value as a string and then passes it off to a
+processing function which detects, for each returned value, if a
+decimal point is present, and if so converts to ``Decimal``,
+otherwise to int.  The intention is that simple int-based
+statements like "SELECT my_seq.nextval() FROM DUAL" continue to
+return ints and not ``Decimal`` objects, and that any kind of
+floating point value is received as a string so that there is no
+floating point loss of precision.
+
+The "decimal point is present" logic itself is also sensitive to
+locale.  Under OCI_, this is controlled by the NLS_LANG
+environment variable.  Upon first connection, the dialect runs a
+test to determine the current "decimal" character, which can be
+a comma "," for European locales.  From that point forward the
+outputtypehandler uses that character to represent a decimal
+point.  Note that cx_oracle 5.0.3 or greater is required
+when dealing with numerics with locale settings that don't use
+a period "." as the decimal character.
+
+.. versionchanged:: 0.6.6
+    The outputtypehandler supports the case where the locale uses a
+    comma "," character to represent a decimal point.
+
+.. _OCI: http://www.oracle.com/technetwork/database/features/oci/index.html
+
+"""
+
+from __future__ import absolute_import
+
+from .base import OracleCompiler, OracleDialect, OracleExecutionContext
+from . import base as oracle
+from ...engine import result as _result
+from sqlalchemy import types as sqltypes, util, exc, processors
+import random
+import collections
+import decimal
+import re
+import time
+
+
+class _OracleNumeric(sqltypes.Numeric):
+    def bind_processor(self, dialect):
+        # cx_oracle accepts Decimal objects and floats
+        return None
+
+    def result_processor(self, dialect, coltype):
+        # we apply a cx_oracle type handler to all connections
+        # that converts floating point strings to Decimal().
+        # However, in some subquery situations, Oracle doesn't
+        # give us enough information to determine int or Decimal.
+        # It could even be int/Decimal differently on each row,
+        # regardless of the scale given for the originating type.
+        # So we still need an old school isinstance() handler
+        # here for decimals.
+
+        if dialect.supports_native_decimal:
+            if self.asdecimal:
+                fstring = "%%.%df" % self._effective_decimal_return_scale
+
+                def to_decimal(value):
+                    if value is None:
+                        return None
+                    elif isinstance(value, decimal.Decimal):
+                        return value
+                    else:
+                        return decimal.Decimal(fstring % value)
+
+                return to_decimal
+            else:
+                if self.precision is None and self.scale is None:
+                    return processors.to_float
+                elif not getattr(self, '_is_oracle_number', False) \
+                        and self.scale is not None:
+                    return processors.to_float
+                else:
+                    return None
+        else:
+            # cx_oracle 4 behavior, will assume
+            # floats
+            return super(_OracleNumeric, self).\
+                result_processor(dialect, coltype)
+
+
+class _OracleDate(sqltypes.Date):
+    def bind_processor(self, dialect):
+        return None
+
+    def result_processor(self, dialect, coltype):
+        def process(value):
+            if value is not None:
+                return value.date()
+            else:
+                return value
+        return process
+
+
+class _LOBMixin(object):
+    def result_processor(self, dialect, coltype):
+        if not dialect.auto_convert_lobs:
+            # return the cx_oracle.LOB directly.
+ return None + + def process(value): + if value is not None: + return value.read() + else: + return value + return process + + +class _NativeUnicodeMixin(object): + if util.py2k: + def bind_processor(self, dialect): + if dialect._cx_oracle_with_unicode: + def process(value): + if value is None: + return value + else: + return unicode(value) + return process + else: + return super( + _NativeUnicodeMixin, self).bind_processor(dialect) + + # we apply a connection output handler that returns + # unicode in all cases, so the "native_unicode" flag + # will be set for the default String.result_processor. + + +class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR): + def get_dbapi_type(self, dbapi): + return dbapi.FIXED_CHAR + + +class _OracleNVarChar(_NativeUnicodeMixin, sqltypes.NVARCHAR): + def get_dbapi_type(self, dbapi): + return getattr(dbapi, 'UNICODE', dbapi.STRING) + + +class _OracleText(_LOBMixin, sqltypes.Text): + def get_dbapi_type(self, dbapi): + return dbapi.CLOB + + +class _OracleLong(oracle.LONG): + # a raw LONG is a text type, but does *not* + # get the LobMixin with cx_oracle. + + def get_dbapi_type(self, dbapi): + return dbapi.LONG_STRING + + +class _OracleString(_NativeUnicodeMixin, sqltypes.String): + pass + +class _OracleEnum(_NativeUnicodeMixin, sqltypes.Enum): + def bind_processor(self, dialect): + enum_proc = sqltypes.Enum.bind_processor(self, dialect) + if util.py2k: + unicode_proc = _NativeUnicodeMixin.bind_processor(self, dialect) + else: + unicode_proc = None + + def process(value): + raw_str = enum_proc(value) + if unicode_proc: + raw_str = unicode_proc(raw_str) + return raw_str + return process + + +class _OracleUnicodeText( + _LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText): + def get_dbapi_type(self, dbapi): + return dbapi.NCLOB + + def result_processor(self, dialect, coltype): + lob_processor = _LOBMixin.result_processor(self, dialect, coltype) + if lob_processor is None: + return None + + string_processor = sqltypes.UnicodeText.result_processor( + self, dialect, coltype) + + if string_processor is None: + return lob_processor + else: + def process(value): + return string_processor(lob_processor(value)) + return process + + +class _OracleInteger(sqltypes.Integer): + def result_processor(self, dialect, coltype): + def to_int(val): + if val is not None: + val = int(val) + return val + return to_int + + +class _OracleBinary(_LOBMixin, sqltypes.LargeBinary): + def get_dbapi_type(self, dbapi): + return dbapi.BLOB + + def bind_processor(self, dialect): + return None + + +class _OracleInterval(oracle.INTERVAL): + def get_dbapi_type(self, dbapi): + return dbapi.INTERVAL + + +class _OracleRaw(oracle.RAW): + pass + + +class _OracleRowid(oracle.ROWID): + def get_dbapi_type(self, dbapi): + return dbapi.ROWID + + +class OracleCompiler_cx_oracle(OracleCompiler): + def bindparam_string(self, name, **kw): + quote = getattr(name, 'quote', None) + if quote is True or quote is not False and \ + self.preparer._bindparam_requires_quotes(name): + quoted_name = '"%s"' % name + self._quoted_bind_names[name] = quoted_name + return OracleCompiler.bindparam_string(self, quoted_name, **kw) + else: + return OracleCompiler.bindparam_string(self, name, **kw) + + +class OracleExecutionContext_cx_oracle(OracleExecutionContext): + + def pre_exec(self): + quoted_bind_names = \ + getattr(self.compiled, '_quoted_bind_names', None) + if quoted_bind_names: + if not self.dialect.supports_unicode_statements: + # if DBAPI doesn't accept unicode statements, + # keys in self.parameters would have been 
encoded + # here. so convert names in quoted_bind_names + # to encoded as well. + quoted_bind_names = \ + dict( + (fromname.encode(self.dialect.encoding), + toname.encode(self.dialect.encoding)) + for fromname, toname in + quoted_bind_names.items() + ) + for param in self.parameters: + for fromname, toname in quoted_bind_names.items(): + param[toname] = param[fromname] + del param[fromname] + + if self.dialect.auto_setinputsizes: + # cx_oracle really has issues when you setinputsizes + # on String, including that outparams/RETURNING + # breaks for varchars + self.set_input_sizes( + quoted_bind_names, + exclude_types=self.dialect.exclude_setinputsizes + ) + + # if a single execute, check for outparams + if len(self.compiled_parameters) == 1: + for bindparam in self.compiled.binds.values(): + if bindparam.isoutparam: + dbtype = bindparam.type.dialect_impl(self.dialect).\ + get_dbapi_type(self.dialect.dbapi) + if not hasattr(self, 'out_parameters'): + self.out_parameters = {} + if dbtype is None: + raise exc.InvalidRequestError( + "Cannot create out parameter for parameter " + "%r - its type %r is not supported by" + " cx_oracle" % + (bindparam.key, bindparam.type) + ) + name = self.compiled.bind_names[bindparam] + self.out_parameters[name] = self.cursor.var(dbtype) + self.parameters[0][quoted_bind_names.get(name, name)] = \ + self.out_parameters[name] + + def create_cursor(self): + c = self._dbapi_connection.cursor() + if self.dialect.arraysize: + c.arraysize = self.dialect.arraysize + + return c + + def get_result_proxy(self): + if hasattr(self, 'out_parameters') and self.compiled.returning: + returning_params = dict( + (k, v.getvalue()) + for k, v in self.out_parameters.items() + ) + return ReturningResultProxy(self, returning_params) + + result = None + if self.cursor.description is not None: + for column in self.cursor.description: + type_code = column[1] + if type_code in self.dialect._cx_oracle_binary_types: + result = _result.BufferedColumnResultProxy(self) + + if result is None: + result = _result.ResultProxy(self) + + if hasattr(self, 'out_parameters'): + if self.compiled_parameters is not None and \ + len(self.compiled_parameters) == 1: + result.out_parameters = out_parameters = {} + + for bind, name in self.compiled.bind_names.items(): + if name in self.out_parameters: + type = bind.type + impl_type = type.dialect_impl(self.dialect) + dbapi_type = impl_type.get_dbapi_type( + self.dialect.dbapi) + result_processor = impl_type.\ + result_processor(self.dialect, + dbapi_type) + if result_processor is not None: + out_parameters[name] = \ + result_processor( + self.out_parameters[name].getvalue()) + else: + out_parameters[name] = self.out_parameters[ + name].getvalue() + else: + result.out_parameters = dict( + (k, v.getvalue()) + for k, v in self.out_parameters.items() + ) + + return result + + +class OracleExecutionContext_cx_oracle_with_unicode( + OracleExecutionContext_cx_oracle): + """Support WITH_UNICODE in Python 2.xx. + + WITH_UNICODE allows cx_Oracle's Python 3 unicode handling + behavior under Python 2.x. This mode in some cases disallows + and in other cases silently passes corrupted data when + non-Python-unicode strings (a.k.a. plain old Python strings) + are passed as arguments to connect(), the statement sent to execute(), + or any of the bind parameter keys or values sent to execute(). + This optional context therefore ensures that all statements are + passed as Python unicode objects. 
+
+    """
+
+    def __init__(self, *arg, **kw):
+        OracleExecutionContext_cx_oracle.__init__(self, *arg, **kw)
+        self.statement = util.text_type(self.statement)
+
+    def _execute_scalar(self, stmt, type_):
+        return super(OracleExecutionContext_cx_oracle_with_unicode, self).\
+            _execute_scalar(util.text_type(stmt), type_)
+
+
+class ReturningResultProxy(_result.FullyBufferedResultProxy):
+    """Result proxy which stuffs the _returning clause + outparams
+    into the fetch."""
+
+    def __init__(self, context, returning_params):
+        self._returning_params = returning_params
+        super(ReturningResultProxy, self).__init__(context)
+
+    def _cursor_description(self):
+        returning = self.context.compiled.returning
+        return [
+            ("ret_%d" % i, None)
+            for i, col in enumerate(returning)
+        ]
+
+    def _buffer_rows(self):
+        return collections.deque(
+            [tuple(self._returning_params["ret_%d" % i]
+                   for i, c in enumerate(self._returning_params))]
+        )
+
+
+class OracleDialect_cx_oracle(OracleDialect):
+    execution_ctx_cls = OracleExecutionContext_cx_oracle
+    statement_compiler = OracleCompiler_cx_oracle
+
+    driver = "cx_oracle"
+
+    colspecs = {
+        sqltypes.Numeric: _OracleNumeric,
+        # generic type, assume datetime.date is desired
+        sqltypes.Date: _OracleDate,
+        sqltypes.LargeBinary: _OracleBinary,
+        sqltypes.Boolean: oracle._OracleBoolean,
+        sqltypes.Interval: _OracleInterval,
+        oracle.INTERVAL: _OracleInterval,
+        sqltypes.Text: _OracleText,
+        sqltypes.String: _OracleString,
+        sqltypes.UnicodeText: _OracleUnicodeText,
+        sqltypes.CHAR: _OracleChar,
+        sqltypes.Enum: _OracleEnum,
+
+        # a raw LONG is a text type, but does *not*
+        # get the LobMixin with cx_oracle.
+        oracle.LONG: _OracleLong,
+
+        # this is only needed for OUT parameters.
+        # it would be nice if we could not use it otherwise.
+ sqltypes.Integer: _OracleInteger, + + oracle.RAW: _OracleRaw, + sqltypes.Unicode: _OracleNVarChar, + sqltypes.NVARCHAR: _OracleNVarChar, + oracle.ROWID: _OracleRowid, + } + + execute_sequence_format = list + + def __init__(self, + auto_setinputsizes=True, + exclude_setinputsizes=("STRING", "UNICODE"), + auto_convert_lobs=True, + threaded=True, + allow_twophase=True, + coerce_to_decimal=True, + coerce_to_unicode=False, + arraysize=50, _retry_on_12516=False, + **kwargs): + OracleDialect.__init__(self, **kwargs) + self.threaded = threaded + self.arraysize = arraysize + self.allow_twophase = allow_twophase + self.supports_timestamp = self.dbapi is None or \ + hasattr(self.dbapi, 'TIMESTAMP') + self.auto_setinputsizes = auto_setinputsizes + self.auto_convert_lobs = auto_convert_lobs + self._retry_on_12516 = _retry_on_12516 + + if hasattr(self.dbapi, 'version'): + self.cx_oracle_ver = tuple([int(x) for x in + self.dbapi.version.split('.')]) + else: + self.cx_oracle_ver = (0, 0, 0) + + def types(*names): + return set( + getattr(self.dbapi, name, None) for name in names + ).difference([None]) + + self.exclude_setinputsizes = types(*(exclude_setinputsizes or ())) + self._cx_oracle_string_types = types("STRING", "UNICODE", + "NCLOB", "CLOB") + self._cx_oracle_unicode_types = types("UNICODE", "NCLOB") + self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB") + self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0) + + self.coerce_to_unicode = ( + self.cx_oracle_ver >= (5, 0) and + coerce_to_unicode + ) + + self.supports_native_decimal = ( + self.cx_oracle_ver >= (5, 0) and + coerce_to_decimal + ) + + self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0) + + if self.cx_oracle_ver is None: + # this occurs in tests with mock DBAPIs + self._cx_oracle_string_types = set() + self._cx_oracle_with_unicode = False + elif util.py3k or ( + self.cx_oracle_ver >= (5,) and not \ + hasattr(self.dbapi, 'UNICODE') + ): + # cx_Oracle WITH_UNICODE mode. *only* python + # unicode objects accepted for anything + self.supports_unicode_statements = True + self.supports_unicode_binds = True + self._cx_oracle_with_unicode = True + + if util.py2k: + # There's really no reason to run with WITH_UNICODE under + # Python 2.x. However as of cx_oracle 5.3 it seems to be + # set to ON for default builds + self.execution_ctx_cls = \ + OracleExecutionContext_cx_oracle_with_unicode + else: + self._cx_oracle_with_unicode = False + + if self.cx_oracle_ver is None or \ + not self.auto_convert_lobs or \ + not hasattr(self.dbapi, 'CLOB'): + self.dbapi_type_map = {} + else: + # only use this for LOB objects. using it for strings, dates + # etc. leads to a little too much magic, reflection doesn't know + # if it should expect encoded strings or unicodes, etc. 
+ self.dbapi_type_map = { + self.dbapi.CLOB: oracle.CLOB(), + self.dbapi.NCLOB: oracle.NCLOB(), + self.dbapi.BLOB: oracle.BLOB(), + self.dbapi.BINARY: oracle.RAW(), + } + + @classmethod + def dbapi(cls): + import cx_Oracle + return cx_Oracle + + def connect(self, *cargs, **cparams): + if self._retry_on_12516: + # emergency flag for the SQLAlchemy test suite, which has + # decreased in stability since cx_oracle 5.3; generalized + # "retry on connect" functionality is part of an upcoming + # SQLAlchemy feature + try: + return self.dbapi.connect(*cargs, **cparams) + except self.dbapi.DatabaseError as err: + if "ORA-12516" in str(err): + time.sleep(2) + return self.dbapi.connect(*cargs, **cparams) + else: + raise + else: + return super(OracleDialect_cx_oracle, self).connect( + *cargs, **cparams) + + def initialize(self, connection): + super(OracleDialect_cx_oracle, self).initialize(connection) + if self._is_oracle_8: + self.supports_unicode_binds = False + self._detect_decimal_char(connection) + + def _detect_decimal_char(self, connection): + """detect if the decimal separator character is not '.', as + is the case with European locale settings for NLS_LANG. + + cx_oracle itself uses similar logic when it formats Python + Decimal objects to strings on the bind side (as of 5.0.3), + as Oracle sends/receives string numerics only in the + current locale. + + """ + if self.cx_oracle_ver < (5,): + # no output type handlers before version 5 + return + + cx_Oracle = self.dbapi + conn = connection.connection + + # override the output_type_handler that's + # on the cx_oracle connection with a plain + # one on the cursor + + def output_type_handler(cursor, name, defaultType, + size, precision, scale): + return cursor.var( + cx_Oracle.STRING, + 255, arraysize=cursor.arraysize) + + cursor = conn.cursor() + cursor.outputtypehandler = output_type_handler + cursor.execute("SELECT 0.1 FROM DUAL") + val = cursor.fetchone()[0] + cursor.close() + char = re.match(r"([\.,])", val).group(1) + if char != '.': + _detect_decimal = self._detect_decimal + self._detect_decimal = \ + lambda value: _detect_decimal(value.replace(char, '.')) + self._to_decimal = \ + lambda value: decimal.Decimal(value.replace(char, '.')) + + def _detect_decimal(self, value): + if "." in value: + return decimal.Decimal(value) + else: + return int(value) + + _to_decimal = decimal.Decimal + + def on_connect(self): + if self.cx_oracle_ver < (5,): + # no output type handlers before version 5 + return + + cx_Oracle = self.dbapi + + def output_type_handler(cursor, name, defaultType, + size, precision, scale): + # convert all NUMBER with precision + positive scale to Decimal + # this almost allows "native decimal" mode. + if self.supports_native_decimal and \ + defaultType == cx_Oracle.NUMBER and \ + precision and scale > 0: + return cursor.var( + cx_Oracle.STRING, + 255, + outconverter=self._to_decimal, + arraysize=cursor.arraysize) + # if NUMBER with zero precision and 0 or neg scale, this appears + # to indicate "ambiguous". Use a slower converter that will + # make a decision based on each value received - the type + # may change from row to row (!). This kills + # off "native decimal" mode, handlers still needed. 
+ elif self.supports_native_decimal and \ + defaultType == cx_Oracle.NUMBER \ + and not precision and scale <= 0: + return cursor.var( + cx_Oracle.STRING, + 255, + outconverter=self._detect_decimal, + arraysize=cursor.arraysize) + # allow all strings to come back natively as Unicode + elif self.coerce_to_unicode and \ + defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR): + return cursor.var(util.text_type, size, cursor.arraysize) + + def on_connect(conn): + conn.outputtypehandler = output_type_handler + + return on_connect + + def create_connect_args(self, url): + dialect_opts = dict(url.query) + for opt in ('use_ansi', 'auto_setinputsizes', 'auto_convert_lobs', + 'threaded', 'allow_twophase'): + if opt in dialect_opts: + util.coerce_kw_type(dialect_opts, opt, bool) + setattr(self, opt, dialect_opts[opt]) + + database = url.database + service_name = dialect_opts.get('service_name', None) + if database or service_name: + # if we have a database, then we have a remote host + port = url.port + if port: + port = int(port) + else: + port = 1521 + + if database and service_name: + raise exc.InvalidRequestError( + '"service_name" option shouldn\'t ' + 'be used with a "database" part of the url') + if database: + makedsn_kwargs = {'sid': database} + if service_name: + makedsn_kwargs = {'service_name': service_name} + + dsn = self.dbapi.makedsn(url.host, port, **makedsn_kwargs) + else: + # we have a local tnsname + dsn = url.host + + opts = dict( + threaded=self.threaded, + twophase=self.allow_twophase, + ) + + if dsn is not None: + opts['dsn'] = dsn + if url.password is not None: + opts['password'] = url.password + if url.username is not None: + opts['user'] = url.username + + if util.py2k: + if self._cx_oracle_with_unicode: + for k, v in opts.items(): + if isinstance(v, str): + opts[k] = unicode(v) + else: + for k, v in opts.items(): + if isinstance(v, unicode): + opts[k] = str(v) + + if 'mode' in url.query: + opts['mode'] = url.query['mode'] + if isinstance(opts['mode'], util.string_types): + mode = opts['mode'].upper() + if mode == 'SYSDBA': + opts['mode'] = self.dbapi.SYSDBA + elif mode == 'SYSOPER': + opts['mode'] = self.dbapi.SYSOPER + else: + util.coerce_kw_type(opts, 'mode', int) + return ([], opts) + + def _get_server_version_info(self, connection): + return tuple( + int(x) + for x in connection.connection.version.split('.') + ) + + def is_disconnect(self, e, connection, cursor): + error, = e.args + if isinstance(e, self.dbapi.InterfaceError): + return "not connected" in str(e) + elif hasattr(error, 'code'): + # ORA-00028: your session has been killed + # ORA-03114: not connected to ORACLE + # ORA-03113: end-of-file on communication channel + # ORA-03135: connection lost contact + # ORA-01033: ORACLE initialization or shutdown in progress + # ORA-02396: exceeded maximum idle time, please connect again + # TODO: Others ? + return error.code in (28, 3114, 3113, 3135, 1033, 2396) + else: + return False + + def create_xid(self): + """create a two-phase transaction ID. + + this id will be passed to do_begin_twophase(), do_rollback_twophase(), + do_commit_twophase(). 
its format is unspecified.""" + + id = random.randint(0, 2 ** 128) + return (0x1234, "%032x" % id, "%032x" % 9) + + def do_executemany(self, cursor, statement, parameters, context=None): + if isinstance(parameters, tuple): + parameters = list(parameters) + cursor.executemany(statement, parameters) + + def do_begin_twophase(self, connection, xid): + connection.connection.begin(*xid) + + def do_prepare_twophase(self, connection, xid): + result = connection.connection.prepare() + connection.info['cx_oracle_prepared'] = result + + def do_rollback_twophase(self, connection, xid, is_prepared=True, + recover=False): + self.do_rollback(connection.connection) + + def do_commit_twophase(self, connection, xid, is_prepared=True, + recover=False): + if not is_prepared: + self.do_commit(connection.connection) + else: + oci_prepared = connection.info['cx_oracle_prepared'] + if oci_prepared: + self.do_commit(connection.connection) + + def do_recover_twophase(self, connection): + connection.info.pop('cx_oracle_prepared', None) + +dialect = OracleDialect_cx_oracle diff --git a/app/lib/sqlalchemy/dialects/oracle/zxjdbc.py b/app/lib/sqlalchemy/dialects/oracle/zxjdbc.py new file mode 100644 index 0000000..c8a31f1 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/oracle/zxjdbc.py @@ -0,0 +1,235 @@ +# oracle/zxjdbc.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: oracle+zxjdbc + :name: zxJDBC for Jython + :dbapi: zxjdbc + :connectstring: oracle+zxjdbc://user:pass@host/dbname + :driverurl: http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html + + .. note:: Jython is not supported by current versions of SQLAlchemy. The + zxjdbc dialect should be considered as experimental. + +""" +import decimal +import re + +from sqlalchemy import sql, types as sqltypes, util +from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector +from sqlalchemy.dialects.oracle.base import (OracleCompiler, + OracleDialect, + OracleExecutionContext) +from sqlalchemy.engine import result as _result +from sqlalchemy.sql import expression +import collections + +SQLException = zxJDBC = None + + +class _ZxJDBCDate(sqltypes.Date): + + def result_processor(self, dialect, coltype): + def process(value): + if value is None: + return None + else: + return value.date() + return process + + +class _ZxJDBCNumeric(sqltypes.Numeric): + + def result_processor(self, dialect, coltype): + # XXX: does the dialect return Decimal or not??? 
+ # if it does (in all cases), we could use a None processor as well as + # the to_float generic processor + if self.asdecimal: + def process(value): + if isinstance(value, decimal.Decimal): + return value + else: + return decimal.Decimal(str(value)) + else: + def process(value): + if isinstance(value, decimal.Decimal): + return float(value) + else: + return value + return process + + +class OracleCompiler_zxjdbc(OracleCompiler): + + def returning_clause(self, stmt, returning_cols): + self.returning_cols = list( + expression._select_iterables(returning_cols)) + + # within_columns_clause=False so that labels (foo AS bar) don't render + columns = [self.process(c, within_columns_clause=False) + for c in self.returning_cols] + + if not hasattr(self, 'returning_parameters'): + self.returning_parameters = [] + + binds = [] + for i, col in enumerate(self.returning_cols): + dbtype = col.type.dialect_impl( + self.dialect).get_dbapi_type(self.dialect.dbapi) + self.returning_parameters.append((i + 1, dbtype)) + + bindparam = sql.bindparam( + "ret_%d" % i, value=ReturningParam(dbtype)) + self.binds[bindparam.key] = bindparam + binds.append( + self.bindparam_string(self._truncate_bindparam(bindparam))) + + return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds) + + +class OracleExecutionContext_zxjdbc(OracleExecutionContext): + + def pre_exec(self): + if hasattr(self.compiled, 'returning_parameters'): + # prepare a zxJDBC statement so we can grab its underlying + # OraclePreparedStatement's getReturnResultSet later + self.statement = self.cursor.prepare(self.statement) + + def get_result_proxy(self): + if hasattr(self.compiled, 'returning_parameters'): + rrs = None + try: + try: + rrs = self.statement.__statement__.getReturnResultSet() + next(rrs) + except SQLException as sqle: + msg = '%s [SQLCode: %d]' % ( + sqle.getMessage(), sqle.getErrorCode()) + if sqle.getSQLState() is not None: + msg += ' [SQLState: %s]' % sqle.getSQLState() + raise zxJDBC.Error(msg) + else: + row = tuple( + self.cursor.datahandler.getPyObject( + rrs, index, dbtype) + for index, dbtype in + self.compiled.returning_parameters) + return ReturningResultProxy(self, row) + finally: + if rrs is not None: + try: + rrs.close() + except SQLException: + pass + self.statement.close() + + return _result.ResultProxy(self) + + def create_cursor(self): + cursor = self._dbapi_connection.cursor() + cursor.datahandler = self.dialect.DataHandler(cursor.datahandler) + return cursor + + +class ReturningResultProxy(_result.FullyBufferedResultProxy): + + """ResultProxy backed by the RETURNING ResultSet results.""" + + def __init__(self, context, returning_row): + self._returning_row = returning_row + super(ReturningResultProxy, self).__init__(context) + + def _cursor_description(self): + ret = [] + for c in self.context.compiled.returning_cols: + if hasattr(c, 'name'): + ret.append((c.name, c.type)) + else: + ret.append((c.anon_label, c.type)) + return ret + + def _buffer_rows(self): + return collections.deque([self._returning_row]) + + +class ReturningParam(object): + + """A bindparam value representing a RETURNING parameter. + + Specially handled by OracleReturningDataHandler. 
+ """ + + def __init__(self, type): + self.type = type + + def __eq__(self, other): + if isinstance(other, ReturningParam): + return self.type == other.type + return NotImplemented + + def __ne__(self, other): + if isinstance(other, ReturningParam): + return self.type != other.type + return NotImplemented + + def __repr__(self): + kls = self.__class__ + return '<%s.%s object at 0x%x type=%s>' % ( + kls.__module__, kls.__name__, id(self), self.type) + + +class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect): + jdbc_db_name = 'oracle' + jdbc_driver_name = 'oracle.jdbc.OracleDriver' + + statement_compiler = OracleCompiler_zxjdbc + execution_ctx_cls = OracleExecutionContext_zxjdbc + + colspecs = util.update_copy( + OracleDialect.colspecs, + { + sqltypes.Date: _ZxJDBCDate, + sqltypes.Numeric: _ZxJDBCNumeric + } + ) + + def __init__(self, *args, **kwargs): + super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs) + global SQLException, zxJDBC + from java.sql import SQLException + from com.ziclix.python.sql import zxJDBC + from com.ziclix.python.sql.handler import OracleDataHandler + + class OracleReturningDataHandler(OracleDataHandler): + """zxJDBC DataHandler that specially handles ReturningParam.""" + + def setJDBCObject(self, statement, index, object, dbtype=None): + if type(object) is ReturningParam: + statement.registerReturnParameter(index, object.type) + elif dbtype is None: + OracleDataHandler.setJDBCObject( + self, statement, index, object) + else: + OracleDataHandler.setJDBCObject( + self, statement, index, object, dbtype) + self.DataHandler = OracleReturningDataHandler + + def initialize(self, connection): + super(OracleDialect_zxjdbc, self).initialize(connection) + self.implicit_returning = \ + connection.connection.driverversion >= '10.2' + + def _create_jdbc_url(self, url): + return 'jdbc:oracle:thin:@%s:%s:%s' % ( + url.host, url.port or 1521, url.database) + + def _get_server_version_info(self, connection): + version = re.search( + r'Release ([\d\.]+)', connection.connection.dbversion).group(1) + return tuple(int(x) for x in version.split('.')) + +dialect = OracleDialect_zxjdbc diff --git a/app/lib/sqlalchemy/dialects/postgresql/__init__.py b/app/lib/sqlalchemy/dialects/postgresql/__init__.py new file mode 100644 index 0000000..a6872cf --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/__init__.py @@ -0,0 +1,36 @@ +# postgresql/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from . 
import base, psycopg2, pg8000, pypostgresql, pygresql, \
+    zxjdbc, psycopg2cffi
+
+base.dialect = psycopg2.dialect
+
+from .base import \
+    INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, \
+    INET, CIDR, UUID, BIT, MACADDR, OID, DOUBLE_PRECISION, TIMESTAMP, TIME, \
+    DATE, BYTEA, BOOLEAN, INTERVAL, ENUM, dialect, TSVECTOR, DropEnumType, \
+    CreateEnumType
+from .hstore import HSTORE, hstore
+from .json import JSON, JSONB
+from .array import array, ARRAY, Any, All
+from .ext import aggregate_order_by, ExcludeConstraint, array_agg
+from .dml import insert, Insert
+
+from .ranges import INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, \
+    TSTZRANGE
+
+__all__ = (
+    'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC',
+    'FLOAT', 'REAL', 'INET', 'CIDR', 'UUID', 'BIT', 'MACADDR', 'OID',
+    'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', 'DATE', 'BYTEA', 'BOOLEAN',
+    'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'array', 'HSTORE',
+    'hstore', 'INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE',
+    'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONB', 'Any', 'All',
+    'DropEnumType', 'CreateEnumType', 'ExcludeConstraint',
+    'aggregate_order_by', 'array_agg', 'insert', 'Insert'
+)
diff --git a/app/lib/sqlalchemy/dialects/postgresql/array.py b/app/lib/sqlalchemy/dialects/postgresql/array.py
new file mode 100644
index 0000000..98cab95
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/array.py
@@ -0,0 +1,314 @@
+# postgresql/array.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from .base import ischema_names
+from ...sql import expression, operators
+from ...sql.base import SchemaEventTarget
+from ... import types as sqltypes
+
+try:
+    from uuid import UUID as _python_UUID
+except ImportError:
+    _python_UUID = None
+
+
+def Any(other, arrexpr, operator=operators.eq):
+    """A synonym for the :meth:`.ARRAY.Comparator.any` method.
+
+    This method is legacy and is here for backwards-compatibility.
+
+    .. seealso::
+
+        :func:`.expression.any_`
+
+    """
+
+    return arrexpr.any(other, operator)
+
+
+def All(other, arrexpr, operator=operators.eq):
+    """A synonym for the :meth:`.ARRAY.Comparator.all` method.
+
+    This method is legacy and is here for backwards-compatibility.
+
+    .. seealso::
+
+        :func:`.expression.all_`
+
+    """
+
+    return arrexpr.all(other, operator)
+
+
+class array(expression.Tuple):
+
+    """A PostgreSQL ARRAY literal.
+
+    This is used to produce ARRAY literals in SQL expressions, e.g.::
+
+        from sqlalchemy.dialects.postgresql import array
+        from sqlalchemy.dialects import postgresql
+        from sqlalchemy import select, func
+
+        stmt = select([
+            array([1, 2]) + array([3, 4, 5])
+        ])
+
+        print stmt.compile(dialect=postgresql.dialect())
+
+    Produces the SQL::
+
+        SELECT ARRAY[%(param_1)s, %(param_2)s] ||
+            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1
+
+    An instance of :class:`.array` will always have the datatype
+    :class:`.ARRAY`.  The "inner" type of the array is inferred from
+    the values present, unless the ``type_`` keyword argument is passed::
+
+        array(['foo', 'bar'], type_=CHAR)
+
+    .. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.
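+
+    As a further illustrative sketch (``mytable`` here is a hypothetical
+    :class:`.Table` with an ``ARRAY(Integer)`` column named ``data``), an
+    :class:`.array` literal may also be compared directly against an ARRAY
+    column::
+
+        stmt = select([mytable]).where(mytable.c.data == array([1, 2]))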
+ + See also: + + :class:`.postgresql.ARRAY` + + """ + __visit_name__ = 'array' + + def __init__(self, clauses, **kw): + super(array, self).__init__(*clauses, **kw) + self.type = ARRAY(self.type) + + def _bind_param(self, operator, obj, _assume_scalar=False, type_=None): + if _assume_scalar or operator is operators.getitem: + # if getitem->slice were called, Indexable produces + # a Slice object from that + assert isinstance(obj, int) + return expression.BindParameter( + None, obj, _compared_to_operator=operator, + type_=type_, + _compared_to_type=self.type, unique=True) + + else: + return array([ + self._bind_param(operator, o, _assume_scalar=True, type_=type_) + for o in obj]) + + def self_group(self, against=None): + if (against in ( + operators.any_op, operators.all_op, operators.getitem)): + return expression.Grouping(self) + else: + return self + + +CONTAINS = operators.custom_op("@>", precedence=5) + +CONTAINED_BY = operators.custom_op("<@", precedence=5) + +OVERLAP = operators.custom_op("&&", precedence=5) + + +class ARRAY(SchemaEventTarget, sqltypes.ARRAY): + + """PostgreSQL ARRAY type. + + .. versionchanged:: 1.1 The :class:`.postgresql.ARRAY` type is now + a subclass of the core :class:`.types.ARRAY` type. + + The :class:`.postgresql.ARRAY` type is constructed in the same way + as the core :class:`.types.ARRAY` type; a member type is required, and a + number of dimensions is recommended if the type is to be used for more + than one dimension:: + + from sqlalchemy.dialects import postgresql + + mytable = Table("mytable", metadata, + Column("data", postgresql.ARRAY(Integer, dimensions=2)) + ) + + The :class:`.postgresql.ARRAY` type provides all operations defined on the + core :class:`.types.ARRAY` type, including support for "dimensions", indexed + access, and simple matching such as :meth:`.types.ARRAY.Comparator.any` + and :meth:`.types.ARRAY.Comparator.all`. :class:`.postgresql.ARRAY` class also + provides PostgreSQL-specific methods for containment operations, including + :meth:`.postgresql.ARRAY.Comparator.contains` + :meth:`.postgresql.ARRAY.Comparator.contained_by`, + and :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.:: + + mytable.c.data.contains([1, 2]) + + The :class:`.postgresql.ARRAY` type may not be supported on all + PostgreSQL DBAPIs; it is currently known to work on psycopg2 only. + + Additionally, the :class:`.postgresql.ARRAY` type does not work directly in + conjunction with the :class:`.ENUM` type. For a workaround, see the + special type at :ref:`postgresql_array_of_enum`. + + .. seealso:: + + :class:`.types.ARRAY` - base array type + + :class:`.postgresql.array` - produces a literal array value. + + """ + + class Comparator(sqltypes.ARRAY.Comparator): + + """Define comparison operations for :class:`.ARRAY`. + + Note that these operations are in addition to those provided + by the base :class:`.types.ARRAY.Comparator` class, including + :meth:`.types.ARRAY.Comparator.any` and + :meth:`.types.ARRAY.Comparator.all`. + + """ + + def contains(self, other, **kwargs): + """Boolean expression. Test if elements are a superset of the + elements of the argument array expression. + """ + return self.operate(CONTAINS, other, result_type=sqltypes.Boolean) + + def contained_by(self, other): + """Boolean expression. Test if elements are a proper subset of the + elements of the argument array expression. + """ + return self.operate( + CONTAINED_BY, other, result_type=sqltypes.Boolean) + + def overlap(self, other): + """Boolean expression. 
Test if array has elements in common with + an argument array expression. + """ + return self.operate(OVERLAP, other, result_type=sqltypes.Boolean) + + comparator_factory = Comparator + + def __init__(self, item_type, as_tuple=False, dimensions=None, + zero_indexes=False): + """Construct an ARRAY. + + E.g.:: + + Column('myarray', ARRAY(Integer)) + + Arguments are: + + :param item_type: The data type of items of this array. Note that + dimensionality is irrelevant here, so multi-dimensional arrays like + ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as + ``ARRAY(ARRAY(Integer))`` or such. + + :param as_tuple=False: Specify whether return results + should be converted to tuples from lists. DBAPIs such + as psycopg2 return lists by default. When tuples are + returned, the results are hashable. + + :param dimensions: if non-None, the ARRAY will assume a fixed + number of dimensions. This will cause the DDL emitted for this + ARRAY to include the exact number of bracket clauses ``[]``, + and will also optimize the performance of the type overall. + Note that PG arrays are always implicitly "non-dimensioned", + meaning they can store any number of dimensions no matter how + they were declared. + + :param zero_indexes=False: when True, index values will be converted + between Python zero-based and PostgreSQL one-based indexes, e.g. + a value of one will be added to all index values before passing + to the database. + + .. versionadded:: 0.9.5 + + + """ + if isinstance(item_type, ARRAY): + raise ValueError("Do not nest ARRAY types; ARRAY(basetype) " + "handles multi-dimensional arrays of basetype") + if isinstance(item_type, type): + item_type = item_type() + self.item_type = item_type + self.as_tuple = as_tuple + self.dimensions = dimensions + self.zero_indexes = zero_indexes + + @property + def hashable(self): + return self.as_tuple + + @property + def python_type(self): + return list + + def compare_values(self, x, y): + return x == y + + def _set_parent(self, column): + """Support SchemaEventTarget""" + + if isinstance(self.item_type, SchemaEventTarget): + self.item_type._set_parent(column) + + def _set_parent_with_dispatch(self, parent): + """Support SchemaEventTarget""" + + if isinstance(self.item_type, SchemaEventTarget): + self.item_type._set_parent_with_dispatch(parent) + + def _proc_array(self, arr, itemproc, dim, collection): + if dim is None: + arr = list(arr) + if dim == 1 or dim is None and ( + # this has to be (list, tuple), or at least + # not hasattr('__iter__'), since Py3K strings + # etc. 
have __iter__ + not arr or not isinstance(arr[0], (list, tuple))): + if itemproc: + return collection(itemproc(x) for x in arr) + else: + return collection(arr) + else: + return collection( + self._proc_array( + x, itemproc, + dim - 1 if dim is not None else None, + collection) + for x in arr + ) + + def bind_processor(self, dialect): + item_proc = self.item_type.dialect_impl(dialect).\ + bind_processor(dialect) + + def process(value): + if value is None: + return value + else: + return self._proc_array( + value, + item_proc, + self.dimensions, + list) + return process + + def result_processor(self, dialect, coltype): + item_proc = self.item_type.dialect_impl(dialect).\ + result_processor(dialect, coltype) + + def process(value): + if value is None: + return value + else: + return self._proc_array( + value, + item_proc, + self.dimensions, + tuple if self.as_tuple else list) + return process + +ischema_names['_array'] = ARRAY diff --git a/app/lib/sqlalchemy/dialects/postgresql/base.py b/app/lib/sqlalchemy/dialects/postgresql/base.py new file mode 100644 index 0000000..26d974e --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/base.py @@ -0,0 +1,2989 @@ +# postgresql/base.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +r""" +.. dialect:: postgresql + :name: PostgreSQL + +.. _postgresql_sequences: + +Sequences/SERIAL +---------------- + +PostgreSQL supports sequences, and SQLAlchemy uses these as the default means +of creating new primary key values for integer-based primary key columns. When +creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for +integer-based primary key columns, which generates a sequence and server side +default corresponding to the column. + +To specify a specific named sequence to be used for primary key generation, +use the :func:`~sqlalchemy.schema.Sequence` construct:: + + Table('sometable', metadata, + Column('id', Integer, Sequence('some_id_seq'), primary_key=True) + ) + +When SQLAlchemy issues a single INSERT statement, to fulfill the contract of +having the "last insert identifier" available, a RETURNING clause is added to +the INSERT statement which specifies the primary key columns should be +returned after the statement completes. The RETURNING functionality only takes +place if PostgreSQL 8.2 or later is in use. As a fallback approach, the +sequence, whether specified explicitly or implicitly via ``SERIAL``, is +executed independently beforehand, the returned value to be used in the +subsequent insert. Note that when an +:func:`~sqlalchemy.sql.expression.insert()` construct is executed using +"executemany" semantics, the "last inserted identifier" functionality does not +apply; no RETURNING clause is emitted nor is the sequence pre-executed in this +case. + +To force the usage of RETURNING by default off, specify the flag +``implicit_returning=False`` to :func:`.create_engine`. + +.. _postgresql_isolation_level: + +Transaction Isolation Level +--------------------------- + +All PostgreSQL dialects support setting of transaction isolation level +both via a dialect-specific parameter +:paramref:`.create_engine.isolation_level` accepted by :func:`.create_engine`, +as well as the :paramref:`.Connection.execution_options.isolation_level` +argument as passed to :meth:`.Connection.execution_options`. 
+When using a non-psycopg2 dialect, this feature works by issuing the command +``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL `` for +each new connection. For the special AUTOCOMMIT isolation level, +DBAPI-specific techniques are used. + +To set isolation level using :func:`.create_engine`:: + + engine = create_engine( + "postgresql+pg8000://scott:tiger@localhost/test", + isolation_level="READ UNCOMMITTED" + ) + +To set using per-connection execution options:: + + connection = engine.connect() + connection = connection.execution_options( + isolation_level="READ COMMITTED" + ) + +Valid values for ``isolation_level`` include: + +* ``READ COMMITTED`` +* ``READ UNCOMMITTED`` +* ``REPEATABLE READ`` +* ``SERIALIZABLE`` +* ``AUTOCOMMIT`` - on psycopg2 / pg8000 only + +.. seealso:: + + :ref:`psycopg2_isolation_level` + + :ref:`pg8000_isolation_level` + +.. _postgresql_schema_reflection: + +Remote-Schema Table Introspection and PostgreSQL search_path +------------------------------------------------------------ + +The PostgreSQL dialect can reflect tables from any schema. The +:paramref:`.Table.schema` argument, or alternatively the +:paramref:`.MetaData.reflect.schema` argument determines which schema will +be searched for the table or tables. The reflected :class:`.Table` objects +will in all cases retain this ``.schema`` attribute as was specified. +However, with regards to tables which these :class:`.Table` objects refer to +via foreign key constraint, a decision must be made as to how the ``.schema`` +is represented in those remote tables, in the case where that remote +schema name is also a member of the current +`PostgreSQL search path +`_. + +By default, the PostgreSQL dialect mimics the behavior encouraged by +PostgreSQL's own ``pg_get_constraintdef()`` builtin procedure. This function +returns a sample definition for a particular foreign key constraint, +omitting the referenced schema name from that definition when the name is +also in the PostgreSQL schema search path. The interaction below +illustrates this behavior:: + + test=> CREATE TABLE test_schema.referred(id INTEGER PRIMARY KEY); + CREATE TABLE + test=> CREATE TABLE referring( + test(> id INTEGER PRIMARY KEY, + test(> referred_id INTEGER REFERENCES test_schema.referred(id)); + CREATE TABLE + test=> SET search_path TO public, test_schema; + test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM + test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n + test-> ON n.oid = c.relnamespace + test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid + test-> WHERE c.relname='referring' AND r.contype = 'f' + test-> ; + pg_get_constraintdef + --------------------------------------------------- + FOREIGN KEY (referred_id) REFERENCES referred(id) + (1 row) + +Above, we created a table ``referred`` as a member of the remote schema +``test_schema``, however when we added ``test_schema`` to the +PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the +``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of +the function. 
+ +On the other hand, if we set the search path back to the typical default +of ``public``:: + + test=> SET search_path TO public; + SET + +The same query against ``pg_get_constraintdef()`` now returns the fully +schema-qualified name for us:: + + test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM + test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n + test-> ON n.oid = c.relnamespace + test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid + test-> WHERE c.relname='referring' AND r.contype = 'f'; + pg_get_constraintdef + --------------------------------------------------------------- + FOREIGN KEY (referred_id) REFERENCES test_schema.referred(id) + (1 row) + +SQLAlchemy will by default use the return value of ``pg_get_constraintdef()`` +in order to determine the remote schema name. That is, if our ``search_path`` +were set to include ``test_schema``, and we invoked a table +reflection process as follows:: + + >>> from sqlalchemy import Table, MetaData, create_engine + >>> engine = create_engine("postgresql://scott:tiger@localhost/test") + >>> with engine.connect() as conn: + ... conn.execute("SET search_path TO test_schema, public") + ... meta = MetaData() + ... referring = Table('referring', meta, + ... autoload=True, autoload_with=conn) + ... + + +The above process would deliver to the :attr:`.MetaData.tables` collection +``referred`` table named **without** the schema:: + + >>> meta.tables['referred'].schema is None + True + +To alter the behavior of reflection such that the referred schema is +maintained regardless of the ``search_path`` setting, use the +``postgresql_ignore_search_path`` option, which can be specified as a +dialect-specific argument to both :class:`.Table` as well as +:meth:`.MetaData.reflect`:: + + >>> with engine.connect() as conn: + ... conn.execute("SET search_path TO test_schema, public") + ... meta = MetaData() + ... referring = Table('referring', meta, autoload=True, + ... autoload_with=conn, + ... postgresql_ignore_search_path=True) + ... + + +We will now have ``test_schema.referred`` stored as schema-qualified:: + + >>> meta.tables['test_schema.referred'].schema + 'test_schema' + +.. sidebar:: Best Practices for PostgreSQL Schema reflection + + The description of PostgreSQL schema reflection behavior is complex, and + is the product of many years of dealing with widely varied use cases and + user preferences. But in fact, there's no need to understand any of it if + you just stick to the simplest use pattern: leave the ``search_path`` set + to its default of ``public`` only, never refer to the name ``public`` as + an explicit schema name otherwise, and refer to all other schema names + explicitly when building up a :class:`.Table` object. The options + described here are only for those users who can't, or prefer not to, stay + within these guidelines. + +Note that **in all cases**, the "default" schema is always reflected as +``None``. The "default" schema on PostgreSQL is that which is returned by the +PostgreSQL ``current_schema()`` function. On a typical PostgreSQL +installation, this is the name ``public``. So a table that refers to another +which is in the ``public`` (i.e. default) schema will always have the +``.schema`` attribute set to ``None``. + +.. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path`` + dialect-level option accepted by :class:`.Table` and + :meth:`.MetaData.reflect`. + + +.. seealso:: + + `The Schema Search Path + `_ + - on the PostgreSQL website. 
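+
+As a minimal sketch of the simple pattern recommended above (the engine URL,
+table and schema names are hypothetical), non-default schemas are spelled out
+explicitly and ``search_path`` is left at its default of ``public``::
+
+    from sqlalchemy import Table, MetaData, create_engine
+
+    engine = create_engine("postgresql://scott:tiger@localhost/test")
+    meta = MetaData()
+
+    # the reflected Table retains schema='test_schema' explicitly
+    referred = Table('referred', meta, schema='test_schema',
+                     autoload=True, autoload_with=engine)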
+
+INSERT/UPDATE...RETURNING
+-------------------------
+
+The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and
+``DELETE..RETURNING`` syntaxes.  ``INSERT..RETURNING`` is used by default
+for single-row INSERT statements in order to fetch newly generated
+primary key identifiers.  To specify an explicit ``RETURNING`` clause,
+use the :meth:`._UpdateBase.returning` method on a per-statement basis::
+
+    # INSERT..RETURNING
+    result = table.insert().returning(table.c.col1, table.c.col2).\
+        values(name='foo')
+    print result.fetchall()
+
+    # UPDATE..RETURNING
+    result = table.update().returning(table.c.col1, table.c.col2).\
+        where(table.c.name=='foo').values(name='bar')
+    print result.fetchall()
+
+    # DELETE..RETURNING
+    result = table.delete().returning(table.c.col1, table.c.col2).\
+        where(table.c.name=='foo')
+    print result.fetchall()
+
+.. _postgresql_insert_on_conflict:
+
+INSERT...ON CONFLICT (Upsert)
+-----------------------------
+
+Starting with version 9.5, PostgreSQL allows "upserts" (update or insert)
+of rows into a table via the ``ON CONFLICT`` clause of the ``INSERT``
+statement.  A candidate row will only be inserted if that row does not
+violate any unique constraints.  In the case of a unique constraint
+violation, a secondary action can occur which can be either "DO UPDATE",
+indicating that the data in the target row should be updated, or
+"DO NOTHING", which indicates to silently skip this row.
+
+Conflicts are determined using existing unique constraints and indexes.
+These constraints may be identified either using their name as stated in
+DDL, or they may be *inferred* by stating the columns and conditions that
+comprise the indexes.
+
+SQLAlchemy provides ``ON CONFLICT`` support via the PostgreSQL-specific
+:func:`.postgresql.dml.insert()` function, which provides
+the generative methods :meth:`~.postgresql.dml.Insert.on_conflict_do_update`
+and :meth:`~.postgresql.dml.Insert.on_conflict_do_nothing`::
+
+    from sqlalchemy.dialects.postgresql import insert
+
+    insert_stmt = insert(my_table).values(
+        id='some_existing_id',
+        data='inserted value')
+
+    do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
+        index_elements=['id']
+    )
+
+    conn.execute(do_nothing_stmt)
+
+    do_update_stmt = insert_stmt.on_conflict_do_update(
+        constraint='pk_my_table',
+        set_=dict(data='updated value')
+    )
+
+    conn.execute(do_update_stmt)
+
+Both methods supply the "target" of the conflict using either the
+named constraint or by column inference:
+
+* The :paramref:`.Insert.on_conflict_do_update.index_elements` argument
+  specifies a sequence containing string column names, :class:`.Column`
+  objects, and/or SQL expression elements, which would identify a unique
+  index::
+
+    do_update_stmt = insert_stmt.on_conflict_do_update(
+        index_elements=['id'],
+        set_=dict(data='updated value')
+    )
+
+    do_update_stmt = insert_stmt.on_conflict_do_update(
+        index_elements=[my_table.c.id],
+        set_=dict(data='updated value')
+    )
+
+* When using :paramref:`.Insert.on_conflict_do_update.index_elements` to
+  infer an index, a partial index can be inferred by also specifying the
+  :paramref:`.Insert.on_conflict_do_update.index_where` parameter::
+
+    from sqlalchemy.dialects.postgresql import insert
+
+    stmt = insert(my_table).values(user_email='a@b.com', data='inserted data')
+    stmt = stmt.on_conflict_do_update(
+        index_elements=[my_table.c.user_email],
+        index_where=my_table.c.user_email.like('%@gmail.com'),
+        set_=dict(data=stmt.excluded.data)
+    )
+    conn.execute(stmt)
+
+
+* The :paramref:`.Insert.on_conflict_do_update.constraint` argument is
+  used to specify an index directly rather than inferring it.  This can be
+  the name of a UNIQUE constraint, a PRIMARY KEY constraint, or an INDEX::
+
+    do_update_stmt = insert_stmt.on_conflict_do_update(
+        constraint='my_table_idx_1',
+        set_=dict(data='updated value')
+    )
+
+    do_update_stmt = insert_stmt.on_conflict_do_update(
+        constraint='my_table_pk',
+        set_=dict(data='updated value')
+    )
+
+* The :paramref:`.Insert.on_conflict_do_update.constraint` argument may
+  also refer to a SQLAlchemy construct representing a constraint,
+  e.g. :class:`.UniqueConstraint`, :class:`.PrimaryKeyConstraint`,
+  :class:`.Index`, or :class:`.ExcludeConstraint`.  In this use,
+  if the constraint has a name, it is used directly.  Otherwise, if the
+  constraint is unnamed, then inference will be used, where the expressions
+  and optional WHERE clause of the constraint will be spelled out in the
+  construct.  This use is especially convenient
+  to refer to the named or unnamed primary key of a :class:`.Table` using the
+  :attr:`.Table.primary_key` attribute::
+
+    do_update_stmt = insert_stmt.on_conflict_do_update(
+        constraint=my_table.primary_key,
+        set_=dict(data='updated value')
+    )
+
+``ON CONFLICT...DO UPDATE`` is used to perform an update of the already
+existing row, using any combination of new values as well as values
+from the proposed insertion.  These values are specified using the
+:paramref:`.Insert.on_conflict_do_update.set_` parameter.  This
+parameter accepts a dictionary which consists of direct values
+for UPDATE::
+
+    from sqlalchemy.dialects.postgresql import insert
+
+    stmt = insert(my_table).values(id='some_id', data='inserted value')
+    do_update_stmt = stmt.on_conflict_do_update(
+        index_elements=['id'],
+        set_=dict(data='updated value')
+    )
+    conn.execute(do_update_stmt)
+
+.. warning::
+
+    The :meth:`.Insert.on_conflict_do_update` method does **not** take into
+    account Python-side default UPDATE values or generation functions,
+    e.g. those specified using :paramref:`.Column.onupdate`.
+    These values will not be exercised for an ON CONFLICT style of UPDATE,
+    unless they are manually specified in the
+    :paramref:`.Insert.on_conflict_do_update.set_` dictionary.
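+
+A minimal sketch of working around this follows; the ``updated_at`` column
+and the use of ``func.now()`` are hypothetical stand-ins for whatever the
+:paramref:`.Column.onupdate` callable would normally produce, named
+explicitly in ``set_``::
+
+    from sqlalchemy import func
+    from sqlalchemy.dialects.postgresql import insert
+
+    stmt = insert(my_table).values(id='some_id', data='inserted value')
+    do_update_stmt = stmt.on_conflict_do_update(
+        index_elements=['id'],
+        set_=dict(
+            data='updated value',
+            updated_at=func.now()  # mirror the column's onupdate by hand
+        )
+    )
+    conn.execute(do_update_stmt)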

In order to refer to the proposed insertion row, the special alias
:attr:`~.postgresql.dml.Insert.excluded` is available as an attribute on
the :class:`.postgresql.dml.Insert` object; this object is a
:class:`.ColumnCollection` which contains all columns of the target
table::

    from sqlalchemy.dialects.postgresql import insert

    stmt = insert(my_table).values(
        id='some_id',
        data='inserted value',
        author='jlh')
    do_update_stmt = stmt.on_conflict_do_update(
        index_elements=['id'],
        set_=dict(data='updated value', author=stmt.excluded.author)
    )
    conn.execute(do_update_stmt)

The :meth:`.Insert.on_conflict_do_update` method also accepts
a WHERE clause using the :paramref:`.Insert.on_conflict_do_update.where`
parameter, which will limit those rows which receive an UPDATE::

    from sqlalchemy.dialects.postgresql import insert

    stmt = insert(my_table).values(
        id='some_id',
        data='inserted value',
        author='jlh')
    on_update_stmt = stmt.on_conflict_do_update(
        index_elements=['id'],
        set_=dict(data='updated value', author=stmt.excluded.author),
        where=(my_table.c.status == 2)
    )
    conn.execute(on_update_stmt)

``ON CONFLICT`` may also be used to skip inserting a row entirely
if any conflict with a unique or exclusion constraint occurs; below
this is illustrated using the
:meth:`~.postgresql.dml.Insert.on_conflict_do_nothing` method::

    from sqlalchemy.dialects.postgresql import insert

    stmt = insert(my_table).values(id='some_id', data='inserted value')
    stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
    conn.execute(stmt)

If ``DO NOTHING`` is used without specifying any columns or constraint,
it has the effect of skipping the INSERT for any unique or exclusion
constraint violation which occurs::

    from sqlalchemy.dialects.postgresql import insert

    stmt = insert(my_table).values(id='some_id', data='inserted value')
    stmt = stmt.on_conflict_do_nothing()
    conn.execute(stmt)

.. versionadded:: 1.1 Added support for PostgreSQL ON CONFLICT clauses

.. seealso::

    `INSERT .. ON CONFLICT
    <http://www.postgresql.org/docs/current/static/sql-insert.html#SQL-ON-CONFLICT>`_
    - in the PostgreSQL documentation.

.. _postgresql_match:

Full Text Search
----------------

SQLAlchemy makes available the PostgreSQL ``@@`` operator via the
:meth:`.ColumnElement.match` method on any textual column expression.
On a PostgreSQL dialect, an expression like the following::

    select([sometable.c.text.match("search string")])

will emit to the database::

    SELECT text @@ to_tsquery('search string') FROM table

The PostgreSQL text search functions such as ``to_tsquery()``
and ``to_tsvector()`` are available
explicitly using the standard :data:`.func` construct.  For example::

    select([
        func.to_tsvector('fat cats ate rats').match('cat & rat')
    ])

Emits the equivalent of::

    SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat')

The :class:`.postgresql.TSVECTOR` type can provide for explicit CAST::

    from sqlalchemy.dialects.postgresql import TSVECTOR
    from sqlalchemy import select, cast
    select([cast("some text", TSVECTOR)])

produces a statement equivalent to::

    SELECT CAST('some text' AS TSVECTOR) AS anon_1

Full Text Searches in PostgreSQL are influenced by a combination of: the
PostgreSQL setting of ``default_text_search_config``, the ``regconfig`` used
to build the GIN/GiST indexes, and the ``regconfig`` optionally passed in
during a query.
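
As a point of reference, a typical arrangement is a GIN index built over a
``to_tsvector()`` expression with an explicit ``regconfig``; a minimal sketch,
assuming a hypothetical ``documents`` table and SQLAlchemy's support for
expression-based (functional) indexes, might look like::

    from sqlalchemy import Column, Index, Integer, MetaData, Table, Text, func

    metadata = MetaData()
    documents = Table(
        'documents', metadata,
        Column('id', Integer, primary_key=True),
        Column('body', Text)
    )

    # functional GIN index over the tsvector expression, using the
    # same 'english' regconfig that the queries will pass explicitly
    Index(
        'ix_documents_body_tsv',
        func.to_tsvector('english', documents.c.body),
        postgresql_using='gin'
    )

Keeping the ``regconfig`` of the index expression aligned with the one passed
at query time is what allows the planner to match the query to the index.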

When performing a Full Text Search against a column that has a GIN or
GiST index that is already pre-computed (which is common on full text
searches) one may need to explicitly pass in a particular PostgreSQL
``regconfig`` value to ensure the query-planner utilizes the index and does
not re-compute the column on demand.

In order to provide for this explicit query planning, or to use different
search strategies, the ``match`` method accepts a ``postgresql_regconfig``
keyword argument::

    select([mytable.c.id]).where(
        mytable.c.title.match('somestring', postgresql_regconfig='english')
    )

Emits the equivalent of::

    SELECT mytable.id FROM mytable
    WHERE mytable.title @@ to_tsquery('english', 'somestring')

One can also specifically pass in a ``regconfig`` value to the
``to_tsvector()`` command as the initial argument::

    select([mytable.c.id]).where(
        func.to_tsvector('english', mytable.c.title)\
            .match('somestring', postgresql_regconfig='english')
    )

produces a statement equivalent to::

    SELECT mytable.id FROM mytable
    WHERE to_tsvector('english', mytable.title) @@
        to_tsquery('english', 'somestring')

It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from
PostgreSQL to ensure that you are generating queries with SQLAlchemy that
take full advantage of any indexes you may have created for full text search.

FROM ONLY ...
-------------

The dialect supports PostgreSQL's ONLY keyword for targeting only a particular
table in an inheritance hierarchy.  This can be used to produce the
``SELECT ... FROM ONLY``, ``UPDATE ONLY ...``, and ``DELETE FROM ONLY ...``
syntaxes.  It uses SQLAlchemy's hints mechanism::

    # SELECT ... FROM ONLY ...
    result = table.select().with_hint(table, 'ONLY', 'postgresql')
    print(result.fetchall())

    # UPDATE ONLY ...
    table.update(values=dict(foo='bar')).with_hint('ONLY',
                                                   dialect_name='postgresql')

    # DELETE FROM ONLY ...
    table.delete().with_hint('ONLY', dialect_name='postgresql')


.. _postgresql_indexes:

PostgreSQL-Specific Index Options
---------------------------------

Several extensions to the :class:`.Index` construct are available, specific
to the PostgreSQL dialect.

.. _postgresql_partial_indexes:

Partial Indexes
^^^^^^^^^^^^^^^

Partial indexes add criteria to the index definition so that the index is
applied to a subset of rows.  These can be specified on :class:`.Index`
using the ``postgresql_where`` keyword argument::

    Index('my_index', my_table.c.id, postgresql_where=my_table.c.value > 10)

Operator Classes
^^^^^^^^^^^^^^^^

PostgreSQL allows the specification of an *operator class* for each column of
an index (see
http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html).
The :class:`.Index` construct allows these to be specified via the
``postgresql_ops`` keyword argument::

    Index('my_index', my_table.c.id, my_table.c.data,
          postgresql_ops={
              'data': 'text_pattern_ops',
              'id': 'int4_ops'
          })

.. versionadded:: 0.7.2
    ``postgresql_ops`` keyword argument to :class:`.Index` construct.

Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of
the :class:`.Column`, i.e. the name used to access it from the ``.c``
collection of :class:`.Table`, which can be configured to be different than
the actual name of the column as expressed in the database.
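
For instance, a sketch of this distinction, assuming a hypothetical column
whose ``key`` differs from its database-side name, might look like::

    from sqlalchemy import Column, Index, MetaData, String, Table

    metadata = MetaData()
    my_table = Table(
        'my_table', metadata,
        # named "data" in the database, but accessed as my_table.c.datum
        Column('data', String, key='datum')
    )

    # the postgresql_ops dictionary is keyed on the Column key "datum",
    # not the database-side name "data"
    Index('my_index', my_table.c.datum,
          postgresql_ops={'datum': 'text_pattern_ops'})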

Index Types
^^^^^^^^^^^

PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well
as the ability for users to create their own (see
http://www.postgresql.org/docs/8.3/static/indexes-types.html).  These can be
specified on :class:`.Index` using the ``postgresql_using`` keyword argument::

    Index('my_index', my_table.c.data, postgresql_using='gin')

The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX command, so it *must* be a valid index type for your
version of PostgreSQL.

.. _postgresql_index_storage:

Index Storage Parameters
^^^^^^^^^^^^^^^^^^^^^^^^

PostgreSQL allows storage parameters to be set on indexes.  The storage
parameters available depend on the index method used by the index.  Storage
parameters can be specified on :class:`.Index` using the ``postgresql_with``
keyword argument::

    Index('my_index', my_table.c.data, postgresql_with={"fillfactor": 50})

.. versionadded:: 1.0.6

PostgreSQL allows the tablespace in which to create the index to be
specified.  The tablespace can be specified on :class:`.Index` using the
``postgresql_tablespace`` keyword argument::

    Index('my_index', my_table.c.data, postgresql_tablespace='my_tablespace')

.. versionadded:: 1.1

Note that the same option is available on :class:`.Table` as well.

.. _postgresql_index_concurrently:

Indexes with CONCURRENTLY
^^^^^^^^^^^^^^^^^^^^^^^^^

The PostgreSQL index option CONCURRENTLY is supported by passing the
flag ``postgresql_concurrently`` to the :class:`.Index` construct::

    tbl = Table('testtbl', m, Column('data', Integer))

    idx1 = Index('test_idx1', tbl.c.data, postgresql_concurrently=True)

The above index construct will render DDL for CREATE INDEX, assuming
PostgreSQL 8.2 or higher is detected or for a connection-less dialect, as::

    CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data)

For DROP INDEX, assuming PostgreSQL 9.2 or higher is detected or for
a connection-less dialect, it will emit::

    DROP INDEX CONCURRENTLY test_idx1

.. versionadded:: 1.1 support for CONCURRENTLY on DROP INDEX.  The
   CONCURRENTLY keyword is now only emitted if a high enough version
   of PostgreSQL is detected on the connection (or for a connection-less
   dialect).

When using CONCURRENTLY, the PostgreSQL database requires that the statement
be invoked outside of a transaction block.  The Python DBAPI enforces that
even for a single statement, a transaction is present, so to use this
construct, the DBAPI's "autocommit" mode must be used::

    metadata = MetaData()
    table = Table(
        "foo", metadata,
        Column("id", String))
    index = Index(
        "foo_idx", table.c.id, postgresql_concurrently=True)

    with engine.connect() as conn:
        with conn.execution_options(isolation_level='AUTOCOMMIT'):
            table.create(conn)

.. seealso::

    :ref:`postgresql_isolation_level`

.. _postgresql_index_reflection:

PostgreSQL Index Reflection
---------------------------

The PostgreSQL database creates a UNIQUE INDEX implicitly whenever the
UNIQUE CONSTRAINT construct is used.  When inspecting a table using
:class:`.Inspector`, the :meth:`.Inspector.get_indexes`
and the :meth:`.Inspector.get_unique_constraints` methods will report on
these two constructs distinctly; in the case of the index, the key
``duplicates_constraint`` will be present in the index entry if it is
detected as mirroring a constraint.
When performing reflection using
``Table(..., autoload=True)``, the UNIQUE INDEX is **not** returned
in :attr:`.Table.indexes` when it is detected as mirroring a
:class:`.UniqueConstraint` in the :attr:`.Table.constraints` collection.

.. versionchanged:: 1.0.0 - :class:`.Table` reflection now includes
   :class:`.UniqueConstraint` objects present in the
   :attr:`.Table.constraints` collection; the PostgreSQL backend will no
   longer include a "mirrored" :class:`.Index` construct in
   :attr:`.Table.indexes` if it is detected as corresponding to a unique
   constraint.

Special Reflection Options
--------------------------

The :class:`.Inspector` used for the PostgreSQL backend is an instance
of :class:`.PGInspector`, which offers additional methods::

    from sqlalchemy import create_engine, inspect

    engine = create_engine("postgresql+psycopg2://localhost/test")
    insp = inspect(engine)  # will be a PGInspector

    print(insp.get_enums())

.. autoclass:: PGInspector
    :members:

.. _postgresql_table_options:

PostgreSQL Table Options
------------------------

Several options for CREATE TABLE are supported directly by the PostgreSQL
dialect in conjunction with the :class:`.Table` construct:

* ``TABLESPACE``::

    Table("some_table", metadata, ..., postgresql_tablespace='some_tablespace')

  The above option is also available on the :class:`.Index` construct.

* ``ON COMMIT``::

    Table("some_table", metadata, ..., postgresql_on_commit='PRESERVE ROWS')

* ``WITH OIDS``::

    Table("some_table", metadata, ..., postgresql_with_oids=True)

* ``WITHOUT OIDS``::

    Table("some_table", metadata, ..., postgresql_with_oids=False)

* ``INHERITS``::

    Table("some_table", metadata, ..., postgresql_inherits="some_supertable")

    Table("some_table", metadata, ..., postgresql_inherits=("t1", "t2", ...))

.. versionadded:: 1.0.0

.. seealso::

    `PostgreSQL CREATE TABLE options
    <http://www.postgresql.org/docs/current/static/sql-createtable.html>`_

ARRAY Types
-----------

The PostgreSQL dialect supports arrays, both as multidimensional column types
as well as array literals:

* :class:`.postgresql.ARRAY` - ARRAY datatype

* :class:`.postgresql.array` - array literal

* :func:`.postgresql.array_agg` - ARRAY_AGG SQL function

* :class:`.postgresql.aggregate_order_by` - helper for PG's ORDER BY aggregate
  function syntax.

JSON Types
----------

The PostgreSQL dialect supports both JSON and JSONB datatypes, including
psycopg2's native support and support for all of PostgreSQL's special
operators:

* :class:`.postgresql.JSON`

* :class:`.postgresql.JSONB`

HSTORE Type
-----------

The PostgreSQL HSTORE type as well as hstore literals are supported:

* :class:`.postgresql.HSTORE` - HSTORE datatype

* :class:`.postgresql.hstore` - hstore literal

ENUM Types
----------

PostgreSQL has an independently creatable TYPE structure which is used
to implement an enumerated type.  This approach introduces significant
complexity on the SQLAlchemy side in terms of when this type should be
CREATED and DROPPED.  The type object is also an independently reflectable
entity.  The following sections should be consulted:

* :class:`.postgresql.ENUM` - DDL and typing support for ENUM.

* :meth:`.PGInspector.get_enums` - retrieve a listing of current ENUM types

* :meth:`.postgresql.ENUM.create`, :meth:`.postgresql.ENUM.drop` - individual
  CREATE and DROP commands for ENUM, sketched below.
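
As a quick illustration of those standalone CREATE and DROP commands, a
minimal sketch, assuming an existing ``engine``, might look like::

    from sqlalchemy.dialects.postgresql import ENUM

    mood = ENUM('happy', 'sad', name='mood_enum')

    # emits CREATE TYPE only if the type is not already present
    mood.create(engine, checkfirst=True)

    # emits DROP TYPE only if the type exists
    mood.drop(engine, checkfirst=True)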

.. _postgresql_array_of_enum:

Using ENUM with ARRAY
^^^^^^^^^^^^^^^^^^^^^

The combination of ENUM and ARRAY is not directly supported by backend
DBAPIs at this time.  In order to send and receive an ARRAY of ENUM,
use the following workaround type::

    import re

    import sqlalchemy as sa
    from sqlalchemy.dialects.postgresql import ARRAY

    class ArrayOfEnum(ARRAY):

        def bind_expression(self, bindvalue):
            # CAST the bound parameter to the ARRAY-of-ENUM type
            return sa.cast(bindvalue, self)

        def result_processor(self, dialect, coltype):
            super_rp = super(ArrayOfEnum, self).result_processor(
                dialect, coltype)

            def handle_raw_string(value):
                # parse the raw '{a,b,c}' string returned by the DBAPI
                inner = re.match(r"^{(.*)}$", value).group(1)
                return inner.split(",") if inner else []

            def process(value):
                if value is None:
                    return None
                return super_rp(handle_raw_string(value))
            return process

E.g.::

    Table(
        'mydata', metadata,
        Column('id', Integer, primary_key=True),
        Column('data', ArrayOfEnum(ENUM('a', 'b', 'c', name='myenum')))
    )

This type is not included as a built-in type, as it would be incompatible
with a DBAPI that suddenly decides to support ARRAY of ENUM directly in
a new version.

.. _postgresql_array_of_json:

Using JSON/JSONB with ARRAY
^^^^^^^^^^^^^^^^^^^^^^^^^^^

Similar to using ENUM, for an ARRAY of JSON/JSONB we need to render the
appropriate CAST; however, current psycopg2 drivers seem to handle the result
for ARRAY of JSON automatically, so the type is simpler::

    class CastingArray(ARRAY):
        def bind_expression(self, bindvalue):
            return sa.cast(bindvalue, self)

E.g.::

    Table(
        'mydata', metadata,
        Column('id', Integer, primary_key=True),
        Column('data', CastingArray(JSONB))
    )


"""
from collections import defaultdict
import re
import datetime as dt


from sqlalchemy.sql import elements
from ... import sql, schema, exc, util
from ...engine import default, reflection
from ...sql import compiler, expression
from ...
import types as sqltypes + +try: + from uuid import UUID as _python_UUID +except ImportError: + _python_UUID = None + +from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \ + CHAR, TEXT, FLOAT, NUMERIC, \ + DATE, BOOLEAN, REAL + +AUTOCOMMIT_REGEXP = re.compile( + r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|' + 'IMPORT FOREIGN SCHEMA|REFRESH MATERIALIZED VIEW)', + re.I | re.UNICODE) + +RESERVED_WORDS = set( + ["all", "analyse", "analyze", "and", "any", "array", "as", "asc", + "asymmetric", "both", "case", "cast", "check", "collate", "column", + "constraint", "create", "current_catalog", "current_date", + "current_role", "current_time", "current_timestamp", "current_user", + "default", "deferrable", "desc", "distinct", "do", "else", "end", + "except", "false", "fetch", "for", "foreign", "from", "grant", "group", + "having", "in", "initially", "intersect", "into", "leading", "limit", + "localtime", "localtimestamp", "new", "not", "null", "of", "off", + "offset", "old", "on", "only", "or", "order", "placing", "primary", + "references", "returning", "select", "session_user", "some", "symmetric", + "table", "then", "to", "trailing", "true", "union", "unique", "user", + "using", "variadic", "when", "where", "window", "with", "authorization", + "between", "binary", "cross", "current_schema", "freeze", "full", + "ilike", "inner", "is", "isnull", "join", "left", "like", "natural", + "notnull", "outer", "over", "overlaps", "right", "similar", "verbose" + ]) + +_DECIMAL_TYPES = (1231, 1700) +_FLOAT_TYPES = (700, 701, 1021, 1022) +_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016) + +class BYTEA(sqltypes.LargeBinary): + __visit_name__ = 'BYTEA' + + +class DOUBLE_PRECISION(sqltypes.Float): + __visit_name__ = 'DOUBLE_PRECISION' + + +class INET(sqltypes.TypeEngine): + __visit_name__ = "INET" +PGInet = INET + + +class CIDR(sqltypes.TypeEngine): + __visit_name__ = "CIDR" +PGCidr = CIDR + + +class MACADDR(sqltypes.TypeEngine): + __visit_name__ = "MACADDR" +PGMacAddr = MACADDR + + +class OID(sqltypes.TypeEngine): + + """Provide the PostgreSQL OID type. + + .. versionadded:: 0.9.5 + + """ + __visit_name__ = "OID" + + +class TIMESTAMP(sqltypes.TIMESTAMP): + + def __init__(self, timezone=False, precision=None): + super(TIMESTAMP, self).__init__(timezone=timezone) + self.precision = precision + + +class TIME(sqltypes.TIME): + + def __init__(self, timezone=False, precision=None): + super(TIME, self).__init__(timezone=timezone) + self.precision = precision + + +class INTERVAL(sqltypes.TypeEngine): + + """PostgreSQL INTERVAL type. + + The INTERVAL type may not be supported on all DBAPIs. + It is known to work on psycopg2 and not pg8000 or zxjdbc. + + """ + __visit_name__ = 'INTERVAL' + + def __init__(self, precision=None): + self.precision = precision + + @classmethod + def _adapt_from_generic_interval(cls, interval): + return INTERVAL(precision=interval.second_precision) + + @property + def _type_affinity(self): + return sqltypes.Interval + + @property + def python_type(self): + return dt.timedelta + +PGInterval = INTERVAL + + +class BIT(sqltypes.TypeEngine): + __visit_name__ = 'BIT' + + def __init__(self, length=None, varying=False): + if not varying: + # BIT without VARYING defaults to length 1 + self.length = length or 1 + else: + # but BIT VARYING can be unlimited-length, so no default + self.length = length + self.varying = varying + +PGBit = BIT + + +class UUID(sqltypes.TypeEngine): + + """PostgreSQL UUID type. 

    Represents the UUID column type, interpreting
    data either as natively returned by the DBAPI
    or as Python uuid objects.

    The UUID type may not be supported on all DBAPIs.
    It is known to work on psycopg2 and not pg8000.

    """
    __visit_name__ = 'UUID'

    def __init__(self, as_uuid=False):
        """Construct a UUID type.

        :param as_uuid=False: if True, values will be interpreted
         as Python uuid objects, converting to/from string via the
         DBAPI.

        """
        if as_uuid and _python_UUID is None:
            raise NotImplementedError(
                "This version of Python does not support "
                "the native UUID type."
            )
        self.as_uuid = as_uuid

    def bind_processor(self, dialect):
        if self.as_uuid:
            def process(value):
                if value is not None:
                    value = util.text_type(value)
                return value
            return process
        else:
            return None

    def result_processor(self, dialect, coltype):
        if self.as_uuid:
            def process(value):
                if value is not None:
                    value = _python_UUID(value)
                return value
            return process
        else:
            return None

PGUuid = UUID


class TSVECTOR(sqltypes.TypeEngine):

    """The :class:`.postgresql.TSVECTOR` type implements the PostgreSQL
    text search type TSVECTOR.

    It can be used to do full text queries on natural language
    documents.

    .. versionadded:: 0.9.0

    .. seealso::

        :ref:`postgresql_match`

    """
    __visit_name__ = 'TSVECTOR'


class ENUM(sqltypes.Enum):

    """PostgreSQL ENUM type.

    This is a subclass of :class:`.types.Enum` which includes
    support for PG's ``CREATE TYPE`` and ``DROP TYPE``.

    When the builtin type :class:`.types.Enum` is used and the
    :paramref:`.Enum.native_enum` flag is left at its default of
    True, the PostgreSQL backend will use a :class:`.postgresql.ENUM`
    type as the implementation, so the special create/drop rules
    will be used.

    The create/drop behavior of ENUM is necessarily intricate, due to the
    awkward relationship the ENUM type has to the parent table, in that
    it may be "owned" by just a single table, or may be shared among many
    tables.

    When using :class:`.types.Enum` or :class:`.postgresql.ENUM`
    in an "inline" fashion, the ``CREATE TYPE`` and ``DROP TYPE`` are emitted
    corresponding to when the :meth:`.Table.create` and :meth:`.Table.drop`
    methods are called::

        table = Table('sometable', metadata,
            Column('some_enum', ENUM('a', 'b', 'c', name='myenum'))
        )

        table.create(engine)  # will emit CREATE TYPE and CREATE TABLE
        table.drop(engine)  # will emit DROP TABLE and DROP TYPE

    To use a common enumerated type between multiple tables, the best
    practice is to declare the :class:`.types.Enum` or
    :class:`.postgresql.ENUM` independently, and associate it with the
    :class:`.MetaData` object itself::

        my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata)

        t1 = Table('sometable_one', metadata,
            Column('some_enum', my_enum)
        )

        t2 = Table('sometable_two', metadata,
            Column('some_enum', my_enum)
        )

    When this pattern is used, care must still be taken at the level
    of individual table creates.
Emitting CREATE TABLE without also + specifying ``checkfirst=True`` will still cause issues:: + + t1.create(engine) # will fail: no such type 'myenum' + + If we specify ``checkfirst=True``, the individual table-level create + operation will check for the ``ENUM`` and create if not exists:: + + # will check if enum exists, and emit CREATE TYPE if not + t1.create(engine, checkfirst=True) + + When using a metadata-level ENUM type, the type will always be created + and dropped if either the metadata-wide create/drop is called:: + + metadata.create_all(engine) # will emit CREATE TYPE + metadata.drop_all(engine) # will emit DROP TYPE + + The type can also be created and dropped directly:: + + my_enum.create(engine) + my_enum.drop(engine) + + .. versionchanged:: 1.0.0 The PostgreSQL :class:`.postgresql.ENUM` type + now behaves more strictly with regards to CREATE/DROP. A metadata-level + ENUM type will only be created and dropped at the metadata level, + not the table level, with the exception of + ``table.create(checkfirst=True)``. + The ``table.drop()`` call will now emit a DROP TYPE for a table-level + enumerated type. + + """ + + def __init__(self, *enums, **kw): + """Construct an :class:`~.postgresql.ENUM`. + + Arguments are the same as that of + :class:`.types.Enum`, but also including + the following parameters. + + :param create_type: Defaults to True. + Indicates that ``CREATE TYPE`` should be + emitted, after optionally checking for the + presence of the type, when the parent + table is being created; and additionally + that ``DROP TYPE`` is called when the table + is dropped. When ``False``, no check + will be performed and no ``CREATE TYPE`` + or ``DROP TYPE`` is emitted, unless + :meth:`~.postgresql.ENUM.create` + or :meth:`~.postgresql.ENUM.drop` + are called directly. + Setting to ``False`` is helpful + when invoking a creation scheme to a SQL file + without access to the actual database - + the :meth:`~.postgresql.ENUM.create` and + :meth:`~.postgresql.ENUM.drop` methods can + be used to emit SQL to a target bind. + + .. versionadded:: 0.7.4 + + """ + self.create_type = kw.pop("create_type", True) + super(ENUM, self).__init__(*enums, **kw) + + def create(self, bind=None, checkfirst=True): + """Emit ``CREATE TYPE`` for this + :class:`~.postgresql.ENUM`. + + If the underlying dialect does not support + PostgreSQL CREATE TYPE, no action is taken. + + :param bind: a connectable :class:`.Engine`, + :class:`.Connection`, or similar object to emit + SQL. + :param checkfirst: if ``True``, a query against + the PG catalog will be first performed to see + if the type does not exist already before + creating. + + """ + if not bind.dialect.supports_native_enum: + return + + if not checkfirst or \ + not bind.dialect.has_type( + bind, self.name, schema=self.schema): + bind.execute(CreateEnumType(self)) + + def drop(self, bind=None, checkfirst=True): + """Emit ``DROP TYPE`` for this + :class:`~.postgresql.ENUM`. + + If the underlying dialect does not support + PostgreSQL DROP TYPE, no action is taken. + + :param bind: a connectable :class:`.Engine`, + :class:`.Connection`, or similar object to emit + SQL. + :param checkfirst: if ``True``, a query against + the PG catalog will be first performed to see + if the type actually exists before dropping. 
+ + """ + if not bind.dialect.supports_native_enum: + return + + if not checkfirst or \ + bind.dialect.has_type(bind, self.name, schema=self.schema): + bind.execute(DropEnumType(self)) + + def _check_for_name_in_memos(self, checkfirst, kw): + """Look in the 'ddl runner' for 'memos', then + note our name in that collection. + + This to ensure a particular named enum is operated + upon only once within any kind of create/drop + sequence without relying upon "checkfirst". + + """ + if not self.create_type: + return True + if '_ddl_runner' in kw: + ddl_runner = kw['_ddl_runner'] + if '_pg_enums' in ddl_runner.memo: + pg_enums = ddl_runner.memo['_pg_enums'] + else: + pg_enums = ddl_runner.memo['_pg_enums'] = set() + present = self.name in pg_enums + pg_enums.add(self.name) + return present + else: + return False + + def _on_table_create(self, target, bind, checkfirst=False, **kw): + if checkfirst or ( + not self.metadata and + not kw.get('_is_metadata_operation', False)) and \ + not self._check_for_name_in_memos(checkfirst, kw): + self.create(bind=bind, checkfirst=checkfirst) + + def _on_table_drop(self, target, bind, checkfirst=False, **kw): + if not self.metadata and \ + not kw.get('_is_metadata_operation', False) and \ + not self._check_for_name_in_memos(checkfirst, kw): + self.drop(bind=bind, checkfirst=checkfirst) + + def _on_metadata_create(self, target, bind, checkfirst=False, **kw): + if not self._check_for_name_in_memos(checkfirst, kw): + self.create(bind=bind, checkfirst=checkfirst) + + def _on_metadata_drop(self, target, bind, checkfirst=False, **kw): + if not self._check_for_name_in_memos(checkfirst, kw): + self.drop(bind=bind, checkfirst=checkfirst) + +colspecs = { + sqltypes.Interval: INTERVAL, + sqltypes.Enum: ENUM, +} + +ischema_names = { + 'integer': INTEGER, + 'bigint': BIGINT, + 'smallint': SMALLINT, + 'character varying': VARCHAR, + 'character': CHAR, + '"char"': sqltypes.String, + 'name': sqltypes.String, + 'text': TEXT, + 'numeric': NUMERIC, + 'float': FLOAT, + 'real': REAL, + 'inet': INET, + 'cidr': CIDR, + 'uuid': UUID, + 'bit': BIT, + 'bit varying': BIT, + 'macaddr': MACADDR, + 'oid': OID, + 'double precision': DOUBLE_PRECISION, + 'timestamp': TIMESTAMP, + 'timestamp with time zone': TIMESTAMP, + 'timestamp without time zone': TIMESTAMP, + 'time with time zone': TIME, + 'time without time zone': TIME, + 'date': DATE, + 'time': TIME, + 'bytea': BYTEA, + 'boolean': BOOLEAN, + 'interval': INTERVAL, + 'interval year to month': INTERVAL, + 'interval day to second': INTERVAL, + 'tsvector': TSVECTOR +} + + +class PGCompiler(compiler.SQLCompiler): + + def visit_array(self, element, **kw): + return "ARRAY[%s]" % self.visit_clauselist(element, **kw) + + def visit_slice(self, element, **kw): + return "%s:%s" % ( + self.process(element.start, **kw), + self.process(element.stop, **kw), + ) + + def visit_json_getitem_op_binary(self, binary, operator, **kw): + kw['eager_grouping'] = True + return self._generate_generic_binary( + binary, " -> ", **kw + ) + + def visit_json_path_getitem_op_binary(self, binary, operator, **kw): + kw['eager_grouping'] = True + return self._generate_generic_binary( + binary, " #> ", **kw + ) + + def visit_getitem_binary(self, binary, operator, **kw): + return "%s[%s]" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw) + ) + + def visit_aggregate_order_by(self, element, **kw): + return "%s ORDER BY %s" % ( + self.process(element.target, **kw), + self.process(element.order_by, **kw) + ) + + def visit_match_op_binary(self, binary, 
operator, **kw): + if "postgresql_regconfig" in binary.modifiers: + regconfig = self.render_literal_value( + binary.modifiers['postgresql_regconfig'], + sqltypes.STRINGTYPE) + if regconfig: + return "%s @@ to_tsquery(%s, %s)" % ( + self.process(binary.left, **kw), + regconfig, + self.process(binary.right, **kw) + ) + return "%s @@ to_tsquery(%s)" % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw) + ) + + def visit_ilike_op_binary(self, binary, operator, **kw): + escape = binary.modifiers.get("escape", None) + + return '%s ILIKE %s' % \ + (self.process(binary.left, **kw), + self.process(binary.right, **kw)) \ + + ( + ' ESCAPE ' + + self.render_literal_value(escape, sqltypes.STRINGTYPE) + if escape else '' + ) + + def visit_notilike_op_binary(self, binary, operator, **kw): + escape = binary.modifiers.get("escape", None) + return '%s NOT ILIKE %s' % \ + (self.process(binary.left, **kw), + self.process(binary.right, **kw)) \ + + ( + ' ESCAPE ' + + self.render_literal_value(escape, sqltypes.STRINGTYPE) + if escape else '' + ) + + def render_literal_value(self, value, type_): + value = super(PGCompiler, self).render_literal_value(value, type_) + + if self.dialect._backslash_escapes: + value = value.replace('\\', '\\\\') + return value + + def visit_sequence(self, seq): + return "nextval('%s')" % self.preparer.format_sequence(seq) + + def limit_clause(self, select, **kw): + text = "" + if select._limit_clause is not None: + text += " \n LIMIT " + self.process(select._limit_clause, **kw) + if select._offset_clause is not None: + if select._limit_clause is None: + text += " \n LIMIT ALL" + text += " OFFSET " + self.process(select._offset_clause, **kw) + return text + + def format_from_hint_text(self, sqltext, table, hint, iscrud): + if hint.upper() != 'ONLY': + raise exc.CompileError("Unrecognized hint: %r" % hint) + return "ONLY " + sqltext + + def get_select_precolumns(self, select, **kw): + if select._distinct is not False: + if select._distinct is True: + return "DISTINCT " + elif isinstance(select._distinct, (list, tuple)): + return "DISTINCT ON (" + ', '.join( + [self.process(col) for col in select._distinct] + ) + ") " + else: + return "DISTINCT ON (" + \ + self.process(select._distinct, **kw) + ") " + else: + return "" + + def for_update_clause(self, select, **kw): + + if select._for_update_arg.read: + if select._for_update_arg.key_share: + tmp = " FOR KEY SHARE" + else: + tmp = " FOR SHARE" + elif select._for_update_arg.key_share: + tmp = " FOR NO KEY UPDATE" + else: + tmp = " FOR UPDATE" + + if select._for_update_arg.of: + tables = util.OrderedSet( + c.table if isinstance(c, expression.ColumnClause) + else c for c in select._for_update_arg.of) + tmp += " OF " + ", ".join( + self.process(table, ashint=True, use_schema=False, **kw) + for table in tables + ) + + if select._for_update_arg.nowait: + tmp += " NOWAIT" + if select._for_update_arg.skip_locked: + tmp += " SKIP LOCKED" + + return tmp + + def returning_clause(self, stmt, returning_cols): + + columns = [ + self._label_select_column(None, c, True, False, {}) + for c in expression._select_iterables(returning_cols) + ] + + return 'RETURNING ' + ', '.join(columns) + + def visit_substring_func(self, func, **kw): + s = self.process(func.clauses.clauses[0], **kw) + start = self.process(func.clauses.clauses[1], **kw) + if len(func.clauses.clauses) > 2: + length = self.process(func.clauses.clauses[2], **kw) + return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length) + else: + return "SUBSTRING(%s FROM %s)" % (s, start) 
+ + def _on_conflict_target(self, clause, **kw): + + if clause.constraint_target is not None: + target_text = 'ON CONSTRAINT %s' % clause.constraint_target + elif clause.inferred_target_elements is not None: + target_text = '(%s)' % ', '.join( + (self.preparer.quote(c) + if isinstance(c, util.string_types) + else + self.process(c, include_table=False, use_schema=False)) + for c in clause.inferred_target_elements + ) + if clause.inferred_target_whereclause is not None: + target_text += ' WHERE %s' % \ + self.process( + clause.inferred_target_whereclause, + include_table=False, + use_schema=False + ) + else: + target_text = '' + + return target_text + + def visit_on_conflict_do_nothing(self, on_conflict, **kw): + + target_text = self._on_conflict_target(on_conflict, **kw) + + if target_text: + return "ON CONFLICT %s DO NOTHING" % target_text + else: + return "ON CONFLICT DO NOTHING" + + def visit_on_conflict_do_update(self, on_conflict, **kw): + + clause = on_conflict + + target_text = self._on_conflict_target(on_conflict, **kw) + + action_set_ops = [] + + set_parameters = dict(clause.update_values_to_set) + # create a list of column assignment clauses as tuples + cols = self.statement.table.c + for c in cols: + col_key = c.key + if col_key in set_parameters: + value = set_parameters.pop(col_key) + if elements._is_literal(value): + value = elements.BindParameter( + None, value, type_=c.type + ) + + else: + if isinstance(value, elements.BindParameter) and \ + value.type._isnull: + value = value._clone() + value.type = c.type + value_text = self.process(value.self_group(), use_schema=False) + + key_text = ( + self.preparer.quote(col_key) + ) + action_set_ops.append('%s = %s' % (key_text, value_text)) + + # check for names that don't match columns + if set_parameters: + util.warn( + "Additional column names not matching " + "any column keys in table '%s': %s" % ( + self.statement.table.name, + (", ".join("'%s'" % c for c in set_parameters)) + ) + ) + for k, v in set_parameters.items(): + key_text = ( + self.preparer.quote(k) + if isinstance(k, util.string_types) + else self.process(k, use_schema=False) + ) + value_text = self.process( + elements._literal_as_binds(v), + use_schema=False + ) + action_set_ops.append('%s = %s' % (key_text, value_text)) + + action_text = ', '.join(action_set_ops) + if clause.update_whereclause is not None: + action_text += ' WHERE %s' % \ + self.process( + clause.update_whereclause, + include_table=True, + use_schema=False + ) + + return 'ON CONFLICT %s DO UPDATE SET %s' % (target_text, action_text) + + +class PGDDLCompiler(compiler.DDLCompiler): + + def get_column_specification(self, column, **kwargs): + + colspec = self.preparer.format_column(column) + impl_type = column.type.dialect_impl(self.dialect) + if isinstance(impl_type, sqltypes.TypeDecorator): + impl_type = impl_type.impl + + if column.primary_key and \ + column is column.table._autoincrement_column and \ + ( + self.dialect.supports_smallserial or + not isinstance(impl_type, sqltypes.SmallInteger) + ) and ( + column.default is None or + ( + isinstance(column.default, schema.Sequence) and + column.default.optional + )): + if isinstance(impl_type, sqltypes.BigInteger): + colspec += " BIGSERIAL" + elif isinstance(impl_type, sqltypes.SmallInteger): + colspec += " SMALLSERIAL" + else: + colspec += " SERIAL" + else: + colspec += " " + self.dialect.type_compiler.process( + column.type, type_expression=column) + default = self.get_column_default_string(column) + if default is not None: + colspec += " DEFAULT " + 
default + + if not column.nullable: + colspec += " NOT NULL" + return colspec + + def visit_create_enum_type(self, create): + type_ = create.element + + return "CREATE TYPE %s AS ENUM (%s)" % ( + self.preparer.format_type(type_), + ", ".join( + self.sql_compiler.process(sql.literal(e), literal_binds=True) + for e in type_.enums) + ) + + def visit_drop_enum_type(self, drop): + type_ = drop.element + + return "DROP TYPE %s" % ( + self.preparer.format_type(type_) + ) + + def visit_create_index(self, create): + preparer = self.preparer + index = create.element + self._verify_index_table(index) + text = "CREATE " + if index.unique: + text += "UNIQUE " + text += "INDEX " + + if self.dialect._supports_create_index_concurrently: + concurrently = index.dialect_options['postgresql']['concurrently'] + if concurrently: + text += "CONCURRENTLY " + + text += "%s ON %s " % ( + self._prepared_index_name(index, + include_schema=False), + preparer.format_table(index.table) + ) + + using = index.dialect_options['postgresql']['using'] + if using: + text += "USING %s " % preparer.quote(using) + + ops = index.dialect_options["postgresql"]["ops"] + text += "(%s)" \ + % ( + ', '.join([ + self.sql_compiler.process( + expr.self_group() + if not isinstance(expr, expression.ColumnClause) + else expr, + include_table=False, literal_binds=True) + + ( + (' ' + ops[expr.key]) + if hasattr(expr, 'key') + and expr.key in ops else '' + ) + for expr in index.expressions + ]) + ) + + withclause = index.dialect_options['postgresql']['with'] + + if withclause: + text += " WITH (%s)" % (', '.join( + ['%s = %s' % storage_parameter + for storage_parameter in withclause.items()])) + + tablespace_name = index.dialect_options['postgresql']['tablespace'] + + if tablespace_name: + text += " TABLESPACE %s" % preparer.quote(tablespace_name) + + whereclause = index.dialect_options["postgresql"]["where"] + + if whereclause is not None: + where_compiled = self.sql_compiler.process( + whereclause, include_table=False, + literal_binds=True) + text += " WHERE " + where_compiled + return text + + def visit_drop_index(self, drop): + index = drop.element + + text = "\nDROP INDEX " + + if self.dialect._supports_drop_index_concurrently: + concurrently = index.dialect_options['postgresql']['concurrently'] + if concurrently: + text += "CONCURRENTLY " + + text += self._prepared_index_name(index, include_schema=True) + return text + + def visit_exclude_constraint(self, constraint, **kw): + text = "" + if constraint.name is not None: + text += "CONSTRAINT %s " % \ + self.preparer.format_constraint(constraint) + elements = [] + for expr, name, op in constraint._render_exprs: + kw['include_table'] = False + elements.append( + "%s WITH %s" % (self.sql_compiler.process(expr, **kw), op) + ) + text += "EXCLUDE USING %s (%s)" % (constraint.using, + ', '.join(elements)) + if constraint.where is not None: + text += ' WHERE (%s)' % self.sql_compiler.process( + constraint.where, + literal_binds=True) + text += self.define_constraint_deferrability(constraint) + return text + + def post_create_table(self, table): + table_opts = [] + pg_opts = table.dialect_options['postgresql'] + + inherits = pg_opts.get('inherits') + if inherits is not None: + if not isinstance(inherits, (list, tuple)): + inherits = (inherits, ) + table_opts.append( + '\n INHERITS ( ' + + ', '.join(self.preparer.quote(name) for name in inherits) + + ' )') + + if pg_opts['with_oids'] is True: + table_opts.append('\n WITH OIDS') + elif pg_opts['with_oids'] is False: + table_opts.append('\n WITHOUT 
OIDS') + + if pg_opts['on_commit']: + on_commit_options = pg_opts['on_commit'].replace("_", " ").upper() + table_opts.append('\n ON COMMIT %s' % on_commit_options) + + if pg_opts['tablespace']: + tablespace_name = pg_opts['tablespace'] + table_opts.append( + '\n TABLESPACE %s' % self.preparer.quote(tablespace_name) + ) + + return ''.join(table_opts) + + +class PGTypeCompiler(compiler.GenericTypeCompiler): + def visit_TSVECTOR(self, type, **kw): + return "TSVECTOR" + + def visit_INET(self, type_, **kw): + return "INET" + + def visit_CIDR(self, type_, **kw): + return "CIDR" + + def visit_MACADDR(self, type_, **kw): + return "MACADDR" + + def visit_OID(self, type_, **kw): + return "OID" + + def visit_FLOAT(self, type_, **kw): + if not type_.precision: + return "FLOAT" + else: + return "FLOAT(%(precision)s)" % {'precision': type_.precision} + + def visit_DOUBLE_PRECISION(self, type_, **kw): + return "DOUBLE PRECISION" + + def visit_BIGINT(self, type_, **kw): + return "BIGINT" + + def visit_HSTORE(self, type_, **kw): + return "HSTORE" + + def visit_JSON(self, type_, **kw): + return "JSON" + + def visit_JSONB(self, type_, **kw): + return "JSONB" + + def visit_INT4RANGE(self, type_, **kw): + return "INT4RANGE" + + def visit_INT8RANGE(self, type_, **kw): + return "INT8RANGE" + + def visit_NUMRANGE(self, type_, **kw): + return "NUMRANGE" + + def visit_DATERANGE(self, type_, **kw): + return "DATERANGE" + + def visit_TSRANGE(self, type_, **kw): + return "TSRANGE" + + def visit_TSTZRANGE(self, type_, **kw): + return "TSTZRANGE" + + def visit_datetime(self, type_, **kw): + return self.visit_TIMESTAMP(type_, **kw) + + def visit_enum(self, type_, **kw): + if not type_.native_enum or not self.dialect.supports_native_enum: + return super(PGTypeCompiler, self).visit_enum(type_, **kw) + else: + return self.visit_ENUM(type_, **kw) + + def visit_ENUM(self, type_, **kw): + return self.dialect.identifier_preparer.format_type(type_) + + def visit_TIMESTAMP(self, type_, **kw): + return "TIMESTAMP%s %s" % ( + "(%d)" % type_.precision + if getattr(type_, 'precision', None) is not None else "", + (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" + ) + + def visit_TIME(self, type_, **kw): + return "TIME%s %s" % ( + "(%d)" % type_.precision + if getattr(type_, 'precision', None) is not None else "", + (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" + ) + + def visit_INTERVAL(self, type_, **kw): + if type_.precision is not None: + return "INTERVAL(%d)" % type_.precision + else: + return "INTERVAL" + + def visit_BIT(self, type_, **kw): + if type_.varying: + compiled = "BIT VARYING" + if type_.length is not None: + compiled += "(%d)" % type_.length + else: + compiled = "BIT(%d)" % type_.length + return compiled + + def visit_UUID(self, type_, **kw): + return "UUID" + + def visit_large_binary(self, type_, **kw): + return self.visit_BYTEA(type_, **kw) + + def visit_BYTEA(self, type_, **kw): + return "BYTEA" + + def visit_ARRAY(self, type_, **kw): + return self.process(type_.item_type) + ('[]' * (type_.dimensions + if type_.dimensions + is not None else 1)) + + +class PGIdentifierPreparer(compiler.IdentifierPreparer): + + reserved_words = RESERVED_WORDS + + def _unquote_identifier(self, value): + if value[0] == self.initial_quote: + value = value[1:-1].\ + replace(self.escape_to_quote, self.escape_quote) + return value + + def format_type(self, type_, use_schema=True): + if not type_.name: + raise exc.CompileError("PostgreSQL ENUM type requires a name.") + + name = self.quote(type_.name) + effective_schema = 
self.schema_for_object(type_)

        if not self.omit_schema and use_schema and \
                effective_schema is not None:
            name = self.quote_schema(effective_schema) + "." + name
        return name


class PGInspector(reflection.Inspector):

    def __init__(self, conn):
        reflection.Inspector.__init__(self, conn)

    def get_table_oid(self, table_name, schema=None):
        """Return the OID for the given table name."""

        return self.dialect.get_table_oid(self.bind, table_name, schema,
                                          info_cache=self.info_cache)

    def get_enums(self, schema=None):
        """Return a list of ENUM objects.

        Each member is a dictionary containing these fields:

        * name - name of the enum
        * schema - the schema name for the enum.
        * visible - boolean, whether or not this enum is visible
          in the default search path.
        * labels - a list of string labels that apply to the enum.

        :param schema: schema name.  If None, the default schema
         (typically 'public') is used.  May also be set to '*' to
         indicate load enums for all schemas.

        .. versionadded:: 1.0.0

        """
        schema = schema or self.default_schema_name
        return self.dialect._load_enums(self.bind, schema)

    def get_foreign_table_names(self, schema=None):
        """Return a list of FOREIGN TABLE names.

        Behavior is similar to that of :meth:`.Inspector.get_table_names`,
        except that the list is limited to those tables that report a
        ``relkind`` value of ``f``.

        .. versionadded:: 1.0.0

        """
        schema = schema or self.default_schema_name
        return self.dialect._get_foreign_table_names(self.bind, schema)

    def get_view_names(self, schema=None, include=('plain', 'materialized')):
        """Return all view names in `schema`.

        :param schema: Optional, retrieve names from a non-default schema.
         For special quoting, use :class:`.quoted_name`.

        :param include: specify which types of views to return.  Passed
         as a string value (for a single type) or a tuple (for any number
         of types).  Defaults to ``('plain', 'materialized')``.

        .. versionadded:: 1.1

        """

        return self.dialect.get_view_names(self.bind, schema,
                                           info_cache=self.info_cache,
                                           include=include)


class CreateEnumType(schema._CreateDropBase):
    __visit_name__ = "create_enum_type"


class DropEnumType(schema._CreateDropBase):
    __visit_name__ = "drop_enum_type"


class PGExecutionContext(default.DefaultExecutionContext):

    def fire_sequence(self, seq, type_):
        return self._execute_scalar((
            "select nextval('%s')" %
            self.dialect.identifier_preparer.format_sequence(seq)), type_)

    def get_insert_default(self, column):
        if column.primary_key and \
                column is column.table._autoincrement_column:
            if column.server_default and column.server_default.has_argument:

                # pre-execute passive defaults on primary key columns
                return self._execute_scalar("select %s" %
                                            column.server_default.arg,
                                            column.type)

            elif (column.default is None or
                  (column.default.is_sequence and
                   column.default.optional)):

                # execute the sequence associated with a SERIAL primary
                # key column. for non-primary-key SERIAL, the ID just
                # generates server side.
+ + try: + seq_name = column._postgresql_seq_name + except AttributeError: + tab = column.table.name + col = column.name + tab = tab[0:29 + max(0, (29 - len(col)))] + col = col[0:29 + max(0, (29 - len(tab)))] + name = "%s_%s_seq" % (tab, col) + column._postgresql_seq_name = seq_name = name + + if column.table is not None: + effective_schema = self.connection.schema_for_object( + column.table) + else: + effective_schema = None + + if effective_schema is not None: + exc = "select nextval('\"%s\".\"%s\"')" % \ + (effective_schema, seq_name) + else: + exc = "select nextval('\"%s\"')" % \ + (seq_name, ) + + return self._execute_scalar(exc, column.type) + + return super(PGExecutionContext, self).get_insert_default(column) + + def should_autocommit_text(self, statement): + return AUTOCOMMIT_REGEXP.match(statement) + + +class PGDialect(default.DefaultDialect): + name = 'postgresql' + supports_alter = True + max_identifier_length = 63 + supports_sane_rowcount = True + + supports_native_enum = True + supports_native_boolean = True + supports_smallserial = True + + supports_sequences = True + sequences_optional = True + preexecute_autoincrement_sequences = True + postfetch_lastrowid = False + + supports_default_values = True + supports_empty_insert = False + supports_multivalues_insert = True + default_paramstyle = 'pyformat' + ischema_names = ischema_names + colspecs = colspecs + + statement_compiler = PGCompiler + ddl_compiler = PGDDLCompiler + type_compiler = PGTypeCompiler + preparer = PGIdentifierPreparer + execution_ctx_cls = PGExecutionContext + inspector = PGInspector + isolation_level = None + + construct_arguments = [ + (schema.Index, { + "using": False, + "where": None, + "ops": {}, + "concurrently": False, + "with": {}, + "tablespace": None + }), + (schema.Table, { + "ignore_search_path": False, + "tablespace": None, + "with_oids": None, + "on_commit": None, + "inherits": None + }), + ] + + reflection_options = ('postgresql_ignore_search_path', ) + + _backslash_escapes = True + _supports_create_index_concurrently = True + _supports_drop_index_concurrently = True + + def __init__(self, isolation_level=None, json_serializer=None, + json_deserializer=None, **kwargs): + default.DefaultDialect.__init__(self, **kwargs) + self.isolation_level = isolation_level + self._json_deserializer = json_deserializer + self._json_serializer = json_serializer + + def initialize(self, connection): + super(PGDialect, self).initialize(connection) + self.implicit_returning = self.server_version_info > (8, 2) and \ + self.__dict__.get('implicit_returning', True) + self.supports_native_enum = self.server_version_info >= (8, 3) + if not self.supports_native_enum: + self.colspecs = self.colspecs.copy() + # pop base Enum type + self.colspecs.pop(sqltypes.Enum, None) + # psycopg2, others may have placed ENUM here as well + self.colspecs.pop(ENUM, None) + + # http://www.postgresql.org/docs/9.3/static/release-9-2.html#AEN116689 + self.supports_smallserial = self.server_version_info >= (9, 2) + + self._backslash_escapes = self.server_version_info < (8, 2) or \ + connection.scalar( + "show standard_conforming_strings" + ) == 'off' + + self._supports_create_index_concurrently = \ + self.server_version_info >= (8, 2) + self._supports_drop_index_concurrently = \ + self.server_version_info >= (9, 2) + + def on_connect(self): + if self.isolation_level is not None: + def connect(conn): + self.set_isolation_level(conn, self.isolation_level) + return connect + else: + return None + + _isolation_lookup = set(['SERIALIZABLE', 'READ 
UNCOMMITTED', + 'READ COMMITTED', 'REPEATABLE READ']) + + def set_isolation_level(self, connection, level): + level = level.replace('_', ' ') + if level not in self._isolation_lookup: + raise exc.ArgumentError( + "Invalid value '%s' for isolation_level. " + "Valid isolation levels for %s are %s" % + (level, self.name, ", ".join(self._isolation_lookup)) + ) + cursor = connection.cursor() + cursor.execute( + "SET SESSION CHARACTERISTICS AS TRANSACTION " + "ISOLATION LEVEL %s" % level) + cursor.execute("COMMIT") + cursor.close() + + def get_isolation_level(self, connection): + cursor = connection.cursor() + cursor.execute('show transaction isolation level') + val = cursor.fetchone()[0] + cursor.close() + return val.upper() + + def do_begin_twophase(self, connection, xid): + self.do_begin(connection.connection) + + def do_prepare_twophase(self, connection, xid): + connection.execute("PREPARE TRANSACTION '%s'" % xid) + + def do_rollback_twophase(self, connection, xid, + is_prepared=True, recover=False): + if is_prepared: + if recover: + # FIXME: ugly hack to get out of transaction + # context when committing recoverable transactions + # Must find out a way how to make the dbapi not + # open a transaction. + connection.execute("ROLLBACK") + connection.execute("ROLLBACK PREPARED '%s'" % xid) + connection.execute("BEGIN") + self.do_rollback(connection.connection) + else: + self.do_rollback(connection.connection) + + def do_commit_twophase(self, connection, xid, + is_prepared=True, recover=False): + if is_prepared: + if recover: + connection.execute("ROLLBACK") + connection.execute("COMMIT PREPARED '%s'" % xid) + connection.execute("BEGIN") + self.do_rollback(connection.connection) + else: + self.do_commit(connection.connection) + + def do_recover_twophase(self, connection): + resultset = connection.execute( + sql.text("SELECT gid FROM pg_prepared_xacts")) + return [row[0] for row in resultset] + + def _get_default_schema_name(self, connection): + return connection.scalar("select current_schema()") + + def has_schema(self, connection, schema): + query = ("select nspname from pg_namespace " + "where lower(nspname)=:schema") + cursor = connection.execute( + sql.text( + query, + bindparams=[ + sql.bindparam( + 'schema', util.text_type(schema.lower()), + type_=sqltypes.Unicode)] + ) + ) + + return bool(cursor.first()) + + def has_table(self, connection, table_name, schema=None): + # seems like case gets folded in pg_class... 
+ if schema is None: + cursor = connection.execute( + sql.text( + "select relname from pg_class c join pg_namespace n on " + "n.oid=c.relnamespace where " + "pg_catalog.pg_table_is_visible(c.oid) " + "and relname=:name", + bindparams=[ + sql.bindparam('name', util.text_type(table_name), + type_=sqltypes.Unicode)] + ) + ) + else: + cursor = connection.execute( + sql.text( + "select relname from pg_class c join pg_namespace n on " + "n.oid=c.relnamespace where n.nspname=:schema and " + "relname=:name", + bindparams=[ + sql.bindparam('name', + util.text_type(table_name), + type_=sqltypes.Unicode), + sql.bindparam('schema', + util.text_type(schema), + type_=sqltypes.Unicode)] + ) + ) + return bool(cursor.first()) + + def has_sequence(self, connection, sequence_name, schema=None): + if schema is None: + cursor = connection.execute( + sql.text( + "SELECT relname FROM pg_class c join pg_namespace n on " + "n.oid=c.relnamespace where relkind='S' and " + "n.nspname=current_schema() " + "and relname=:name", + bindparams=[ + sql.bindparam('name', util.text_type(sequence_name), + type_=sqltypes.Unicode) + ] + ) + ) + else: + cursor = connection.execute( + sql.text( + "SELECT relname FROM pg_class c join pg_namespace n on " + "n.oid=c.relnamespace where relkind='S' and " + "n.nspname=:schema and relname=:name", + bindparams=[ + sql.bindparam('name', util.text_type(sequence_name), + type_=sqltypes.Unicode), + sql.bindparam('schema', + util.text_type(schema), + type_=sqltypes.Unicode) + ] + ) + ) + + return bool(cursor.first()) + + def has_type(self, connection, type_name, schema=None): + if schema is not None: + query = """ + SELECT EXISTS ( + SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n + WHERE t.typnamespace = n.oid + AND t.typname = :typname + AND n.nspname = :nspname + ) + """ + query = sql.text(query) + else: + query = """ + SELECT EXISTS ( + SELECT * FROM pg_catalog.pg_type t + WHERE t.typname = :typname + AND pg_type_is_visible(t.oid) + ) + """ + query = sql.text(query) + query = query.bindparams( + sql.bindparam('typname', + util.text_type(type_name), type_=sqltypes.Unicode), + ) + if schema is not None: + query = query.bindparams( + sql.bindparam('nspname', + util.text_type(schema), type_=sqltypes.Unicode), + ) + cursor = connection.execute(query) + return bool(cursor.scalar()) + + def _get_server_version_info(self, connection): + v = connection.execute("select version()").scalar() + m = re.match( + r'.*(?:PostgreSQL|EnterpriseDB) ' + r'(\d+)\.?(\d+)?(?:\.(\d+))?(?:\.\d+)?(?:devel)?', + v) + if not m: + raise AssertionError( + "Could not determine version from string '%s'" % v) + return tuple([int(x) for x in m.group(1, 2, 3) if x is not None]) + + @reflection.cache + def get_table_oid(self, connection, table_name, schema=None, **kw): + """Fetch the oid for schema.table_name. + + Several reflection methods require the table oid. The idea for using + this method is that it can be fetched one time and cached for + subsequent calls. + + """ + table_oid = None + if schema is not None: + schema_where_clause = "n.nspname = :schema" + else: + schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)" + query = """ + SELECT c.oid + FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE (%s) + AND c.relname = :table_name AND c.relkind in ('r', 'v', 'm', 'f') + """ % schema_where_clause + # Since we're binding to unicode, table_name and schema_name must be + # unicode. 
+ table_name = util.text_type(table_name) + if schema is not None: + schema = util.text_type(schema) + s = sql.text(query).bindparams(table_name=sqltypes.Unicode) + s = s.columns(oid=sqltypes.Integer) + if schema: + s = s.bindparams(sql.bindparam('schema', type_=sqltypes.Unicode)) + c = connection.execute(s, table_name=table_name, schema=schema) + table_oid = c.scalar() + if table_oid is None: + raise exc.NoSuchTableError(table_name) + return table_oid + + @reflection.cache + def get_schema_names(self, connection, **kw): + result = connection.execute( + sql.text("SELECT nspname FROM pg_namespace " + "WHERE nspname NOT LIKE 'pg_%' " + "ORDER BY nspname" + ).columns(nspname=sqltypes.Unicode)) + return [name for name, in result] + + @reflection.cache + def get_table_names(self, connection, schema=None, **kw): + result = connection.execute( + sql.text("SELECT c.relname FROM pg_class c " + "JOIN pg_namespace n ON n.oid = c.relnamespace " + "WHERE n.nspname = :schema AND c.relkind = 'r'" + ).columns(relname=sqltypes.Unicode), + schema=schema if schema is not None else self.default_schema_name) + return [name for name, in result] + + @reflection.cache + def _get_foreign_table_names(self, connection, schema=None, **kw): + result = connection.execute( + sql.text("SELECT c.relname FROM pg_class c " + "JOIN pg_namespace n ON n.oid = c.relnamespace " + "WHERE n.nspname = :schema AND c.relkind = 'f'" + ).columns(relname=sqltypes.Unicode), + schema=schema if schema is not None else self.default_schema_name) + return [name for name, in result] + + @reflection.cache + def get_view_names( + self, connection, schema=None, + include=('plain', 'materialized'), **kw): + + include_kind = {'plain': 'v', 'materialized': 'm'} + try: + kinds = [include_kind[i] for i in util.to_list(include)] + except KeyError: + raise ValueError( + "include %r unknown, needs to be a sequence containing " + "one or both of 'plain' and 'materialized'" % (include,)) + if not kinds: + raise ValueError( + "empty include, needs to be a sequence containing " + "one or both of 'plain' and 'materialized'") + + result = connection.execute( + sql.text("SELECT c.relname FROM pg_class c " + "JOIN pg_namespace n ON n.oid = c.relnamespace " + "WHERE n.nspname = :schema AND c.relkind IN (%s)" % + (", ".join("'%s'" % elem for elem in kinds)) + ).columns(relname=sqltypes.Unicode), + schema=schema if schema is not None else self.default_schema_name) + return [name for name, in result] + + @reflection.cache + def get_view_definition(self, connection, view_name, schema=None, **kw): + view_def = connection.scalar( + sql.text("SELECT pg_get_viewdef(c.oid) view_def FROM pg_class c " + "JOIN pg_namespace n ON n.oid = c.relnamespace " + "WHERE n.nspname = :schema AND c.relname = :view_name " + "AND c.relkind IN ('v', 'm')" + ).columns(view_def=sqltypes.Unicode), + schema=schema if schema is not None else self.default_schema_name, + view_name=view_name) + return view_def + + @reflection.cache + def get_columns(self, connection, table_name, schema=None, **kw): + + table_oid = self.get_table_oid(connection, table_name, schema, + info_cache=kw.get('info_cache')) + SQL_COLS = """ + SELECT a.attname, + pg_catalog.format_type(a.atttypid, a.atttypmod), + (SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid) + FROM pg_catalog.pg_attrdef d + WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum + AND a.atthasdef) + AS DEFAULT, + a.attnotnull, a.attnum, a.attrelid as table_oid + FROM pg_catalog.pg_attribute a + WHERE a.attrelid = :table_oid + AND a.attnum > 0 AND NOT 
a.attisdropped + ORDER BY a.attnum + """ + s = sql.text(SQL_COLS, + bindparams=[ + sql.bindparam('table_oid', type_=sqltypes.Integer)], + typemap={ + 'attname': sqltypes.Unicode, + 'default': sqltypes.Unicode} + ) + c = connection.execute(s, table_oid=table_oid) + rows = c.fetchall() + domains = self._load_domains(connection) + enums = dict( + ( + "%s.%s" % (rec['schema'], rec['name']) + if not rec['visible'] else rec['name'], rec) for rec in + self._load_enums(connection, schema='*') + ) + + # format columns + columns = [] + for name, format_type, default, notnull, attnum, table_oid in rows: + column_info = self._get_column_info( + name, format_type, default, notnull, domains, enums, schema) + columns.append(column_info) + return columns + + def _get_column_info(self, name, format_type, default, + notnull, domains, enums, schema): + # strip (*) from character varying(5), timestamp(5) + # with time zone, geometry(POLYGON), etc. + attype = re.sub(r'\(.*\)', '', format_type) + + # strip '[]' from integer[], etc. + attype = attype.replace('[]', '') + + nullable = not notnull + is_array = format_type.endswith('[]') + charlen = re.search(r'\(([\d,]+)\)', format_type) + if charlen: + charlen = charlen.group(1) + args = re.search(r'\((.*)\)', format_type) + if args and args.group(1): + args = tuple(re.split(r'\s*,\s*', args.group(1))) + else: + args = () + kwargs = {} + + if attype == 'numeric': + if charlen: + prec, scale = charlen.split(',') + args = (int(prec), int(scale)) + else: + args = () + elif attype == 'double precision': + args = (53, ) + elif attype == 'integer': + args = () + elif attype in ('timestamp with time zone', + 'time with time zone'): + kwargs['timezone'] = True + if charlen: + kwargs['precision'] = int(charlen) + args = () + elif attype in ('timestamp without time zone', + 'time without time zone', 'time'): + kwargs['timezone'] = False + if charlen: + kwargs['precision'] = int(charlen) + args = () + elif attype == 'bit varying': + kwargs['varying'] = True + if charlen: + args = (int(charlen),) + else: + args = () + elif attype in ('interval', 'interval year to month', + 'interval day to second'): + if charlen: + kwargs['precision'] = int(charlen) + args = () + elif charlen: + args = (int(charlen),) + + while True: + if attype in self.ischema_names: + coltype = self.ischema_names[attype] + break + elif attype in enums: + enum = enums[attype] + coltype = ENUM + kwargs['name'] = enum['name'] + if not enum['visible']: + kwargs['schema'] = enum['schema'] + args = tuple(enum['labels']) + break + elif attype in domains: + domain = domains[attype] + attype = domain['attype'] + # A table can't override whether the domain is nullable. + nullable = domain['nullable'] + if domain['default'] and not default: + # It can, however, override the default + # value, but can't set it to null. + default = domain['default'] + continue + else: + coltype = None + break + + if coltype: + coltype = coltype(*args, **kwargs) + if is_array: + coltype = self.ischema_names['_array'](coltype) + else: + util.warn("Did not recognize type '%s' of column '%s'" % + (attype, name)) + coltype = sqltypes.NULLTYPE + # adjust the default value + autoincrement = False + if default is not None: + match = re.search(r"""(nextval\(')([^']+)('.*$)""", default) + if match is not None: + if issubclass(coltype._type_affinity, sqltypes.Integer): + autoincrement = True + # the default is related to a Sequence + sch = schema + if '.' not in match.group(2) and sch is not None: + # unconditionally quote the schema name. 
this could + # later be enhanced to obey quoting rules / + # "quote schema" + default = match.group(1) + \ + ('"%s"' % sch) + '.' + \ + match.group(2) + match.group(3) + + column_info = dict(name=name, type=coltype, nullable=nullable, + default=default, autoincrement=autoincrement) + return column_info + + @reflection.cache + def get_pk_constraint(self, connection, table_name, schema=None, **kw): + table_oid = self.get_table_oid(connection, table_name, schema, + info_cache=kw.get('info_cache')) + + if self.server_version_info < (8, 4): + PK_SQL = """ + SELECT a.attname + FROM + pg_class t + join pg_index ix on t.oid = ix.indrelid + join pg_attribute a + on t.oid=a.attrelid AND %s + WHERE + t.oid = :table_oid and ix.indisprimary = 't' + ORDER BY a.attnum + """ % self._pg_index_any("a.attnum", "ix.indkey") + + else: + # unnest() and generate_subscripts() both introduced in + # version 8.4 + PK_SQL = """ + SELECT a.attname + FROM pg_attribute a JOIN ( + SELECT unnest(ix.indkey) attnum, + generate_subscripts(ix.indkey, 1) ord + FROM pg_index ix + WHERE ix.indrelid = :table_oid AND ix.indisprimary + ) k ON a.attnum=k.attnum + WHERE a.attrelid = :table_oid + ORDER BY k.ord + """ + t = sql.text(PK_SQL, typemap={'attname': sqltypes.Unicode}) + c = connection.execute(t, table_oid=table_oid) + cols = [r[0] for r in c.fetchall()] + + PK_CONS_SQL = """ + SELECT conname + FROM pg_catalog.pg_constraint r + WHERE r.conrelid = :table_oid AND r.contype = 'p' + ORDER BY 1 + """ + t = sql.text(PK_CONS_SQL, typemap={'conname': sqltypes.Unicode}) + c = connection.execute(t, table_oid=table_oid) + name = c.scalar() + + return {'constrained_columns': cols, 'name': name} + + @reflection.cache + def get_foreign_keys(self, connection, table_name, schema=None, + postgresql_ignore_search_path=False, **kw): + preparer = self.identifier_preparer + table_oid = self.get_table_oid(connection, table_name, schema, + info_cache=kw.get('info_cache')) + + FK_SQL = """ + SELECT r.conname, + pg_catalog.pg_get_constraintdef(r.oid, true) as condef, + n.nspname as conschema + FROM pg_catalog.pg_constraint r, + pg_namespace n, + pg_class c + + WHERE r.conrelid = :table AND + r.contype = 'f' AND + c.oid = confrelid AND + n.oid = c.relnamespace + ORDER BY 1 + """ + # http://www.postgresql.org/docs/9.0/static/sql-createtable.html + FK_REGEX = re.compile( + r'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)' + r'[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?' + r'[\s]?(ON UPDATE ' + r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?' + r'[\s]?(ON DELETE ' + r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?' + r'[\s]?(DEFERRABLE|NOT DEFERRABLE)?' + r'[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?' 
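+            # Illustrative only (hypothetical names): pg_get_constraintdef()
+            # emits definitions of the form
+            #   FOREIGN KEY (user_id) REFERENCES public.users(id)
+            #     ON UPDATE CASCADE ON DELETE SET NULL DEFERRABLE
+            #     INITIALLY DEFERRED
+            # where everything after the referenced column list is optional.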
+ ) + + t = sql.text(FK_SQL, typemap={ + 'conname': sqltypes.Unicode, + 'condef': sqltypes.Unicode}) + c = connection.execute(t, table=table_oid) + fkeys = [] + for conname, condef, conschema in c.fetchall(): + m = re.search(FK_REGEX, condef).groups() + + constrained_columns, referred_schema, \ + referred_table, referred_columns, \ + _, match, _, onupdate, _, ondelete, \ + deferrable, _, initially = m + + if deferrable is not None: + deferrable = True if deferrable == 'DEFERRABLE' else False + constrained_columns = [preparer._unquote_identifier(x) + for x in re.split( + r'\s*,\s*', constrained_columns)] + + if postgresql_ignore_search_path: + # when ignoring search path, we use the actual schema + # provided it isn't the "default" schema + if conschema != self.default_schema_name: + referred_schema = conschema + else: + referred_schema = schema + elif referred_schema: + # referred_schema is the schema that we regexp'ed from + # pg_get_constraintdef(). If the schema is in the search + # path, pg_get_constraintdef() will give us None. + referred_schema = \ + preparer._unquote_identifier(referred_schema) + elif schema is not None and schema == conschema: + # If the actual schema matches the schema of the table + # we're reflecting, then we will use that. + referred_schema = schema + + referred_table = preparer._unquote_identifier(referred_table) + referred_columns = [preparer._unquote_identifier(x) + for x in + re.split(r'\s*,\s', referred_columns)] + fkey_d = { + 'name': conname, + 'constrained_columns': constrained_columns, + 'referred_schema': referred_schema, + 'referred_table': referred_table, + 'referred_columns': referred_columns, + 'options': { + 'onupdate': onupdate, + 'ondelete': ondelete, + 'deferrable': deferrable, + 'initially': initially, + 'match': match + } + } + fkeys.append(fkey_d) + return fkeys + + def _pg_index_any(self, col, compare_to): + if self.server_version_info < (8, 1): + # http://www.postgresql.org/message-id/10279.1124395722@sss.pgh.pa.us + # "In CVS tip you could replace this with "attnum = ANY (indkey)". + # Unfortunately, most array support doesn't work on int2vector in + # pre-8.1 releases, so I think you're kinda stuck with the above + # for now. + # regards, tom lane" + return "(%s)" % " OR ".join( + "%s[%d] = %s" % (compare_to, ind, col) + for ind in range(0, 10) + ) + else: + return "%s = ANY(%s)" % (col, compare_to) + + @reflection.cache + def get_indexes(self, connection, table_name, schema, **kw): + table_oid = self.get_table_oid(connection, table_name, schema, + info_cache=kw.get('info_cache')) + + # cast indkey as varchar since it's an int2vector, + # returned as a list by some drivers such as pypostgresql + + if self.server_version_info < (8, 5): + IDX_SQL = """ + SELECT + i.relname as relname, + ix.indisunique, ix.indexprs, ix.indpred, + a.attname, a.attnum, NULL, ix.indkey%s, + %s, am.amname + FROM + pg_class t + join pg_index ix on t.oid = ix.indrelid + join pg_class i on i.oid = ix.indexrelid + left outer join + pg_attribute a + on t.oid = a.attrelid and %s + left outer join + pg_am am + on i.relam = am.oid + WHERE + t.relkind IN ('r', 'v', 'f', 'm') + and t.oid = :table_oid + and ix.indisprimary = 'f' + ORDER BY + t.relname, + i.relname + """ % ( + # version 8.3 here was based on observing the + # cast does not work in PG 8.2.4, does work in 8.3.0. + # nothing in PG changelogs regarding this. 
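+                # The three substitutions below fill, in order: the optional
+                # ::varchar cast on ix.indkey, the reloptions column (or
+                # NULL pre-8.2), and the attnum/indkey match rendered by
+                # _pg_index_any(), e.g. "a.attnum = ANY(ix.indkey)" on 8.1+.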
+ "::varchar" if self.server_version_info >= (8, 3) else "", + "i.reloptions" if self.server_version_info >= (8, 2) + else "NULL", + self._pg_index_any("a.attnum", "ix.indkey") + ) + else: + IDX_SQL = """ + SELECT + i.relname as relname, + ix.indisunique, ix.indexprs, ix.indpred, + a.attname, a.attnum, c.conrelid, ix.indkey::varchar, + i.reloptions, am.amname + FROM + pg_class t + join pg_index ix on t.oid = ix.indrelid + join pg_class i on i.oid = ix.indexrelid + left outer join + pg_attribute a + on t.oid = a.attrelid and a.attnum = ANY(ix.indkey) + left outer join + pg_constraint c + on (ix.indrelid = c.conrelid and + ix.indexrelid = c.conindid and + c.contype in ('p', 'u', 'x')) + left outer join + pg_am am + on i.relam = am.oid + WHERE + t.relkind IN ('r', 'v', 'f', 'm') + and t.oid = :table_oid + and ix.indisprimary = 'f' + ORDER BY + t.relname, + i.relname + """ + + t = sql.text(IDX_SQL, typemap={ + 'relname': sqltypes.Unicode, + 'attname': sqltypes.Unicode}) + c = connection.execute(t, table_oid=table_oid) + + indexes = defaultdict(lambda: defaultdict(dict)) + + sv_idx_name = None + for row in c.fetchall(): + (idx_name, unique, expr, prd, col, + col_num, conrelid, idx_key, options, amname) = row + + if expr: + if idx_name != sv_idx_name: + util.warn( + "Skipped unsupported reflection of " + "expression-based index %s" + % idx_name) + sv_idx_name = idx_name + continue + + if prd and not idx_name == sv_idx_name: + util.warn( + "Predicate of partial index %s ignored during reflection" + % idx_name) + sv_idx_name = idx_name + + has_idx = idx_name in indexes + index = indexes[idx_name] + if col is not None: + index['cols'][col_num] = col + if not has_idx: + index['key'] = [int(k.strip()) for k in idx_key.split()] + index['unique'] = unique + if conrelid is not None: + index['duplicates_constraint'] = idx_name + if options: + index['options'] = dict( + [option.split("=") for option in options]) + + # it *might* be nice to include that this is 'btree' in the + # reflection info. But we don't want an Index object + # to have a ``postgresql_using`` in it that is just the + # default, so for the moment leaving this out. 
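+            # The entries accumulated here are flattened below into dicts
+            # shaped like (illustrative):
+            #   {'name': 'ix_user_email', 'unique': True,
+            #    'column_names': ['email']}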
+ if amname and amname != 'btree': + index['amname'] = amname + + result = [] + for name, idx in indexes.items(): + entry = { + 'name': name, + 'unique': idx['unique'], + 'column_names': [idx['cols'][i] for i in idx['key']] + } + if 'duplicates_constraint' in idx: + entry['duplicates_constraint'] = idx['duplicates_constraint'] + if 'options' in idx: + entry.setdefault( + 'dialect_options', {})["postgresql_with"] = idx['options'] + if 'amname' in idx: + entry.setdefault( + 'dialect_options', {})["postgresql_using"] = idx['amname'] + result.append(entry) + return result + + @reflection.cache + def get_unique_constraints(self, connection, table_name, + schema=None, **kw): + table_oid = self.get_table_oid(connection, table_name, schema, + info_cache=kw.get('info_cache')) + + UNIQUE_SQL = """ + SELECT + cons.conname as name, + cons.conkey as key, + a.attnum as col_num, + a.attname as col_name + FROM + pg_catalog.pg_constraint cons + join pg_attribute a + on cons.conrelid = a.attrelid AND + a.attnum = ANY(cons.conkey) + WHERE + cons.conrelid = :table_oid AND + cons.contype = 'u' + """ + + t = sql.text(UNIQUE_SQL, typemap={'col_name': sqltypes.Unicode}) + c = connection.execute(t, table_oid=table_oid) + + uniques = defaultdict(lambda: defaultdict(dict)) + for row in c.fetchall(): + uc = uniques[row.name] + uc["key"] = row.key + uc["cols"][row.col_num] = row.col_name + + return [ + {'name': name, + 'column_names': [uc["cols"][i] for i in uc["key"]]} + for name, uc in uniques.items() + ] + + @reflection.cache + def get_check_constraints( + self, connection, table_name, schema=None, **kw): + table_oid = self.get_table_oid(connection, table_name, schema, + info_cache=kw.get('info_cache')) + + CHECK_SQL = """ + SELECT + cons.conname as name, + cons.consrc as src + FROM + pg_catalog.pg_constraint cons + WHERE + cons.conrelid = :table_oid AND + cons.contype = 'c' + """ + + c = connection.execute(sql.text(CHECK_SQL), table_oid=table_oid) + + return [ + {'name': name, + 'sqltext': src[1:-1]} + for name, src in c.fetchall() + ] + + def _load_enums(self, connection, schema=None): + schema = schema or self.default_schema_name + if not self.supports_native_enum: + return {} + + # Load data types for enums: + SQL_ENUMS = """ + SELECT t.typname as "name", + -- no enum defaults in 8.4 at least + -- t.typdefault as "default", + pg_catalog.pg_type_is_visible(t.oid) as "visible", + n.nspname as "schema", + e.enumlabel as "label" + FROM pg_catalog.pg_type t + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace + LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid + WHERE t.typtype = 'e' + """ + + if schema != '*': + SQL_ENUMS += "AND n.nspname = :schema " + + # e.oid gives us label order within an enum + SQL_ENUMS += 'ORDER BY "schema", "name", e.oid' + + s = sql.text(SQL_ENUMS, typemap={ + 'attname': sqltypes.Unicode, + 'label': sqltypes.Unicode}) + + if schema != '*': + s = s.bindparams(schema=schema) + + c = connection.execute(s) + + enums = [] + enum_by_name = {} + for enum in c.fetchall(): + key = (enum['schema'], enum['name']) + if key in enum_by_name: + enum_by_name[key]['labels'].append(enum['label']) + else: + enum_by_name[key] = enum_rec = { + 'name': enum['name'], + 'schema': enum['schema'], + 'visible': enum['visible'], + 'labels': [enum['label']], + } + enums.append(enum_rec) + + return enums + + def _load_domains(self, connection): + # Load data types for domains: + SQL_DOMAINS = """ + SELECT t.typname as "name", + pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype", + not 
t.typnotnull as "nullable", + t.typdefault as "default", + pg_catalog.pg_type_is_visible(t.oid) as "visible", + n.nspname as "schema" + FROM pg_catalog.pg_type t + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace + WHERE t.typtype = 'd' + """ + + s = sql.text(SQL_DOMAINS, typemap={'attname': sqltypes.Unicode}) + c = connection.execute(s) + + domains = {} + for domain in c.fetchall(): + # strip (30) from character varying(30) + attype = re.search(r'([^\(]+)', domain['attype']).group(1) + if domain['visible']: + # 'visible' just means whether or not the domain is in a + # schema that's on the search path -- or not overridden by + # a schema with higher precedence. If it's not visible, + # it will be prefixed with the schema-name when it's used. + name = domain['name'] + else: + name = "%s.%s" % (domain['schema'], domain['name']) + + domains[name] = { + 'attype': attype, + 'nullable': domain['nullable'], + 'default': domain['default'] + } + + return domains diff --git a/app/lib/sqlalchemy/dialects/postgresql/dml.py b/app/lib/sqlalchemy/dialects/postgresql/dml.py new file mode 100644 index 0000000..bfdfbfa --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/dml.py @@ -0,0 +1,213 @@ +# postgresql/on_conflict.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from ...sql.elements import ClauseElement, _literal_as_binds +from ...sql.dml import Insert as StandardInsert +from ...sql.expression import alias +from ...sql import schema +from ...util.langhelpers import public_factory +from ...sql.base import _generative +from ... import util +from . import ext + +__all__ = ('Insert', 'insert') + + +class Insert(StandardInsert): + """PostgreSQL-specific implementation of INSERT. + + Adds methods for PG-specific syntaxes such as ON CONFLICT. + + .. versionadded:: 1.1 + + """ + + @util.memoized_property + def excluded(self): + """Provide the ``excluded`` namespace for an ON CONFLICT statement + + PG's ON CONFLICT clause allows reference to the row that would + be inserted, known as ``excluded``. This attribute provides + all columns in this row to be referenaceable. + + .. seealso:: + + :ref:`postgresql_insert_on_conflict` - example of how + to use :attr:`.Insert.excluded` + + """ + return alias(self.table, name='excluded').columns + + @_generative + def on_conflict_do_update( + self, + constraint=None, index_elements=None, + index_where=None, set_=None, where=None): + """ + Specifies a DO UPDATE SET action for ON CONFLICT clause. + + Either the ``constraint`` or ``index_elements`` argument is + required, but only one of these can be specified. + + :param constraint: + The name of a unique or exclusion constraint on the table, + or the constraint object itself if it has a .name attribute. + + :param index_elements: + A sequence consisting of string column names, :class:`.Column` + objects, or other column expression objects that will be used + to infer a target index. + + :param index_where: + Additional WHERE criterion that can be used to infer a + conditional target index. + + :param set_: + Required argument. A dictionary or other mapping object + with column names as keys and expressions or literals as values, + specifying the ``SET`` actions to take. + If the target :class:`.Column` specifies a ".key" attribute distinct + from the column name, that key should be used. + + .. 
warning:: This dictionary does **not** take into account + Python-specified default UPDATE values or generation functions, + e.g. those specified using :paramref:`.Column.onupdate`. + These values will not be exercised for an ON CONFLICT style of + UPDATE, unless they are manually specified in the + :paramref:`.Insert.on_conflict_do_update.set_` dictionary. + + :param where: + Optional argument. If present, can be a literal SQL + string or an acceptable expression for a ``WHERE`` clause + that restricts the rows affected by ``DO UPDATE SET``. Rows + not meeting the ``WHERE`` condition will not be updated + (effectively a ``DO NOTHING`` for those rows). + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`postgresql_insert_on_conflict` + + """ + self._post_values_clause = OnConflictDoUpdate( + constraint, index_elements, index_where, set_, where) + return self + + @_generative + def on_conflict_do_nothing( + self, + constraint=None, index_elements=None, index_where=None): + """ + Specifies a DO NOTHING action for ON CONFLICT clause. + + The ``constraint`` and ``index_elements`` arguments + are optional, but only one of these can be specified. + + :param constraint: + The name of a unique or exclusion constraint on the table, + or the constraint object itself if it has a .name attribute. + + :param index_elements: + A sequence consisting of string column names, :class:`.Column` + objects, or other column expression objects that will be used + to infer a target index. + + :param index_where: + Additional WHERE criterion that can be used to infer a + conditional target index. + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`postgresql_insert_on_conflict` + + """ + self._post_values_clause = OnConflictDoNothing( + constraint, index_elements, index_where) + return self + +insert = public_factory(Insert, '.dialects.postgresql.insert') + + +class OnConflictClause(ClauseElement): + def __init__( + self, + constraint=None, + index_elements=None, + index_where=None): + + if constraint is not None: + if not isinstance(constraint, util.string_types) and \ + isinstance(constraint, ( + schema.Index, schema.Constraint, + ext.ExcludeConstraint)): + constraint = getattr(constraint, 'name') or constraint + + if constraint is not None: + if index_elements is not None: + raise ValueError( + "'constraint' and 'index_elements' are mutually exclusive") + + if isinstance(constraint, util.string_types): + self.constraint_target = constraint + self.inferred_target_elements = None + self.inferred_target_whereclause = None + elif isinstance(constraint, schema.Index): + index_elements = constraint.expressions + index_where = \ + constraint.dialect_options['postgresql'].get("where") + elif isinstance(constraint, ext.ExcludeConstraint): + index_elements = constraint.columns + index_where = constraint.where + else: + index_elements = constraint.columns + index_where = \ + constraint.dialect_options['postgresql'].get("where") + + if index_elements is not None: + self.constraint_target = None + self.inferred_target_elements = index_elements + self.inferred_target_whereclause = index_where + elif constraint is None: + self.constraint_target = self.inferred_target_elements = \ + self.inferred_target_whereclause = None + + +class OnConflictDoNothing(OnConflictClause): + __visit_name__ = 'on_conflict_do_nothing' + + +class OnConflictDoUpdate(OnConflictClause): + __visit_name__ = 'on_conflict_do_update' + + def __init__( + self, + constraint=None, + index_elements=None, + index_where=None, + set_=None, + where=None): + 
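+        # Normally constructed via the generative Insert API above rather
+        # than directly; a minimal sketch (hypothetical table):
+        #   stmt = insert(my_table).values(id=1, data='inserted')
+        #   stmt = stmt.on_conflict_do_update(
+        #       index_elements=['id'], set_={'data': 'updated'})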
super(OnConflictDoUpdate, self).__init__( + constraint=constraint, + index_elements=index_elements, + index_where=index_where) + + if self.inferred_target_elements is None and \ + self.constraint_target is None: + raise ValueError( + "Either constraint or index_elements, " + "but not both, must be specified unless DO NOTHING") + + if (not isinstance(set_, dict) or not set_): + raise ValueError("set parameter must be a non-empty dictionary") + self.update_values_to_set = [ + (key, value) + for key, value in set_.items() + ] + self.update_whereclause = where diff --git a/app/lib/sqlalchemy/dialects/postgresql/ext.py b/app/lib/sqlalchemy/dialects/postgresql/ext.py new file mode 100644 index 0000000..55eded9 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/ext.py @@ -0,0 +1,218 @@ +# postgresql/ext.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from ...sql import expression +from ...sql import elements +from ...sql import functions +from ...sql.schema import ColumnCollectionConstraint +from .array import ARRAY + + +class aggregate_order_by(expression.ColumnElement): + """Represent a PostgreSQL aggregate order by expression. + + E.g.:: + + from sqlalchemy.dialects.postgresql import aggregate_order_by + expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc())) + stmt = select([expr]) + + would represent the expression:: + + SELECT array_agg(a ORDER BY b DESC) FROM table; + + Similarly:: + + expr = func.string_agg( + table.c.a, + aggregate_order_by(literal_column("','"), table.c.a) + ) + stmt = select([expr]) + + Would represent:: + + SELECT string_agg(a, ',' ORDER BY a) FROM table; + + .. versionadded:: 1.1 + + .. seealso:: + + :class:`.array_agg` + + """ + + __visit_name__ = 'aggregate_order_by' + + def __init__(self, target, order_by): + self.target = elements._literal_as_binds(target) + self.order_by = elements._literal_as_binds(order_by) + + def self_group(self, against=None): + return self + + def get_children(self, **kwargs): + return self.target, self.order_by + + def _copy_internals(self, clone=elements._clone, **kw): + self.target = clone(self.target, **kw) + self.order_by = clone(self.order_by, **kw) + + @property + def _from_objects(self): + return self.target._from_objects + self.order_by._from_objects + + +class ExcludeConstraint(ColumnCollectionConstraint): + """A table-level EXCLUDE constraint. + + Defines an EXCLUDE constraint as described in the `postgres + documentation`__. + + __ http://www.postgresql.org/docs/9.0/\ +static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE + """ + + __visit_name__ = 'exclude_constraint' + + where = None + + def __init__(self, *elements, **kw): + r""" + Create an :class:`.ExcludeConstraint` object. 
+ + E.g.:: + + const = ExcludeConstraint( + (Column('period'), '&&'), + (Column('group'), '='), + where=(Column('group') != 'some group') + ) + + The constraint is normally embedded into the :class:`.Table` construct + directly, or added later using :meth:`.append_constraint`:: + + some_table = Table( + 'some_table', metadata, + Column('id', Integer, primary_key=True), + Column('period', TSRANGE()), + Column('group', String) + ) + + some_table.append_constraint( + ExcludeConstraint( + (some_table.c.period, '&&'), + (some_table.c.group, '='), + where=some_table.c.group != 'some group', + name='some_table_excl_const' + ) + ) + + :param \*elements: + A sequence of two tuples of the form ``(column, operator)`` where + "column" is a SQL expression element or a raw SQL string, most + typically a :class:`.Column` object, + and "operator" is a string containing the operator to use. + + .. note:: + + A plain string passed for the value of "column" is interpreted + as an arbitrary SQL expression; when passing a plain string, + any necessary quoting and escaping syntaxes must be applied + manually. In order to specify a column name when a + :class:`.Column` object is not available, while ensuring that + any necessary quoting rules take effect, an ad-hoc + :class:`.Column` or :func:`.sql.expression.column` object may + be used. + + :param name: + Optional, the in-database name of this constraint. + + :param deferrable: + Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when + issuing DDL for this constraint. + + :param initially: + Optional string. If set, emit INITIALLY when issuing DDL + for this constraint. + + :param using: + Optional string. If set, emit USING when issuing DDL + for this constraint. Defaults to 'gist'. + + :param where: + Optional SQL expression construct or literal SQL string. + If set, emit WHERE when issuing DDL + for this constraint. + + .. note:: + + A plain string passed here is interpreted as an arbitrary SQL + expression; when passing a plain string, any necessary quoting + and escaping syntaxes must be applied manually. + + """ + columns = [] + render_exprs = [] + self.operators = {} + + expressions, operators = zip(*elements) + + for (expr, column, strname, add_element), operator in zip( + self._extract_col_expression_collection(expressions), + operators + ): + if add_element is not None: + columns.append(add_element) + + name = column.name if column is not None else strname + + if name is not None: + # backwards compat + self.operators[name] = operator + + expr = expression._literal_as_text(expr) + + render_exprs.append( + (expr, name, operator) + ) + + self._render_exprs = render_exprs + ColumnCollectionConstraint.__init__( + self, + *columns, + name=kw.get('name'), + deferrable=kw.get('deferrable'), + initially=kw.get('initially') + ) + self.using = kw.get('using', 'gist') + where = kw.get('where') + if where is not None: + self.where = expression._literal_as_text(where) + + def copy(self, **kw): + elements = [(col, self.operators[col]) + for col in self.columns.keys()] + c = self.__class__(*elements, + name=self.name, + deferrable=self.deferrable, + initially=self.initially, + where=self.where, + using=self.using) + c.dispatch._update(self.dispatch) + return c + + +def array_agg(*arg, **kw): + """PostgreSQL-specific form of :class:`.array_agg`, ensures + return type is :class:`.postgresql.ARRAY` and not + the plain :class:`.types.ARRAY`. + + .. 
versionadded:: 1.1 + + """ + kw['type_'] = ARRAY(functions._type_from_args(arg)) + return functions.func.array_agg(*arg, **kw) diff --git a/app/lib/sqlalchemy/dialects/postgresql/hstore.py b/app/lib/sqlalchemy/dialects/postgresql/hstore.py new file mode 100644 index 0000000..952c6ed --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/hstore.py @@ -0,0 +1,420 @@ +# postgresql/hstore.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +import re + +from .base import ischema_names +from .array import ARRAY +from ... import types as sqltypes +from ...sql import functions as sqlfunc +from ...sql import operators +from ... import util + +__all__ = ('HSTORE', 'hstore') + +idx_precedence = operators._PRECEDENCE[operators.json_getitem_op] + +GETITEM = operators.custom_op( + "->", precedence=idx_precedence, natural_self_precedent=True, + eager_grouping=True +) + +HAS_KEY = operators.custom_op( + "?", precedence=idx_precedence, natural_self_precedent=True, + eager_grouping=True +) + +HAS_ALL = operators.custom_op( + "?&", precedence=idx_precedence, natural_self_precedent=True, + eager_grouping=True +) + +HAS_ANY = operators.custom_op( + "?|", precedence=idx_precedence, natural_self_precedent=True, + eager_grouping=True +) + +CONTAINS = operators.custom_op( + "@>", precedence=idx_precedence, natural_self_precedent=True, + eager_grouping=True +) + +CONTAINED_BY = operators.custom_op( + "<@", precedence=idx_precedence, natural_self_precedent=True, + eager_grouping=True +) + + +class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine): + """Represent the PostgreSQL HSTORE type. + + The :class:`.HSTORE` type stores dictionaries containing strings, e.g.:: + + data_table = Table('data_table', metadata, + Column('id', Integer, primary_key=True), + Column('data', HSTORE) + ) + + with engine.connect() as conn: + conn.execute( + data_table.insert(), + data = {"key1": "value1", "key2": "value2"} + ) + + :class:`.HSTORE` provides for a wide range of operations, including: + + * Index operations:: + + data_table.c.data['some key'] == 'some value' + + * Containment operations:: + + data_table.c.data.has_key('some key') + + data_table.c.data.has_all(['one', 'two', 'three']) + + * Concatenation:: + + data_table.c.data + {"k1": "v1"} + + For a full list of special methods see + :class:`.HSTORE.comparator_factory`. + + For usage with the SQLAlchemy ORM, it may be desirable to combine + the usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary + now part of the :mod:`sqlalchemy.ext.mutable` + extension. This extension will allow "in-place" changes to the + dictionary, e.g. 
addition of new keys or replacement/removal of existing + keys to/from the current dictionary, to produce events which will be + detected by the unit of work:: + + from sqlalchemy.ext.mutable import MutableDict + + class MyClass(Base): + __tablename__ = 'data_table' + + id = Column(Integer, primary_key=True) + data = Column(MutableDict.as_mutable(HSTORE)) + + my_object = session.query(MyClass).one() + + # in-place mutation, requires Mutable extension + # in order for the ORM to detect + my_object.data['some_key'] = 'some value' + + session.commit() + + When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM + will not be alerted to any changes to the contents of an existing + dictionary, unless that dictionary value is re-assigned to the + HSTORE-attribute itself, thus generating a change event. + + .. versionadded:: 0.8 + + .. seealso:: + + :class:`.hstore` - render the PostgreSQL ``hstore()`` function. + + + """ + + __visit_name__ = 'HSTORE' + hashable = False + text_type = sqltypes.Text() + + def __init__(self, text_type=None): + """Construct a new :class:`.HSTORE`. + + :param text_type: the type that should be used for indexed values. + Defaults to :class:`.types.Text`. + + .. versionadded:: 1.1.0 + + """ + if text_type is not None: + self.text_type = text_type + + class Comparator( + sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator): + """Define comparison operations for :class:`.HSTORE`.""" + + def has_key(self, other): + """Boolean expression. Test for presence of a key. Note that the + key may be a SQLA expression. + """ + return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean) + + def has_all(self, other): + """Boolean expression. Test for presence of all keys in jsonb + """ + return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean) + + def has_any(self, other): + """Boolean expression. Test for presence of any key in jsonb + """ + return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean) + + def contains(self, other, **kwargs): + """Boolean expression. Test if keys (or array) are a superset + of/contained the keys of the argument jsonb expression. + """ + return self.operate(CONTAINS, other, result_type=sqltypes.Boolean) + + def contained_by(self, other): + """Boolean expression. Test if keys are a proper subset of the + keys of the argument jsonb expression. + """ + return self.operate( + CONTAINED_BY, other, result_type=sqltypes.Boolean) + + def _setup_getitem(self, index): + return GETITEM, index, self.type.text_type + + def defined(self, key): + """Boolean expression. Test for presence of a non-NULL value for + the key. Note that the key may be a SQLA expression. + """ + return _HStoreDefinedFunction(self.expr, key) + + def delete(self, key): + """HStore expression. Returns the contents of this hstore with the + given key deleted. Note that the key may be a SQLA expression. + """ + if isinstance(key, dict): + key = _serialize_hstore(key) + return _HStoreDeleteFunction(self.expr, key) + + def slice(self, array): + """HStore expression. Returns a subset of an hstore defined by + array of keys. + """ + return _HStoreSliceFunction(self.expr, array) + + def keys(self): + """Text array expression. Returns array of keys.""" + return _HStoreKeysFunction(self.expr) + + def vals(self): + """Text array expression. Returns array of values.""" + return _HStoreValsFunction(self.expr) + + def array(self): + """Text array expression. Returns array of alternating keys and + values. 
+            """
+            return _HStoreArrayFunction(self.expr)
+
+        def matrix(self):
+            """Text array expression.  Returns array of [key, value] pairs."""
+            return _HStoreMatrixFunction(self.expr)
+
+    comparator_factory = Comparator
+
+    def bind_processor(self, dialect):
+        if util.py2k:
+            encoding = dialect.encoding
+
+            def process(value):
+                if isinstance(value, dict):
+                    return _serialize_hstore(value).encode(encoding)
+                else:
+                    return value
+        else:
+            def process(value):
+                if isinstance(value, dict):
+                    return _serialize_hstore(value)
+                else:
+                    return value
+        return process
+
+    def result_processor(self, dialect, coltype):
+        if util.py2k:
+            encoding = dialect.encoding
+
+            def process(value):
+                if value is not None:
+                    return _parse_hstore(value.decode(encoding))
+                else:
+                    return value
+        else:
+            def process(value):
+                if value is not None:
+                    return _parse_hstore(value)
+                else:
+                    return value
+        return process
+
+
+ischema_names['hstore'] = HSTORE
+
+
+class hstore(sqlfunc.GenericFunction):
+    """Construct an hstore value within a SQL expression using the
+    PostgreSQL ``hstore()`` function.
+
+    The :class:`.hstore` function accepts one or two arguments as described
+    in the PostgreSQL documentation.
+
+    E.g.::
+
+        from sqlalchemy.dialects.postgresql import array, hstore
+
+        select([hstore('key1', 'value1')])
+
+        select([
+            hstore(
+                array(['key1', 'key2', 'key3']),
+                array(['value1', 'value2', 'value3'])
+            )
+        ])
+
+    .. versionadded:: 0.8
+
+    .. seealso::
+
+        :class:`.HSTORE` - the PostgreSQL ``HSTORE`` datatype.
+
+    """
+    type = HSTORE
+    name = 'hstore'
+
+
+class _HStoreDefinedFunction(sqlfunc.GenericFunction):
+    type = sqltypes.Boolean
+    name = 'defined'
+
+
+class _HStoreDeleteFunction(sqlfunc.GenericFunction):
+    type = HSTORE
+    name = 'delete'
+
+
+class _HStoreSliceFunction(sqlfunc.GenericFunction):
+    type = HSTORE
+    name = 'slice'
+
+
+class _HStoreKeysFunction(sqlfunc.GenericFunction):
+    type = ARRAY(sqltypes.Text)
+    name = 'akeys'
+
+
+class _HStoreValsFunction(sqlfunc.GenericFunction):
+    type = ARRAY(sqltypes.Text)
+    name = 'avals'
+
+
+class _HStoreArrayFunction(sqlfunc.GenericFunction):
+    type = ARRAY(sqltypes.Text)
+    name = 'hstore_to_array'
+
+
+class _HStoreMatrixFunction(sqlfunc.GenericFunction):
+    type = ARRAY(sqltypes.Text)
+    name = 'hstore_to_matrix'
+
+
+#
+# parsing.  note that none of this is used with the psycopg2 backend,
+# which provides its own native extensions.
+#
+
+# My best guess at the parsing rules of hstore literals, since no formal
+# grammar is given.  This is mostly reverse engineered from PG's input parser
+# behavior.
+HSTORE_PAIR_RE = re.compile(r"""
+(
+  "(?P<key> (\\ . | [^"])* )"       # Quoted key
+)
+[ ]* => [ ]*                        # Pair operator, optional adjoining whitespace
+(
+    (?P<value_null> NULL )          # NULL value
+  | "(?P<value> (\\ . | [^"])* )"   # Quoted value
+)
+""", re.VERBOSE)
+
+HSTORE_DELIMITER_RE = re.compile(r"""
+[ ]* , [ ]*
+""", re.VERBOSE)
+
+
+def _parse_error(hstore_str, pos):
+    """format an unmarshalling error."""
+
+    ctx = 20
+    hslen = len(hstore_str)
+
+    parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)]
+    residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)]
+
+    if len(parsed_tail) > ctx:
+        parsed_tail = '[...]' + parsed_tail[1:]
+    if len(residual) > ctx:
+        residual = residual[:-1] + '[...]'
+
+    return "After %r, could not parse residual at position %d: %r" % (
+        parsed_tail, pos, residual)
+
+
+def _parse_hstore(hstore_str):
+    """Parse an hstore from its literal string representation.
+
+    Attempts to approximate PG's hstore input parsing rules as closely as
+    possible.
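+
+    For example, an illustrative round trip::
+
+        _parse_hstore('"a"=>"1", "b"=>NULL')   # -> {'a': '1', 'b': None}
+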
Although currently this is not strictly necessary, since the + current implementation of hstore's output syntax is stricter than what it + accepts as input, the documentation makes no guarantees that will always + be the case. + + + + """ + result = {} + pos = 0 + pair_match = HSTORE_PAIR_RE.match(hstore_str) + + while pair_match is not None: + key = pair_match.group('key').replace(r'\"', '"').replace( + "\\\\", "\\") + if pair_match.group('value_null'): + value = None + else: + value = pair_match.group('value').replace( + r'\"', '"').replace("\\\\", "\\") + result[key] = value + + pos += pair_match.end() + + delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:]) + if delim_match is not None: + pos += delim_match.end() + + pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:]) + + if pos != len(hstore_str): + raise ValueError(_parse_error(hstore_str, pos)) + + return result + + +def _serialize_hstore(val): + """Serialize a dictionary into an hstore literal. Keys and values must + both be strings (except None for values). + + """ + def esc(s, position): + if position == 'value' and s is None: + return 'NULL' + elif isinstance(s, util.string_types): + return '"%s"' % s.replace("\\", "\\\\").replace('"', r'\"') + else: + raise ValueError("%r in %s position is not a string." % + (s, position)) + + return ', '.join('%s=>%s' % (esc(k, 'key'), esc(v, 'value')) + for k, v in val.items()) + + diff --git a/app/lib/sqlalchemy/dialects/postgresql/json.py b/app/lib/sqlalchemy/dialects/postgresql/json.py new file mode 100644 index 0000000..f145806 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/json.py @@ -0,0 +1,301 @@ +# postgresql/json.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +from __future__ import absolute_import + +import json +import collections + +from .base import ischema_names, colspecs +from ... import types as sqltypes +from ...sql import operators +from ...sql import elements +from ... 
import util
+
+__all__ = ('JSON', 'JSONB')
+
+idx_precedence = operators._PRECEDENCE[operators.json_getitem_op]
+
+ASTEXT = operators.custom_op(
+    "->>", precedence=idx_precedence, natural_self_precedent=True,
+    eager_grouping=True
+)
+
+JSONPATH_ASTEXT = operators.custom_op(
+    "#>>", precedence=idx_precedence, natural_self_precedent=True,
+    eager_grouping=True
+)
+
+
+HAS_KEY = operators.custom_op(
+    "?", precedence=idx_precedence, natural_self_precedent=True,
+    eager_grouping=True
+)
+
+HAS_ALL = operators.custom_op(
+    "?&", precedence=idx_precedence, natural_self_precedent=True,
+    eager_grouping=True
+)
+
+HAS_ANY = operators.custom_op(
+    "?|", precedence=idx_precedence, natural_self_precedent=True,
+    eager_grouping=True
+)
+
+CONTAINS = operators.custom_op(
+    "@>", precedence=idx_precedence, natural_self_precedent=True,
+    eager_grouping=True
+)
+
+CONTAINED_BY = operators.custom_op(
+    "<@", precedence=idx_precedence, natural_self_precedent=True,
+    eager_grouping=True
+)
+
+
+class JSONPathType(sqltypes.JSON.JSONPathType):
+    def bind_processor(self, dialect):
+        super_proc = self.string_bind_processor(dialect)
+
+        def process(value):
+            assert isinstance(value, collections.Sequence)
+            tokens = [util.text_type(elem) for elem in value]
+            value = "{%s}" % (", ".join(tokens))
+            if super_proc:
+                value = super_proc(value)
+            return value
+
+        return process
+
+    def literal_processor(self, dialect):
+        super_proc = self.string_literal_processor(dialect)
+
+        def process(value):
+            assert isinstance(value, collections.Sequence)
+            tokens = [util.text_type(elem) for elem in value]
+            value = "{%s}" % (", ".join(tokens))
+            if super_proc:
+                value = super_proc(value)
+            return value
+
+        return process
+
+colspecs[sqltypes.JSON.JSONPathType] = JSONPathType
+
+
+class JSON(sqltypes.JSON):
+    """Represent the PostgreSQL JSON type.
+
+    This type is a specialization of the Core-level :class:`.types.JSON`
+    type.  Be sure to read the documentation for :class:`.types.JSON` for
+    important tips regarding treatment of NULL values and ORM use.
+
+    .. versionchanged:: 1.1 :class:`.postgresql.JSON` is now a PostgreSQL-
+       specific specialization of the new :class:`.types.JSON` type.
+
+    The operators provided by the PostgreSQL version of :class:`.JSON`
+    include:
+
+    * Index operations (the ``->`` operator)::
+
+        data_table.c.data['some key']
+
+        data_table.c.data[5]
+
+
+    * Index operations returning text (the ``->>`` operator)::
+
+        data_table.c.data['some key'].astext == 'some value'
+
+    * Index operations with CAST
+      (equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::
+
+        data_table.c.data['some key'].astext.cast(Integer) == 5
+
+    * Path index operations (the ``#>`` operator)::
+
+        data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
+
+    * Path index operations returning text (the ``#>>`` operator)::
+
+        data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')].astext == \
+'some value'
+
+    .. versionchanged:: 1.1 The :meth:`.ColumnElement.cast` operator on
+       JSON objects now requires that the :attr:`.JSON.Comparator.astext`
+       modifier be called explicitly, if the cast works only from a textual
+       string.
+
+    Index operations return an expression object whose type defaults to
+    :class:`.JSON` by default, so that further JSON-oriented instructions
+    may be called upon the result type.
+
+    Custom serializers and deserializers are specified at the dialect level,
+    that is using :func:`.create_engine`.
The reason for this is that when + using psycopg2, the DBAPI only allows serializers at the per-cursor + or per-connection level. E.g.:: + + engine = create_engine("postgresql://scott:tiger@localhost/test", + json_serializer=my_serialize_fn, + json_deserializer=my_deserialize_fn + ) + + When using the psycopg2 dialect, the json_deserializer is registered + against the database using ``psycopg2.extras.register_default_json``. + + .. seealso:: + + :class:`.types.JSON` - Core level JSON type + + :class:`.JSONB` + + """ + + astext_type = sqltypes.Text() + + def __init__(self, none_as_null=False, astext_type=None): + """Construct a :class:`.JSON` type. + + :param none_as_null: if True, persist the value ``None`` as a + SQL NULL value, not the JSON encoding of ``null``. Note that + when this flag is False, the :func:`.null` construct can still + be used to persist a NULL value:: + + from sqlalchemy import null + conn.execute(table.insert(), data=null()) + + .. versionchanged:: 0.9.8 - Added ``none_as_null``, and :func:`.null` + is now supported in order to persist a NULL value. + + .. seealso:: + + :attr:`.JSON.NULL` + + :param astext_type: the type to use for the + :attr:`.JSON.Comparator.astext` + accessor on indexed attributes. Defaults to :class:`.types.Text`. + + .. versionadded:: 1.1 + + """ + super(JSON, self).__init__(none_as_null=none_as_null) + if astext_type is not None: + self.astext_type = astext_type + + class Comparator(sqltypes.JSON.Comparator): + """Define comparison operations for :class:`.JSON`.""" + + @property + def astext(self): + """On an indexed expression, use the "astext" (e.g. "->>") + conversion when rendered in SQL. + + E.g.:: + + select([data_table.c.data['some key'].astext]) + + .. seealso:: + + :meth:`.ColumnElement.cast` + + """ + + if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType): + return self.expr.left.operate( + JSONPATH_ASTEXT, + self.expr.right, result_type=self.type.astext_type) + else: + return self.expr.left.operate( + ASTEXT, self.expr.right, result_type=self.type.astext_type) + + comparator_factory = Comparator + + +colspecs[sqltypes.JSON] = JSON +ischema_names['json'] = JSON + + +class JSONB(JSON): + """Represent the PostgreSQL JSONB type. + + The :class:`.JSONB` type stores arbitrary JSONB format data, e.g.:: + + data_table = Table('data_table', metadata, + Column('id', Integer, primary_key=True), + Column('data', JSONB) + ) + + with engine.connect() as conn: + conn.execute( + data_table.insert(), + data = {"key1": "value1", "key2": "value2"} + ) + + The :class:`.JSONB` type includes all operations provided by + :class:`.JSON`, including the same behaviors for indexing operations. + It also adds additional operators specific to JSONB, including + :meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`, + :meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`, + and :meth:`.JSONB.Comparator.contained_by`. + + Like the :class:`.JSON` type, the :class:`.JSONB` type does not detect + in-place changes when used with the ORM, unless the + :mod:`sqlalchemy.ext.mutable` extension is used. + + Custom serializers and deserializers + are shared with the :class:`.JSON` class, using the ``json_serializer`` + and ``json_deserializer`` keyword arguments. These must be specified + at the dialect level using :func:`.create_engine`. 
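+
+    For example, a sketch mirroring the :class:`.JSON` example above::
+
+        engine = create_engine("postgresql://scott:tiger@localhost/test",
+                               json_serializer=my_serialize_fn,
+                               json_deserializer=my_deserialize_fn
+                               )
+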
When using + psycopg2, the serializers are associated with the jsonb type using + ``psycopg2.extras.register_default_jsonb`` on a per-connection basis, + in the same way that ``psycopg2.extras.register_default_json`` is used + to register these handlers with the json type. + + .. versionadded:: 0.9.7 + + .. seealso:: + + :class:`.JSON` + + """ + + __visit_name__ = 'JSONB' + + class Comparator(JSON.Comparator): + """Define comparison operations for :class:`.JSON`.""" + + def has_key(self, other): + """Boolean expression. Test for presence of a key. Note that the + key may be a SQLA expression. + """ + return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean) + + def has_all(self, other): + """Boolean expression. Test for presence of all keys in jsonb + """ + return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean) + + def has_any(self, other): + """Boolean expression. Test for presence of any key in jsonb + """ + return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean) + + def contains(self, other, **kwargs): + """Boolean expression. Test if keys (or array) are a superset + of/contained the keys of the argument jsonb expression. + """ + return self.operate(CONTAINS, other, result_type=sqltypes.Boolean) + + def contained_by(self, other): + """Boolean expression. Test if keys are a proper subset of the + keys of the argument jsonb expression. + """ + return self.operate( + CONTAINED_BY, other, result_type=sqltypes.Boolean) + + comparator_factory = Comparator + +ischema_names['jsonb'] = JSONB diff --git a/app/lib/sqlalchemy/dialects/postgresql/pg8000.py b/app/lib/sqlalchemy/dialects/postgresql/pg8000.py new file mode 100644 index 0000000..8c019a2 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/pg8000.py @@ -0,0 +1,265 @@ +# postgresql/pg8000.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: postgresql+pg8000 + :name: pg8000 + :dbapi: pg8000 + :connectstring: \ +postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...] + :url: https://pythonhosted.org/pg8000/ + + +.. _pg8000_unicode: + +Unicode +------- + +pg8000 will encode / decode string values between it and the server using the +PostgreSQL ``client_encoding`` parameter; by default this is the value in +the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``. +Typically, this can be changed to ``utf-8``, as a more useful default:: + + #client_encoding = sql_ascii # actually, defaults to database + # encoding + client_encoding = utf8 + +The ``client_encoding`` can be overridden for a session by executing the SQL: + +SET CLIENT_ENCODING TO 'utf8'; + +SQLAlchemy will execute this SQL on all new connections based on the value +passed to :func:`.create_engine` using the ``client_encoding`` parameter:: + + engine = create_engine( + "postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8') + + +.. _pg8000_isolation_level: + +pg8000 Transaction Isolation Level +------------------------------------- + +The pg8000 dialect offers the same isolation level settings as that +of the :ref:`psycopg2 ` dialect: + +* ``READ COMMITTED`` +* ``READ UNCOMMITTED`` +* ``REPEATABLE READ`` +* ``SERIALIZABLE`` +* ``AUTOCOMMIT`` + +.. versionadded:: 0.9.5 support for AUTOCOMMIT isolation level when using + pg8000. + +.. seealso:: + + :ref:`postgresql_isolation_level` + + :ref:`psycopg2_isolation_level` + + +""" +from ... 
import util, exc +import decimal +from ... import processors +from ... import types as sqltypes +from .base import ( + PGDialect, PGCompiler, PGIdentifierPreparer, PGExecutionContext, + _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES) +import re +from sqlalchemy.dialects.postgresql.json import JSON + + +class _PGNumeric(sqltypes.Numeric): + def result_processor(self, dialect, coltype): + if self.asdecimal: + if coltype in _FLOAT_TYPES: + return processors.to_decimal_processor_factory( + decimal.Decimal, self._effective_decimal_return_scale) + elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: + # pg8000 returns Decimal natively for 1700 + return None + else: + raise exc.InvalidRequestError( + "Unknown PG numeric type: %d" % coltype) + else: + if coltype in _FLOAT_TYPES: + # pg8000 returns float natively for 701 + return None + elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: + return processors.to_float + else: + raise exc.InvalidRequestError( + "Unknown PG numeric type: %d" % coltype) + + +class _PGNumericNoBind(_PGNumeric): + def bind_processor(self, dialect): + return None + + +class _PGJSON(JSON): + + def result_processor(self, dialect, coltype): + if dialect._dbapi_version > (1, 10, 1): + return None # Has native JSON + else: + return super(_PGJSON, self).result_processor(dialect, coltype) + + +class PGExecutionContext_pg8000(PGExecutionContext): + pass + + +class PGCompiler_pg8000(PGCompiler): + def visit_mod_binary(self, binary, operator, **kw): + return self.process(binary.left, **kw) + " %% " + \ + self.process(binary.right, **kw) + + def post_process_text(self, text): + if '%%' in text: + util.warn("The SQLAlchemy postgresql dialect " + "now automatically escapes '%' in text() " + "expressions to '%%'.") + return text.replace('%', '%%') + + +class PGIdentifierPreparer_pg8000(PGIdentifierPreparer): + def _escape_identifier(self, value): + value = value.replace(self.escape_quote, self.escape_to_quote) + return value.replace('%', '%%') + + +class PGDialect_pg8000(PGDialect): + driver = 'pg8000' + + supports_unicode_statements = True + + supports_unicode_binds = True + + default_paramstyle = 'format' + supports_sane_multi_rowcount = True + execution_ctx_cls = PGExecutionContext_pg8000 + statement_compiler = PGCompiler_pg8000 + preparer = PGIdentifierPreparer_pg8000 + description_encoding = 'use_encoding' + + colspecs = util.update_copy( + PGDialect.colspecs, + { + sqltypes.Numeric: _PGNumericNoBind, + sqltypes.Float: _PGNumeric, + JSON: _PGJSON, + sqltypes.JSON: _PGJSON + } + ) + + def __init__(self, client_encoding=None, **kwargs): + PGDialect.__init__(self, **kwargs) + self.client_encoding = client_encoding + + def initialize(self, connection): + self.supports_sane_multi_rowcount = self._dbapi_version >= (1, 9, 14) + super(PGDialect_pg8000, self).initialize(connection) + + @util.memoized_property + def _dbapi_version(self): + if self.dbapi and hasattr(self.dbapi, '__version__'): + return tuple( + [ + int(x) for x in re.findall( + r'(\d+)(?:[-\.]?|$)', self.dbapi.__version__)]) + else: + return (99, 99, 99) + + @classmethod + def dbapi(cls): + return __import__('pg8000') + + def create_connect_args(self, url): + opts = url.translate_connect_args(username='user') + if 'port' in opts: + opts['port'] = int(opts['port']) + opts.update(url.query) + return ([], opts) + + def is_disconnect(self, e, connection, cursor): + return "connection is closed" in str(e) + + def set_isolation_level(self, connection, level): + level = level.replace('_', ' ') + + # adjust for ConnectionFairy 
possibly being present + if hasattr(connection, 'connection'): + connection = connection.connection + + if level == 'AUTOCOMMIT': + connection.autocommit = True + elif level in self._isolation_lookup: + connection.autocommit = False + cursor = connection.cursor() + cursor.execute( + "SET SESSION CHARACTERISTICS AS TRANSACTION " + "ISOLATION LEVEL %s" % level) + cursor.execute("COMMIT") + cursor.close() + else: + raise exc.ArgumentError( + "Invalid value '%s' for isolation_level. " + "Valid isolation levels for %s are %s or AUTOCOMMIT" % + (level, self.name, ", ".join(self._isolation_lookup)) + ) + + def set_client_encoding(self, connection, client_encoding): + # adjust for ConnectionFairy possibly being present + if hasattr(connection, 'connection'): + connection = connection.connection + + cursor = connection.cursor() + cursor.execute("SET CLIENT_ENCODING TO '" + client_encoding + "'") + cursor.execute("COMMIT") + cursor.close() + + def do_begin_twophase(self, connection, xid): + connection.connection.tpc_begin((0, xid, '')) + + def do_prepare_twophase(self, connection, xid): + connection.connection.tpc_prepare() + + def do_rollback_twophase( + self, connection, xid, is_prepared=True, recover=False): + connection.connection.tpc_rollback((0, xid, '')) + + def do_commit_twophase( + self, connection, xid, is_prepared=True, recover=False): + connection.connection.tpc_commit((0, xid, '')) + + def do_recover_twophase(self, connection): + return [row[1] for row in connection.connection.tpc_recover()] + + def on_connect(self): + fns = [] + if self.client_encoding is not None: + def on_connect(conn): + self.set_client_encoding(conn, self.client_encoding) + fns.append(on_connect) + + if self.isolation_level is not None: + def on_connect(conn): + self.set_isolation_level(conn, self.isolation_level) + fns.append(on_connect) + + if len(fns) > 0: + def on_connect(conn): + for fn in fns: + fn(conn) + return on_connect + else: + return None + +dialect = PGDialect_pg8000 diff --git a/app/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/app/lib/sqlalchemy/dialects/postgresql/psycopg2.py new file mode 100644 index 0000000..5032814 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -0,0 +1,702 @@ +# postgresql/psycopg2.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: postgresql+psycopg2 + :name: psycopg2 + :dbapi: psycopg2 + :connectstring: postgresql+psycopg2://user:password@host:port/dbname\ +[?key=value&key=value...] + :url: http://pypi.python.org/pypi/psycopg2/ + +psycopg2 Connect Arguments +----------------------------------- + +psycopg2-specific keyword arguments which are accepted by +:func:`.create_engine()` are: + +* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL + statements which support this feature. What this essentially means from a + psycopg2 point of view is that the cursor is created using a name, e.g. + ``connection.cursor('some name')``, which has the effect that result rows + are not immediately pre-fetched and buffered after statement execution, but + are instead left on the server and only retrieved as needed. SQLAlchemy's + :class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering + behavior when this feature is enabled, such that groups of 100 rows at a + time are fetched over the wire to reduce conversational overhead. 
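+
+  An illustrative sketch of enabling the option engine-wide::
+
+      engine = create_engine(
+          "postgresql+psycopg2://scott:tiger@localhost/test",
+          server_side_cursors=True)
+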
+ Note that the :paramref:`.Connection.execution_options.stream_results` + execution option is a more targeted + way of enabling this mode on a per-execution basis. +* ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode + per connection. True by default. + + .. seealso:: + + :ref:`psycopg2_disable_native_unicode` + +* ``isolation_level``: This option, available for all PostgreSQL dialects, + includes the ``AUTOCOMMIT`` isolation level when using the psycopg2 + dialect. + + .. seealso:: + + :ref:`psycopg2_isolation_level` + +* ``client_encoding``: sets the client encoding in a libpq-agnostic way, + using psycopg2's ``set_client_encoding()`` method. + + .. seealso:: + + :ref:`psycopg2_unicode` + +Unix Domain Connections +------------------------ + +psycopg2 supports connecting via Unix domain connections. When the ``host`` +portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2, +which specifies Unix-domain communication rather than TCP/IP communication:: + + create_engine("postgresql+psycopg2://user:password@/dbname") + +By default, the socket file used is to connect to a Unix-domain socket +in ``/tmp``, or whatever socket directory was specified when PostgreSQL +was built. This value can be overridden by passing a pathname to psycopg2, +using ``host`` as an additional keyword argument:: + + create_engine("postgresql+psycopg2://user:password@/dbname?\ +host=/var/lib/postgresql") + +See also: + +`PQconnectdbParams `_ + +.. _psycopg2_execution_options: + +Per-Statement/Connection Execution Options +------------------------------------------- + +The following DBAPI-specific options are respected when used with +:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`, +:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs: + +* ``isolation_level`` - Set the transaction isolation level for the lifespan of a + :class:`.Connection` (can only be set on a connection, not a statement + or query). See :ref:`psycopg2_isolation_level`. + +* ``stream_results`` - Enable or disable usage of psycopg2 server side cursors - + this feature makes use of "named" cursors in combination with special + result handling methods so that result rows are not fully buffered. + If ``None`` or not set, the ``server_side_cursors`` option of the + :class:`.Engine` is used. + +* ``max_row_buffer`` - when using ``stream_results``, an integer value that + specifies the maximum number of rows to buffer at a time. This is + interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the + buffer will grow to ultimately store 1000 rows at a time. + + .. versionadded:: 1.0.6 + +.. _psycopg2_unicode: + +Unicode with Psycopg2 +---------------------- + +By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE`` +extension, such that the DBAPI receives and returns all strings as Python +Unicode objects directly - SQLAlchemy passes these values through without +change. Psycopg2 here will encode/decode string values based on the +current "client encoding" setting; by default this is the value in +the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``. +Typically, this can be changed to ``utf8``, as a more useful default:: + + # postgresql.conf file + + # client_encoding = sql_ascii # actually, defaults to database + # encoding + client_encoding = utf8 + +A second way to affect the client encoding is to set it within Psycopg2 +locally. 
SQLAlchemy will call psycopg2's +:meth:`psycopg2:connection.set_client_encoding` method +on all new connections based on the value passed to +:func:`.create_engine` using the ``client_encoding`` parameter:: + + # set_client_encoding() setting; + # works for *all* PostgreSQL versions + engine = create_engine("postgresql://user:pass@host/dbname", + client_encoding='utf8') + +This overrides the encoding specified in the PostgreSQL client configuration. +When using the parameter in this way, the psycopg2 driver emits +``SET client_encoding TO 'utf8'`` on the connection explicitly, and works +in all PostgreSQL versions. + +Note that the ``client_encoding`` setting as passed to :func:`.create_engine` +is **not the same** as the more recently added ``client_encoding`` parameter +now supported by libpq directly. This is enabled when ``client_encoding`` +is passed directly to ``psycopg2.connect()``, and from SQLAlchemy is passed +using the :paramref:`.create_engine.connect_args` parameter:: + + # libpq direct parameter setting; + # only works for PostgreSQL **9.1 and above** + engine = create_engine("postgresql://user:pass@host/dbname", + connect_args={'client_encoding': 'utf8'}) + + # using the query string is equivalent + engine = create_engine("postgresql://user:pass@host/dbname?client_encoding=utf8") + +The above parameter was only added to libpq as of version 9.1 of PostgreSQL, +so using the previous method is better for cross-version support. + +.. _psycopg2_disable_native_unicode: + +Disabling Native Unicode +^^^^^^^^^^^^^^^^^^^^^^^^ + +SQLAlchemy can also be instructed to skip the usage of the psycopg2 +``UNICODE`` extension and to instead utilize its own unicode encode/decode +services, which are normally reserved only for those DBAPIs that don't +fully support unicode directly. Passing ``use_native_unicode=False`` to +:func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``. +SQLAlchemy will instead encode data itself into Python bytestrings on the way +in and coerce from bytes on the way back, +using the value of the :func:`.create_engine` ``encoding`` parameter, which +defaults to ``utf-8``. +SQLAlchemy's own unicode encode/decode functionality is steadily becoming +obsolete as most DBAPIs now support unicode fully. + +Bound Parameter Styles +---------------------- + +The default parameter style for the psycopg2 dialect is "pyformat", where +SQL is rendered using ``%(paramname)s`` style. This format has the limitation +that it does not accommodate the unusual case of parameter names that +actually contain percent or parenthesis symbols; as SQLAlchemy in many cases +generates bound parameter names based on the name of a column, the presence +of these characters in a column name can lead to problems. + +There are two solutions to the issue of a :class:`.schema.Column` that contains +one of these characters in its name. One is to specify the +:paramref:`.schema.Column.key` for columns that have such names:: + + measurement = Table('measurement', metadata, + Column('Size (meters)', Integer, key='size_meters') + ) + +Above, an INSERT statement such as ``measurement.insert()`` will use +``size_meters`` as the parameter name, and a SQL expression such as +``measurement.c.size_meters > 10`` will derive the bound parameter name +from the ``size_meters`` key as well. + +.. 
versionchanged:: 1.0.0 - SQL expressions will use :attr:`.Column.key` + as the source of naming when anonymous bound parameters are created + in SQL expressions; previously, this behavior only applied to + :meth:`.Table.insert` and :meth:`.Table.update` parameter names. + +The other solution is to use a positional format; psycopg2 allows use of the +"format" paramstyle, which can be passed to +:paramref:`.create_engine.paramstyle`:: + + engine = create_engine( + 'postgresql://scott:tiger@localhost:5432/test', paramstyle='format') + +With the above engine, instead of a statement like:: + + INSERT INTO measurement ("Size (meters)") VALUES (%(Size (meters))s) + {'Size (meters)': 1} + +we instead see:: + + INSERT INTO measurement ("Size (meters)") VALUES (%s) + (1, ) + +Where above, the dictionary style is converted into a tuple with positional +style. + + +Transactions +------------ + +The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations. + +.. _psycopg2_isolation_level: + +Psycopg2 Transaction Isolation Level +------------------------------------- + +As discussed in :ref:`postgresql_isolation_level`, +all PostgreSQL dialects support setting of transaction isolation level +both via the ``isolation_level`` parameter passed to :func:`.create_engine`, +as well as the ``isolation_level`` argument used by +:meth:`.Connection.execution_options`. When using the psycopg2 dialect, these +options make use of psycopg2's ``set_isolation_level()`` connection method, +rather than emitting a PostgreSQL directive; this is because psycopg2's +API-level setting is always emitted at the start of each transaction in any +case. + +The psycopg2 dialect supports these constants for isolation level: + +* ``READ COMMITTED`` +* ``READ UNCOMMITTED`` +* ``REPEATABLE READ`` +* ``SERIALIZABLE`` +* ``AUTOCOMMIT`` + +.. versionadded:: 0.8.2 support for AUTOCOMMIT isolation level when using + psycopg2. + +.. seealso:: + + :ref:`postgresql_isolation_level` + + :ref:`pg8000_isolation_level` + + +NOTICE logging +--------------- + +The psycopg2 dialect will log PostgreSQL NOTICE messages via the +``sqlalchemy.dialects.postgresql`` logger:: + + import logging + logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO) + +.. _psycopg2_hstore:: + +HSTORE type +------------ + +The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of +the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension +by default when psycopg2 version 2.4 or greater is used, and +it is detected that the target database has the HSTORE type set up for use. +In other words, when the dialect makes the first +connection, a sequence like the following is performed: + +1. Request the available HSTORE oids using + ``psycopg2.extras.HstoreAdapter.get_oids()``. + If this function returns a list of HSTORE identifiers, we then determine + that the ``HSTORE`` extension is present. + This function is **skipped** if the version of psycopg2 installed is + less than version 2.4. + +2. If the ``use_native_hstore`` flag is at its default of ``True``, and + we've detected that ``HSTORE`` oids are available, the + ``psycopg2.extensions.register_hstore()`` extension is invoked for all + connections. + +The ``register_hstore()`` extension has the effect of **all Python +dictionaries being accepted as parameters regardless of the type of target +column in SQL**. The dictionaries are converted by this extension into a +textual HSTORE expression. 
If this behavior is not desired, disable the +use of the hstore extension by setting ``use_native_hstore`` to ``False`` as +follows:: + + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", + use_native_hstore=False) + +The ``HSTORE`` type is **still supported** when the +``psycopg2.extensions.register_hstore()`` extension is not used. It merely +means that the coercion between Python dictionaries and the HSTORE +string format, on both the parameter side and the result side, will take +place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2`` +which may be more performant. + +""" +from __future__ import absolute_import + +import re +import logging + +from ... import util, exc +import decimal +from ... import processors +from ...engine import result as _result +from ...sql import expression +from ... import types as sqltypes +from .base import PGDialect, PGCompiler, \ + PGIdentifierPreparer, PGExecutionContext, \ + ENUM, _DECIMAL_TYPES, _FLOAT_TYPES,\ + _INT_TYPES, UUID +from .hstore import HSTORE +from .json import JSON, JSONB + +try: + from uuid import UUID as _python_UUID +except ImportError: + _python_UUID = None + + +logger = logging.getLogger('sqlalchemy.dialects.postgresql') + + +class _PGNumeric(sqltypes.Numeric): + def bind_processor(self, dialect): + return None + + def result_processor(self, dialect, coltype): + if self.asdecimal: + if coltype in _FLOAT_TYPES: + return processors.to_decimal_processor_factory( + decimal.Decimal, + self._effective_decimal_return_scale) + elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: + # pg8000 returns Decimal natively for 1700 + return None + else: + raise exc.InvalidRequestError( + "Unknown PG numeric type: %d" % coltype) + else: + if coltype in _FLOAT_TYPES: + # pg8000 returns float natively for 701 + return None + elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: + return processors.to_float + else: + raise exc.InvalidRequestError( + "Unknown PG numeric type: %d" % coltype) + + +class _PGEnum(ENUM): + def result_processor(self, dialect, coltype): + if self.native_enum and util.py2k and self.convert_unicode is True: + # we can't easily use PG's extensions here because + # the OID is on the fly, and we need to give it a python + # function anyway - not really worth it. 
+ self.convert_unicode = "force_nocheck" + return super(_PGEnum, self).result_processor(dialect, coltype) + + +class _PGHStore(HSTORE): + def bind_processor(self, dialect): + if dialect._has_native_hstore: + return None + else: + return super(_PGHStore, self).bind_processor(dialect) + + def result_processor(self, dialect, coltype): + if dialect._has_native_hstore: + return None + else: + return super(_PGHStore, self).result_processor(dialect, coltype) + + +class _PGJSON(JSON): + + def result_processor(self, dialect, coltype): + if dialect._has_native_json: + return None + else: + return super(_PGJSON, self).result_processor(dialect, coltype) + + +class _PGJSONB(JSONB): + + def result_processor(self, dialect, coltype): + if dialect._has_native_jsonb: + return None + else: + return super(_PGJSONB, self).result_processor(dialect, coltype) + + +class _PGUUID(UUID): + def bind_processor(self, dialect): + if not self.as_uuid and dialect.use_native_uuid: + nonetype = type(None) + + def process(value): + if value is not None: + value = _python_UUID(value) + return value + return process + + def result_processor(self, dialect, coltype): + if not self.as_uuid and dialect.use_native_uuid: + def process(value): + if value is not None: + value = str(value) + return value + return process + + +_server_side_id = util.counter() + + +class PGExecutionContext_psycopg2(PGExecutionContext): + def create_server_side_cursor(self): + # use server-side cursors: + # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html + ident = "c_%s_%s" % (hex(id(self))[2:], + hex(_server_side_id())[2:]) + return self._dbapi_connection.cursor(ident) + + def get_result_proxy(self): + # TODO: ouch + if logger.isEnabledFor(logging.INFO): + self._log_notices(self.cursor) + + if self._is_server_side: + return _result.BufferedRowResultProxy(self) + else: + return _result.ResultProxy(self) + + def _log_notices(self, cursor): + for notice in cursor.connection.notices: + # NOTICE messages have a + # newline character at the end + logger.info(notice.rstrip()) + + cursor.connection.notices[:] = [] + + +class PGCompiler_psycopg2(PGCompiler): + def visit_mod_binary(self, binary, operator, **kw): + return self.process(binary.left, **kw) + " %% " + \ + self.process(binary.right, **kw) + + def post_process_text(self, text): + return text.replace('%', '%%') + + +class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer): + def _escape_identifier(self, value): + value = value.replace(self.escape_quote, self.escape_to_quote) + return value.replace('%', '%%') + + +class PGDialect_psycopg2(PGDialect): + driver = 'psycopg2' + if util.py2k: + supports_unicode_statements = False + + supports_server_side_cursors = True + + default_paramstyle = 'pyformat' + # set to true based on psycopg2 version + supports_sane_multi_rowcount = False + execution_ctx_cls = PGExecutionContext_psycopg2 + statement_compiler = PGCompiler_psycopg2 + preparer = PGIdentifierPreparer_psycopg2 + psycopg2_version = (0, 0) + + FEATURE_VERSION_MAP = dict( + native_json=(2, 5), + native_jsonb=(2, 5, 4), + sane_multi_rowcount=(2, 0, 9), + array_oid=(2, 4, 3), + hstore_adapter=(2, 4) + ) + + _has_native_hstore = False + _has_native_json = False + _has_native_jsonb = False + + engine_config_types = PGDialect.engine_config_types.union([ + ('use_native_unicode', util.asbool), + ]) + + colspecs = util.update_copy( + PGDialect.colspecs, + { + sqltypes.Numeric: _PGNumeric, + ENUM: _PGEnum, # needs force_unicode + sqltypes.Enum: _PGEnum, # needs force_unicode + HSTORE: _PGHStore, + 
JSON: _PGJSON, + sqltypes.JSON: _PGJSON, + JSONB: _PGJSONB, + UUID: _PGUUID + } + ) + + def __init__(self, server_side_cursors=False, use_native_unicode=True, + client_encoding=None, + use_native_hstore=True, use_native_uuid=True, + **kwargs): + PGDialect.__init__(self, **kwargs) + self.server_side_cursors = server_side_cursors + self.use_native_unicode = use_native_unicode + self.use_native_hstore = use_native_hstore + self.use_native_uuid = use_native_uuid + self.supports_unicode_binds = use_native_unicode + self.client_encoding = client_encoding + if self.dbapi and hasattr(self.dbapi, '__version__'): + m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', + self.dbapi.__version__) + if m: + self.psycopg2_version = tuple( + int(x) + for x in m.group(1, 2, 3) + if x is not None) + + def initialize(self, connection): + super(PGDialect_psycopg2, self).initialize(connection) + self._has_native_hstore = self.use_native_hstore and \ + self._hstore_oids(connection.connection) \ + is not None + self._has_native_json = \ + self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_json'] + self._has_native_jsonb = \ + self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_jsonb'] + + # http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9 + self.supports_sane_multi_rowcount = \ + self.psycopg2_version >= \ + self.FEATURE_VERSION_MAP['sane_multi_rowcount'] + + @classmethod + def dbapi(cls): + import psycopg2 + return psycopg2 + + @classmethod + def _psycopg2_extensions(cls): + from psycopg2 import extensions + return extensions + + @classmethod + def _psycopg2_extras(cls): + from psycopg2 import extras + return extras + + @util.memoized_property + def _isolation_lookup(self): + extensions = self._psycopg2_extensions() + return { + 'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT, + 'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED, + 'READ UNCOMMITTED': extensions.ISOLATION_LEVEL_READ_UNCOMMITTED, + 'REPEATABLE READ': extensions.ISOLATION_LEVEL_REPEATABLE_READ, + 'SERIALIZABLE': extensions.ISOLATION_LEVEL_SERIALIZABLE + } + + def set_isolation_level(self, connection, level): + try: + level = self._isolation_lookup[level.replace('_', ' ')] + except KeyError: + raise exc.ArgumentError( + "Invalid value '%s' for isolation_level. 
" + "Valid isolation levels for %s are %s" % + (level, self.name, ", ".join(self._isolation_lookup)) + ) + + connection.set_isolation_level(level) + + def on_connect(self): + extras = self._psycopg2_extras() + extensions = self._psycopg2_extensions() + + fns = [] + if self.client_encoding is not None: + def on_connect(conn): + conn.set_client_encoding(self.client_encoding) + fns.append(on_connect) + + if self.isolation_level is not None: + def on_connect(conn): + self.set_isolation_level(conn, self.isolation_level) + fns.append(on_connect) + + if self.dbapi and self.use_native_uuid: + def on_connect(conn): + extras.register_uuid(None, conn) + fns.append(on_connect) + + if self.dbapi and self.use_native_unicode: + def on_connect(conn): + extensions.register_type(extensions.UNICODE, conn) + extensions.register_type(extensions.UNICODEARRAY, conn) + fns.append(on_connect) + + if self.dbapi and self.use_native_hstore: + def on_connect(conn): + hstore_oids = self._hstore_oids(conn) + if hstore_oids is not None: + oid, array_oid = hstore_oids + kw = {'oid': oid} + if util.py2k: + kw['unicode'] = True + if self.psycopg2_version >= \ + self.FEATURE_VERSION_MAP['array_oid']: + kw['array_oid'] = array_oid + extras.register_hstore(conn, **kw) + fns.append(on_connect) + + if self.dbapi and self._json_deserializer: + def on_connect(conn): + if self._has_native_json: + extras.register_default_json( + conn, loads=self._json_deserializer) + if self._has_native_jsonb: + extras.register_default_jsonb( + conn, loads=self._json_deserializer) + fns.append(on_connect) + + if fns: + def on_connect(conn): + for fn in fns: + fn(conn) + return on_connect + else: + return None + + @util.memoized_instancemethod + def _hstore_oids(self, conn): + if self.psycopg2_version >= self.FEATURE_VERSION_MAP['hstore_adapter']: + extras = self._psycopg2_extras() + oids = extras.HstoreAdapter.get_oids(conn) + if oids is not None and oids[0]: + return oids[0:2] + return None + + def create_connect_args(self, url): + opts = url.translate_connect_args(username='user') + if 'port' in opts: + opts['port'] = int(opts['port']) + opts.update(url.query) + return ([], opts) + + def is_disconnect(self, e, connection, cursor): + if isinstance(e, self.dbapi.Error): + # check the "closed" flag. this might not be + # present on old psycopg2 versions. Also, + # this flag doesn't actually help in a lot of disconnect + # situations, so don't rely on it. + if getattr(connection, 'closed', False): + return True + + # checks based on strings. in the case that .closed + # didn't cut it, fall back onto these. + str_e = str(e).partition("\n")[0] + for msg in [ + # these error messages from libpq: interfaces/libpq/fe-misc.c + # and interfaces/libpq/fe-secure.c. + 'terminating connection', + 'closed the connection', + 'connection not open', + 'could not receive data from server', + 'could not send data to server', + # psycopg2 client errors, psycopg2/conenction.h, + # psycopg2/cursor.h + 'connection already closed', + 'cursor already closed', + # not sure where this path is originally from, it may + # be obsolete. It really says "losed", not "closed". 
+ 'losed the connection unexpectedly', + # these can occur in newer SSL + 'connection has been closed unexpectedly', + 'SSL SYSCALL error: Bad file descriptor', + 'SSL SYSCALL error: EOF detected', + 'SSL error: decryption failed or bad record mac', + ]: + idx = str_e.find(msg) + if idx >= 0 and '"' not in str_e[:idx]: + return True + return False + +dialect = PGDialect_psycopg2 diff --git a/app/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/app/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py new file mode 100644 index 0000000..e99389d --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py @@ -0,0 +1,61 @@ +# testing/engines.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +""" +.. dialect:: postgresql+psycopg2cffi + :name: psycopg2cffi + :dbapi: psycopg2cffi + :connectstring: \ +postgresql+psycopg2cffi://user:password@host:port/dbname\ +[?key=value&key=value...] + :url: http://pypi.python.org/pypi/psycopg2cffi/ + +``psycopg2cffi`` is an adaptation of ``psycopg2``, using CFFI for the C +layer. This makes it suitable for use in e.g. PyPy. Documentation +is as per ``psycopg2``. + +.. versionadded:: 1.0.0 + +.. seealso:: + + :mod:`sqlalchemy.dialects.postgresql.psycopg2` + +""" +from .psycopg2 import PGDialect_psycopg2 + + +class PGDialect_psycopg2cffi(PGDialect_psycopg2): + driver = 'psycopg2cffi' + supports_unicode_statements = True + + # psycopg2cffi's first release is 2.5.0, but reports + # __version__ as 2.4.4. Subsequent releases seem to have + # fixed this. + + FEATURE_VERSION_MAP = dict( + native_json=(2, 4, 4), + native_jsonb=(2, 7, 1), + sane_multi_rowcount=(2, 4, 4), + array_oid=(2, 4, 4), + hstore_adapter=(2, 4, 4) + ) + + @classmethod + def dbapi(cls): + return __import__('psycopg2cffi') + + @classmethod + def _psycopg2_extensions(cls): + root = __import__('psycopg2cffi', fromlist=['extensions']) + return root.extensions + + @classmethod + def _psycopg2_extras(cls): + root = __import__('psycopg2cffi', fromlist=['extras']) + return root.extras + + +dialect = PGDialect_psycopg2cffi diff --git a/app/lib/sqlalchemy/dialects/postgresql/pygresql.py b/app/lib/sqlalchemy/dialects/postgresql/pygresql.py new file mode 100644 index 0000000..aa877a2 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/pygresql.py @@ -0,0 +1,243 @@ +# postgresql/pygresql.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: postgresql+pygresql + :name: pygresql + :dbapi: pgdb + :connectstring: postgresql+pygresql://user:password@host:port/dbname\ +[?key=value&key=value...] + :url: http://www.pygresql.org/ +""" + +import decimal +import re + +from ... 
import exc, processors, util +from ...types import Numeric, JSON as Json +from ...sql.elements import Null +from .base import PGDialect, PGCompiler, PGIdentifierPreparer, \ + _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES, UUID +from .hstore import HSTORE +from .json import JSON, JSONB + + +class _PGNumeric(Numeric): + + def bind_processor(self, dialect): + return None + + def result_processor(self, dialect, coltype): + if not isinstance(coltype, int): + coltype = coltype.oid + if self.asdecimal: + if coltype in _FLOAT_TYPES: + return processors.to_decimal_processor_factory( + decimal.Decimal, + self._effective_decimal_return_scale) + elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: + # PyGreSQL returns Decimal natively for 1700 (numeric) + return None + else: + raise exc.InvalidRequestError( + "Unknown PG numeric type: %d" % coltype) + else: + if coltype in _FLOAT_TYPES: + # PyGreSQL returns float natively for 701 (float8) + return None + elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: + return processors.to_float + else: + raise exc.InvalidRequestError( + "Unknown PG numeric type: %d" % coltype) + + +class _PGHStore(HSTORE): + + def bind_processor(self, dialect): + if not dialect.has_native_hstore: + return super(_PGHStore, self).bind_processor(dialect) + hstore = dialect.dbapi.Hstore + def process(value): + if isinstance(value, dict): + return hstore(value) + return value + return process + + def result_processor(self, dialect, coltype): + if not dialect.has_native_hstore: + return super(_PGHStore, self).result_processor(dialect, coltype) + + +class _PGJSON(JSON): + + def bind_processor(self, dialect): + if not dialect.has_native_json: + return super(_PGJSON, self).bind_processor(dialect) + json = dialect.dbapi.Json + + def process(value): + if value is self.NULL: + value = None + elif isinstance(value, Null) or ( + value is None and self.none_as_null): + return None + if value is None or isinstance(value, (dict, list)): + return json(value) + return value + + return process + + def result_processor(self, dialect, coltype): + if not dialect.has_native_json: + return super(_PGJSON, self).result_processor(dialect, coltype) + + +class _PGJSONB(JSONB): + + def bind_processor(self, dialect): + if not dialect.has_native_json: + return super(_PGJSONB, self).bind_processor(dialect) + json = dialect.dbapi.Json + + def process(value): + if value is self.NULL: + value = None + elif isinstance(value, Null) or ( + value is None and self.none_as_null): + return None + if value is None or isinstance(value, (dict, list)): + return json(value) + return value + + return process + + def result_processor(self, dialect, coltype): + if not dialect.has_native_json: + return super(_PGJSONB, self).result_processor(dialect, coltype) + + +class _PGUUID(UUID): + + def bind_processor(self, dialect): + if not dialect.has_native_uuid: + return super(_PGUUID, self).bind_processor(dialect) + uuid = dialect.dbapi.Uuid + + def process(value): + if value is None: + return None + if isinstance(value, (str, bytes)): + if len(value) == 16: + return uuid(bytes=value) + return uuid(value) + if isinstance(value, int): + return uuid(int=value) + return value + + return process + + def result_processor(self, dialect, coltype): + if not dialect.has_native_uuid: + return super(_PGUUID, self).result_processor(dialect, coltype) + if not self.as_uuid: + def process(value): + if value is not None: + return str(value) + return process + + +class _PGCompiler(PGCompiler): + + def visit_mod_binary(self, binary, operator, **kw): 
+ return self.process(binary.left, **kw) + " %% " + \ + self.process(binary.right, **kw) + + def post_process_text(self, text): + return text.replace('%', '%%') + + +class _PGIdentifierPreparer(PGIdentifierPreparer): + + def _escape_identifier(self, value): + value = value.replace(self.escape_quote, self.escape_to_quote) + return value.replace('%', '%%') + + +class PGDialect_pygresql(PGDialect): + + driver = 'pygresql' + + statement_compiler = _PGCompiler + preparer = _PGIdentifierPreparer + + @classmethod + def dbapi(cls): + import pgdb + return pgdb + + colspecs = util.update_copy( + PGDialect.colspecs, + { + Numeric: _PGNumeric, + HSTORE: _PGHStore, + Json: _PGJSON, + JSON: _PGJSON, + JSONB: _PGJSONB, + UUID: _PGUUID, + } + ) + + def __init__(self, **kwargs): + super(PGDialect_pygresql, self).__init__(**kwargs) + try: + version = self.dbapi.version + m = re.match(r'(\d+)\.(\d+)', version) + version = (int(m.group(1)), int(m.group(2))) + except (AttributeError, ValueError, TypeError): + version = (0, 0) + self.dbapi_version = version + if version < (5, 0): + has_native_hstore = has_native_json = has_native_uuid = False + if version != (0, 0): + util.warn("PyGreSQL is only fully supported by SQLAlchemy" + " since version 5.0.") + else: + self.supports_unicode_statements = True + self.supports_unicode_binds = True + has_native_hstore = has_native_json = has_native_uuid = True + self.has_native_hstore = has_native_hstore + self.has_native_json = has_native_json + self.has_native_uuid = has_native_uuid + + def create_connect_args(self, url): + opts = url.translate_connect_args(username='user') + if 'port' in opts: + opts['host'] = '%s:%s' % ( + opts.get('host', '').rsplit(':', 1)[0], opts.pop('port')) + opts.update(url.query) + return [], opts + + def is_disconnect(self, e, connection, cursor): + if isinstance(e, self.dbapi.Error): + if not connection: + return False + try: + connection = connection.connection + except AttributeError: + pass + else: + if not connection: + return False + try: + return connection.closed + except AttributeError: # PyGreSQL < 5.0 + return connection._cnx is None + return False + + +dialect = PGDialect_pygresql diff --git a/app/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/app/lib/sqlalchemy/dialects/postgresql/pypostgresql.py new file mode 100644 index 0000000..ab77493 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/pypostgresql.py @@ -0,0 +1,97 @@ +# postgresql/pypostgresql.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: postgresql+pypostgresql + :name: py-postgresql + :dbapi: pypostgresql + :connectstring: postgresql+pypostgresql://user:password@host:port/dbname\ +[?key=value&key=value...] + :url: http://python.projects.pgfoundry.org/ + + +""" +from ... import util +from ... import types as sqltypes +from .base import PGDialect, PGExecutionContext +from ... 
import processors + + +class PGNumeric(sqltypes.Numeric): + def bind_processor(self, dialect): + return processors.to_str + + def result_processor(self, dialect, coltype): + if self.asdecimal: + return None + else: + return processors.to_float + + +class PGExecutionContext_pypostgresql(PGExecutionContext): + pass + + +class PGDialect_pypostgresql(PGDialect): + driver = 'pypostgresql' + + supports_unicode_statements = True + supports_unicode_binds = True + description_encoding = None + default_paramstyle = 'pyformat' + + # requires trunk version to support sane rowcounts + # TODO: use dbapi version information to set this flag appropriately + supports_sane_rowcount = True + supports_sane_multi_rowcount = False + + execution_ctx_cls = PGExecutionContext_pypostgresql + colspecs = util.update_copy( + PGDialect.colspecs, + { + sqltypes.Numeric: PGNumeric, + + # prevents PGNumeric from being used + sqltypes.Float: sqltypes.Float, + } + ) + + @classmethod + def dbapi(cls): + from postgresql.driver import dbapi20 + return dbapi20 + + _DBAPI_ERROR_NAMES = [ + "Error", + "InterfaceError", "DatabaseError", "DataError", + "OperationalError", "IntegrityError", "InternalError", + "ProgrammingError", "NotSupportedError" + ] + + @util.memoized_property + def dbapi_exception_translation_map(self): + if self.dbapi is None: + return {} + + return dict( + (getattr(self.dbapi, name).__name__, name) + for name in self._DBAPI_ERROR_NAMES + ) + + def create_connect_args(self, url): + opts = url.translate_connect_args(username='user') + if 'port' in opts: + opts['port'] = int(opts['port']) + else: + opts['port'] = 5432 + opts.update(url.query) + return ([], opts) + + def is_disconnect(self, e, connection, cursor): + return "connection is closed" in str(e) + +dialect = PGDialect_pypostgresql diff --git a/app/lib/sqlalchemy/dialects/postgresql/ranges.py b/app/lib/sqlalchemy/dialects/postgresql/ranges.py new file mode 100644 index 0000000..e7f7da4 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/ranges.py @@ -0,0 +1,168 @@ +# Copyright (C) 2013-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from .base import ischema_names +from ... import types as sqltypes + +__all__ = ('INT4RANGE', 'INT8RANGE', 'NUMRANGE') + + +class RangeOperators(object): + """ + This mixin provides functionality for the Range Operators + listed in Table 9-44 of the `postgres documentation`__ for Range + Functions and Operators. It is used by all the range types + provided in the ``postgres`` dialect and can likely be used for + any range types you create yourself. + + __ http://www.postgresql.org/docs/devel/static/functions-range.html + + No extra support is provided for the Range Functions listed in + Table 9-45 of the postgres documentation. For these, the normal + :func:`~sqlalchemy.sql.expression.func` object should be used. + + .. versionadded:: 0.8.2 Support for PostgreSQL RANGE operations. + + """ + + class comparator_factory(sqltypes.Concatenable.Comparator): + """Define comparison operations for range types.""" + + def __ne__(self, other): + "Boolean expression. Returns true if two ranges are not equal" + return self.expr.op('<>')(other) + + def contains(self, other, **kw): + """Boolean expression. Returns true if the right hand operand, + which can be an element or a range, is contained within the + column. 
+ """ + return self.expr.op('@>')(other) + + def contained_by(self, other): + """Boolean expression. Returns true if the column is contained + within the right hand operand. + """ + return self.expr.op('<@')(other) + + def overlaps(self, other): + """Boolean expression. Returns true if the column overlaps + (has points in common with) the right hand operand. + """ + return self.expr.op('&&')(other) + + def strictly_left_of(self, other): + """Boolean expression. Returns true if the column is strictly + left of the right hand operand. + """ + return self.expr.op('<<')(other) + + __lshift__ = strictly_left_of + + def strictly_right_of(self, other): + """Boolean expression. Returns true if the column is strictly + right of the right hand operand. + """ + return self.expr.op('>>')(other) + + __rshift__ = strictly_right_of + + def not_extend_right_of(self, other): + """Boolean expression. Returns true if the range in the column + does not extend right of the range in the operand. + """ + return self.expr.op('&<')(other) + + def not_extend_left_of(self, other): + """Boolean expression. Returns true if the range in the column + does not extend left of the range in the operand. + """ + return self.expr.op('&>')(other) + + def adjacent_to(self, other): + """Boolean expression. Returns true if the range in the column + is adjacent to the range in the operand. + """ + return self.expr.op('-|-')(other) + + def __add__(self, other): + """Range expression. Returns the union of the two ranges. + Will raise an exception if the resulting range is not + contigous. + """ + return self.expr.op('+')(other) + + +class INT4RANGE(RangeOperators, sqltypes.TypeEngine): + """Represent the PostgreSQL INT4RANGE type. + + .. versionadded:: 0.8.2 + + """ + + __visit_name__ = 'INT4RANGE' + +ischema_names['int4range'] = INT4RANGE + + +class INT8RANGE(RangeOperators, sqltypes.TypeEngine): + """Represent the PostgreSQL INT8RANGE type. + + .. versionadded:: 0.8.2 + + """ + + __visit_name__ = 'INT8RANGE' + +ischema_names['int8range'] = INT8RANGE + + +class NUMRANGE(RangeOperators, sqltypes.TypeEngine): + """Represent the PostgreSQL NUMRANGE type. + + .. versionadded:: 0.8.2 + + """ + + __visit_name__ = 'NUMRANGE' + +ischema_names['numrange'] = NUMRANGE + + +class DATERANGE(RangeOperators, sqltypes.TypeEngine): + """Represent the PostgreSQL DATERANGE type. + + .. versionadded:: 0.8.2 + + """ + + __visit_name__ = 'DATERANGE' + +ischema_names['daterange'] = DATERANGE + + +class TSRANGE(RangeOperators, sqltypes.TypeEngine): + """Represent the PostgreSQL TSRANGE type. + + .. versionadded:: 0.8.2 + + """ + + __visit_name__ = 'TSRANGE' + +ischema_names['tsrange'] = TSRANGE + + +class TSTZRANGE(RangeOperators, sqltypes.TypeEngine): + """Represent the PostgreSQL TSTZRANGE type. + + .. versionadded:: 0.8.2 + + """ + + __visit_name__ = 'TSTZRANGE' + +ischema_names['tstzrange'] = TSTZRANGE diff --git a/app/lib/sqlalchemy/dialects/postgresql/zxjdbc.py b/app/lib/sqlalchemy/dialects/postgresql/zxjdbc.py new file mode 100644 index 0000000..f3cfbb8 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/postgresql/zxjdbc.py @@ -0,0 +1,46 @@ +# postgresql/zxjdbc.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. 
dialect:: postgresql+zxjdbc + :name: zxJDBC for Jython + :dbapi: zxjdbc + :connectstring: postgresql+zxjdbc://scott:tiger@localhost/db + :driverurl: http://jdbc.postgresql.org/ + + +""" +from ...connectors.zxJDBC import ZxJDBCConnector +from .base import PGDialect, PGExecutionContext + + +class PGExecutionContext_zxjdbc(PGExecutionContext): + + def create_cursor(self): + cursor = self._dbapi_connection.cursor() + cursor.datahandler = self.dialect.DataHandler(cursor.datahandler) + return cursor + + +class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect): + jdbc_db_name = 'postgresql' + jdbc_driver_name = 'org.postgresql.Driver' + + execution_ctx_cls = PGExecutionContext_zxjdbc + + supports_native_decimal = True + + def __init__(self, *args, **kwargs): + super(PGDialect_zxjdbc, self).__init__(*args, **kwargs) + from com.ziclix.python.sql.handler import PostgresqlDataHandler + self.DataHandler = PostgresqlDataHandler + + def _get_server_version_info(self, connection): + parts = connection.connection.dbversion.split('.') + return tuple(int(x) for x in parts) + +dialect = PGDialect_zxjdbc diff --git a/app/lib/sqlalchemy/dialects/sqlite/__init__.py b/app/lib/sqlalchemy/dialects/sqlite/__init__.py new file mode 100644 index 0000000..a0ec025 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/sqlite/__init__.py @@ -0,0 +1,20 @@ +# sqlite/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from sqlalchemy.dialects.sqlite import base, pysqlite, pysqlcipher + +# default dialect +base.dialect = pysqlite.dialect + +from sqlalchemy.dialects.sqlite.base import ( + BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, INTEGER, REAL, + NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR, dialect, +) + +__all__ = ('BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', + 'FLOAT', 'INTEGER', 'NUMERIC', 'SMALLINT', 'TEXT', 'TIME', + 'TIMESTAMP', 'VARCHAR', 'REAL', 'dialect') diff --git a/app/lib/sqlalchemy/dialects/sqlite/base.py b/app/lib/sqlalchemy/dialects/sqlite/base.py new file mode 100644 index 0000000..76193ff --- /dev/null +++ b/app/lib/sqlalchemy/dialects/sqlite/base.py @@ -0,0 +1,1577 @@ +# sqlite/base.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +r""" +.. dialect:: sqlite + :name: SQLite + +.. _sqlite_datetime: + +Date and Time Types +------------------- + +SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does +not provide out of the box functionality for translating values between Python +`datetime` objects and a SQLite-supported format. SQLAlchemy's own +:class:`~sqlalchemy.types.DateTime` and related types provide date formatting +and parsing functionality when SQlite is used. The implementation classes are +:class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`. +These types represent dates and times as ISO formatted strings, which also +nicely support ordering. There's no reliance on typical "libc" internals for +these functions so historical dates are fully supported. + +Ensuring Text affinity +^^^^^^^^^^^^^^^^^^^^^^ + +The DDL rendered for these types is the standard ``DATE``, ``TIME`` +and ``DATETIME`` indicators. However, custom storage formats can also be +applied to these types. 
When the +storage format is detected as containing no alpha characters, the DDL for +these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``, +so that the column continues to have textual affinity. + +.. seealso:: + + `Type Affinity `_ - in the SQLite documentation + +.. _sqlite_autoincrement: + +SQLite Auto Incrementing Behavior +---------------------------------- + +Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html + +Key concepts: + +* SQLite has an implicit "auto increment" feature that takes place for any + non-composite primary-key column that is specifically created using + "INTEGER PRIMARY KEY" for the type + primary key. + +* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not** + equivalent to the implicit autoincrement feature; this keyword is not + recommended for general use. SQLAlchemy does not render this keyword + unless a special SQLite-specific directive is used (see below). However, + it still requires that the column's type is named "INTEGER". + +Using the AUTOINCREMENT Keyword +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To specifically render the AUTOINCREMENT keyword on the primary key column +when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table +construct:: + + Table('sometable', metadata, + Column('id', Integer, primary_key=True), + sqlite_autoincrement=True) + +Allowing autoincrement behavior SQLAlchemy types other than Integer/INTEGER +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +SQLite's typing model is based on naming conventions. Among +other things, this means that any type name which contains the +substring ``"INT"`` will be determined to be of "integer affinity". A +type named ``"BIGINT"``, ``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by +SQLite to be of "integer" affinity. However, **the SQLite +autoincrement feature, whether implicitly or explicitly enabled, +requires that the name of the column's type +is exactly the string "INTEGER"**. Therefore, if an +application uses a type like :class:`.BigInteger` for a primary key, on +SQLite this type will need to be rendered as the name ``"INTEGER"`` when +emitting the initial ``CREATE TABLE`` statement in order for the autoincrement +behavior to be available. + +One approach to achieve this is to use :class:`.Integer` on SQLite +only using :meth:`.TypeEngine.with_variant`:: + + table = Table( + "my_table", metadata, + Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True) + ) + +Another is to use a subclass of :class:`.BigInteger` that overrides its DDL name +to be ``INTEGER`` when compiled against SQLite:: + + from sqlalchemy import BigInteger + from sqlalchemy.ext.compiler import compiles + + class SLBigInteger(BigInteger): + pass + + @compiles(SLBigInteger, 'sqlite') + def bi_c(element, compiler, **kw): + return "INTEGER" + + @compiles(SLBigInteger) + def bi_c(element, compiler, **kw): + return compiler.visit_BIGINT(element, **kw) + + + table = Table( + "my_table", metadata, + Column("id", SLBigInteger(), primary_key=True) + ) + +.. seealso:: + + :meth:`.TypeEngine.with_variant` + + :ref:`sqlalchemy.ext.compiler_toplevel` + + `Datatypes In SQLite Version 3 `_ + +.. _sqlite_concurrency: + +Database Locking Behavior / Concurrency +--------------------------------------- + +SQLite is not designed for a high level of write concurrency. 
The database +itself, being a file, is locked completely during write operations within +transactions, meaning exactly one "connection" (in reality a file handle) +has exclusive access to the database during this period - all other +"connections" will be blocked during this time. + +The Python DBAPI specification also calls for a connection model that is +always in a transaction; there is no ``connection.begin()`` method, +only ``connection.commit()`` and ``connection.rollback()``, upon which a +new transaction is to be begun immediately. This may seem to imply +that the SQLite driver would in theory allow only a single filehandle on a +particular database file at any time; however, there are several +factors both within SQlite itself as well as within the pysqlite driver +which loosen this restriction significantly. + +However, no matter what locking modes are used, SQLite will still always +lock the database file once a transaction is started and DML (e.g. INSERT, +UPDATE, DELETE) has at least been emitted, and this will block +other transactions at least at the point that they also attempt to emit DML. +By default, the length of time on this block is very short before it times out +with an error. + +This behavior becomes more critical when used in conjunction with the +SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs +within a transaction, and with its autoflush model, may emit DML preceding +any SELECT statement. This may lead to a SQLite database that locks +more quickly than is expected. The locking mode of SQLite and the pysqlite +driver can be manipulated to some degree, however it should be noted that +achieving a high degree of write-concurrency with SQLite is a losing battle. + +For more information on SQLite's lack of write concurrency by design, please +see +`Situations Where Another RDBMS May Work Better - High Concurrency +`_ near the bottom of the page. + +The following subsections introduce areas that are impacted by SQLite's +file-based architecture and additionally will usually require workarounds to +work when using the pysqlite driver. + +.. _sqlite_isolation_level: + +Transaction Isolation Level +---------------------------- + +SQLite supports "transaction isolation" in a non-standard way, along two +axes. One is that of the `PRAGMA read_uncommitted `_ +instruction. This setting can essentially switch SQLite between its +default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation +mode normally referred to as ``READ UNCOMMITTED``. + +SQLAlchemy ties into this PRAGMA statement using the +:paramref:`.create_engine.isolation_level` parameter of :func:`.create_engine`. +Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"`` +and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively. +SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by +the pysqlite driver's default behavior. + +The other axis along which SQLite's transactional locking is impacted is +via the nature of the ``BEGIN`` statement used. The three varieties +are "deferred", "immediate", and "exclusive", as described at +`BEGIN TRANSACTION `_. A straight +``BEGIN`` statement uses the "deferred" mode, where the the database file is +not locked until the first read or write operation, and read access remains +open to other transactions until the first write operation. But again, +it is critical to note that the pysqlite driver interferes with this behavior +by *not even emitting BEGIN* until the first write operation. 
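+
+As a rough sketch of the workaround detailed in :ref:`pysqlite_serializable`,
+pysqlite's own transaction handling can be disabled and ``BEGIN`` emitted from
+an event hook instead; the database URL below is illustrative only::
+
+    from sqlalchemy import create_engine, event
+
+    engine = create_engine("sqlite:///myfile.db")
+
+    @event.listens_for(engine, "connect")
+    def do_connect(dbapi_connection, connection_record):
+        # stop pysqlite from emitting BEGIN (and from forcing COMMIT
+        # ahead of DDL) on its own
+        dbapi_connection.isolation_level = None
+
+    @event.listens_for(engine, "begin")
+    def do_begin(conn):
+        # emit our own BEGIN so the transactional scope is predictable
+        conn.execute("BEGIN")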
+
+.. warning::
+
+    SQLite's transactional scope is impacted by unresolved
+    issues in the pysqlite driver, which defers BEGIN statements to a greater
+    degree than is often feasible. See the section :ref:`pysqlite_serializable`
+    for techniques to work around this behavior.
+
+SAVEPOINT Support
+----------------------------
+
+SQLite supports SAVEPOINTs, which only function once a transaction is
+begun. SQLAlchemy's SAVEPOINT support is available using the
+:meth:`.Connection.begin_nested` method at the Core level, and
+:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs
+won't work at all with pysqlite unless workarounds are taken.
+
+.. warning::
+
+    SQLite's SAVEPOINT feature is impacted by unresolved
+    issues in the pysqlite driver, which defers BEGIN statements to a greater
+    degree than is often feasible. See the section :ref:`pysqlite_serializable`
+    for techniques to work around this behavior.
+
+Transactional DDL
+----------------------------
+
+The SQLite database supports transactional :term:`DDL` as well.
+In this case, the pysqlite driver is not only failing to start transactions,
+it also is ending any existing transaction when DDL is detected, so again,
+workarounds are required.
+
+.. warning::
+
+    SQLite's transactional DDL is impacted by unresolved issues
+    in the pysqlite driver, which fails to emit BEGIN and additionally
+    forces a COMMIT to cancel any transaction when DDL is encountered.
+    See the section :ref:`pysqlite_serializable`
+    for techniques to work around this behavior.
+
+.. _sqlite_foreign_keys:
+
+Foreign Key Support
+-------------------
+
+SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
+however by default these constraints have no effect on the operation of the
+table.
+
+Constraint checking on SQLite has three prerequisites:
+
+* At least version 3.6.19 of SQLite must be in use
+* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
+  or SQLITE_OMIT_TRIGGER symbols enabled.
+* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
+  connections before use.
+
+SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
+new connections through the usage of events::
+
+    from sqlalchemy.engine import Engine
+    from sqlalchemy import event
+
+    @event.listens_for(Engine, "connect")
+    def set_sqlite_pragma(dbapi_connection, connection_record):
+        cursor = dbapi_connection.cursor()
+        cursor.execute("PRAGMA foreign_keys=ON")
+        cursor.close()
+
+.. warning::
+
+    When SQLite foreign keys are enabled, it is **not possible**
+    to emit CREATE or DROP statements for tables that contain
+    mutually-dependent foreign key constraints;
+    to emit the DDL for these tables requires that ALTER TABLE be used to
+    create or drop these constraints separately, for which SQLite has
+    no support.
+
+.. seealso::
+
+    `SQLite Foreign Key Support `_
+    - on the SQLite web site.
+
+    :ref:`event_toplevel` - SQLAlchemy event API.
+
+    :ref:`use_alter` - more information on SQLAlchemy's facilities for handling
+    mutually-dependent foreign key constraints.
+
+.. _sqlite_type_reflection:
+
+Type Reflection
+---------------
+
+SQLite types are unlike those of most other database backends, in that
+the string name of the type usually does not correspond to a "type" in a
+one-to-one fashion. Instead, SQLite links per-column typing behavior
+to one of five so-called "type affinities" based on a string matching
+pattern for the type.
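+
+As a minimal sketch of these affinity rules in practice (the type names here
+are invented solely to trigger each of the matching patterns described
+below), reflection falls back to affinity-based types::
+
+    from sqlalchemy import create_engine, inspect
+
+    engine = create_engine("sqlite://")
+    engine.execute("CREATE TABLE t (a XYZINTQPR, b SOMECLOB, c MYDOUB)")
+
+    for col in inspect(engine).get_columns("t"):
+        print(col["name"], col["type"])
+
+    # a INTEGER   ("INT" substring -> integer affinity)
+    # b TEXT      ("CLOB" substring -> text affinity)
+    # c REAL      ("DOUB" substring -> real affinity)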
+ +SQLAlchemy's reflection process, when inspecting types, uses a simple +lookup table to link the keywords returned to provided SQLAlchemy types. +This lookup table is present within the SQLite dialect as it is for all +other dialects. However, the SQLite dialect has a different "fallback" +routine for when a particular type name is not located in the lookup map; +it instead implements the SQLite "type affinity" scheme located at +http://www.sqlite.org/datatype3.html section 2.1. + +The provided typemap will make direct associations from an exact string +name match for the following types: + +:class:`~.types.BIGINT`, :class:`~.types.BLOB`, +:class:`~.types.BOOLEAN`, :class:`~.types.BOOLEAN`, +:class:`~.types.CHAR`, :class:`~.types.DATE`, +:class:`~.types.DATETIME`, :class:`~.types.FLOAT`, +:class:`~.types.DECIMAL`, :class:`~.types.FLOAT`, +:class:`~.types.INTEGER`, :class:`~.types.INTEGER`, +:class:`~.types.NUMERIC`, :class:`~.types.REAL`, +:class:`~.types.SMALLINT`, :class:`~.types.TEXT`, +:class:`~.types.TIME`, :class:`~.types.TIMESTAMP`, +:class:`~.types.VARCHAR`, :class:`~.types.NVARCHAR`, +:class:`~.types.NCHAR` + +When a type name does not match one of the above types, the "type affinity" +lookup is used instead: + +* :class:`~.types.INTEGER` is returned if the type name includes the + string ``INT`` +* :class:`~.types.TEXT` is returned if the type name includes the + string ``CHAR``, ``CLOB`` or ``TEXT`` +* :class:`~.types.NullType` is returned if the type name includes the + string ``BLOB`` +* :class:`~.types.REAL` is returned if the type name includes the string + ``REAL``, ``FLOA`` or ``DOUB``. +* Otherwise, the :class:`~.types.NUMERIC` type is used. + +.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting + columns. + + +.. _sqlite_partial_index: + +Partial Indexes +--------------- + +A partial index, e.g. one which uses a WHERE clause, can be specified +with the DDL system using the argument ``sqlite_where``:: + + tbl = Table('testtbl', m, Column('data', Integer)) + idx = Index('test_idx1', tbl.c.data, + sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10)) + +The index will be rendered at create time as:: + + CREATE INDEX test_idx1 ON testtbl (data) + WHERE data > 5 AND data < 10 + +.. versionadded:: 0.9.9 + +.. _sqlite_dotted_column_names: + +Dotted Column Names +------------------- + +Using table or column names that explicitly have periods in them is +**not recommended**. While this is generally a bad idea for relational +databases in general, as the dot is a syntactically significant character, +the SQLite driver up until version **3.10.0** of SQLite has a bug which +requires that SQLAlchemy filter out these dots in result sets. + +.. versionchanged:: 1.1 + + The following SQLite issue has been resolved as of version 3.10.0 + of SQLite. SQLAlchemy as of **1.1** automatically disables its internal + workarounds based on detection of this version. 
+ +The bug, entirely outside of SQLAlchemy, can be illustrated thusly:: + + import sqlite3 + + assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version" + + conn = sqlite3.connect(":memory:") + cursor = conn.cursor() + + cursor.execute("create table x (a integer, b integer)") + cursor.execute("insert into x (a, b) values (1, 1)") + cursor.execute("insert into x (a, b) values (2, 2)") + + cursor.execute("select x.a, x.b from x") + assert [c[0] for c in cursor.description] == ['a', 'b'] + + cursor.execute(''' + select x.a, x.b from x where a=1 + union + select x.a, x.b from x where a=2 + ''') + assert [c[0] for c in cursor.description] == ['a', 'b'], \ + [c[0] for c in cursor.description] + +The second assertion fails:: + + Traceback (most recent call last): + File "test.py", line 19, in + [c[0] for c in cursor.description] + AssertionError: ['x.a', 'x.b'] + +Where above, the driver incorrectly reports the names of the columns +including the name of the table, which is entirely inconsistent vs. +when the UNION is not present. + +SQLAlchemy relies upon column names being predictable in how they match +to the original statement, so the SQLAlchemy dialect has no choice but +to filter these out:: + + + from sqlalchemy import create_engine + + eng = create_engine("sqlite://") + conn = eng.connect() + + conn.execute("create table x (a integer, b integer)") + conn.execute("insert into x (a, b) values (1, 1)") + conn.execute("insert into x (a, b) values (2, 2)") + + result = conn.execute("select x.a, x.b from x") + assert result.keys() == ["a", "b"] + + result = conn.execute(''' + select x.a, x.b from x where a=1 + union + select x.a, x.b from x where a=2 + ''') + assert result.keys() == ["a", "b"] + +Note that above, even though SQLAlchemy filters out the dots, *both +names are still addressable*:: + + >>> row = result.first() + >>> row["a"] + 1 + >>> row["x.a"] + 1 + >>> row["b"] + 1 + >>> row["x.b"] + 1 + +Therefore, the workaround applied by SQLAlchemy only impacts +:meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` in the public API. +In the very specific case where +an application is forced to use column names that contain dots, and the +functionality of :meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` +is required to return these dotted names unmodified, the ``sqlite_raw_colnames`` +execution option may be provided, either on a per-:class:`.Connection` basis:: + + result = conn.execution_options(sqlite_raw_colnames=True).execute(''' + select x.a, x.b from x where a=1 + union + select x.a, x.b from x where a=2 + ''') + assert result.keys() == ["x.a", "x.b"] + +or on a per-:class:`.Engine` basis:: + + engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True}) + +When using the per-:class:`.Engine` execution option, note that +**Core and ORM queries that use UNION may not function properly**. + +""" + +import datetime +import re + +from ... import processors +from ... import sql, exc +from ... import types as sqltypes, schema as sa_schema +from ... 
from ...engine import default, reflection
from ...sql import compiler

from ...types import (BLOB, BOOLEAN, CHAR, DECIMAL, FLOAT,
                      INTEGER, REAL, NUMERIC, SMALLINT, TEXT,
                      TIMESTAMP, VARCHAR)


class _DateTimeMixin(object):
    _reg = None
    _storage_format = None

    def __init__(self, storage_format=None, regexp=None, **kw):
        super(_DateTimeMixin, self).__init__(**kw)
        if regexp is not None:
            self._reg = re.compile(regexp)
        if storage_format is not None:
            self._storage_format = storage_format

    @property
    def format_is_text_affinity(self):
        """return True if the storage format will automatically imply
        a TEXT affinity.

        If the storage format contains no non-numeric characters,
        it will imply a NUMERIC storage format on SQLite; otherwise,
        the format implies TEXT affinity, and the type will generate
        its DDL as DATE_CHAR, DATETIME_CHAR, TIME_CHAR.

        .. versionadded:: 1.0.0

        """
        spec = self._storage_format % {
            "year": 0, "month": 0, "day": 0, "hour": 0,
            "minute": 0, "second": 0, "microsecond": 0
        }
        return bool(re.search(r'[^0-9]', spec))

    def adapt(self, cls, **kw):
        if issubclass(cls, _DateTimeMixin):
            if self._storage_format:
                kw["storage_format"] = self._storage_format
            if self._reg:
                kw["regexp"] = self._reg
        return super(_DateTimeMixin, self).adapt(cls, **kw)

    def literal_processor(self, dialect):
        bp = self.bind_processor(dialect)

        def process(value):
            return "'%s'" % bp(value)
        return process


class DATETIME(_DateTimeMixin, sqltypes.DateTime):
    r"""Represent a Python datetime object in SQLite using a string.

    The default string storage format is::

        "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"

    e.g.::

        2011-03-15 12:05:57.10558

    The storage format can be customized to some degree using the
    ``storage_format`` and ``regexp`` parameters, such as::

        import re
        from sqlalchemy.dialects.sqlite import DATETIME

        dt = DATETIME(
            storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(minute)02d:%(second)02d",
            regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)"
        )

    :param storage_format: format string which will be applied to the dict
     with keys year, month, day, hour, minute, second, and microsecond.

    :param regexp: regular expression which will be applied to incoming result
     rows. If the regexp contains named groups, the resulting match dict is
     applied to the Python datetime() constructor as keyword arguments.
     Otherwise, if positional groups are used, the datetime() constructor
     is called with positional arguments via
     ``*map(int, match_obj.groups(0))``.
    """

    _storage_format = (
        "%(year)04d-%(month)02d-%(day)02d "
        "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
    )

    def __init__(self, *args, **kwargs):
        truncate_microseconds = kwargs.pop('truncate_microseconds', False)
        super(DATETIME, self).__init__(*args, **kwargs)
        if truncate_microseconds:
            assert 'storage_format' not in kwargs, "You can specify only "\
                "one of truncate_microseconds or storage_format."
            assert 'regexp' not in kwargs, "You can specify only one of "\
                "truncate_microseconds or regexp."
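            # truncate_microseconds: swap in a storage format identical to
            # the default, minus the ".%(microsecond)06d" portion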
            self._storage_format = (
                "%(year)04d-%(month)02d-%(day)02d "
                "%(hour)02d:%(minute)02d:%(second)02d"
            )

    def bind_processor(self, dialect):
        datetime_datetime = datetime.datetime
        datetime_date = datetime.date
        format = self._storage_format

        def process(value):
            if value is None:
                return None
            elif isinstance(value, datetime_datetime):
                return format % {
                    'year': value.year,
                    'month': value.month,
                    'day': value.day,
                    'hour': value.hour,
                    'minute': value.minute,
                    'second': value.second,
                    'microsecond': value.microsecond,
                }
            elif isinstance(value, datetime_date):
                return format % {
                    'year': value.year,
                    'month': value.month,
                    'day': value.day,
                    'hour': 0,
                    'minute': 0,
                    'second': 0,
                    'microsecond': 0,
                }
            else:
                raise TypeError("SQLite DateTime type only accepts Python "
                                "datetime and date objects as input.")
        return process

    def result_processor(self, dialect, coltype):
        if self._reg:
            return processors.str_to_datetime_processor_factory(
                self._reg, datetime.datetime)
        else:
            return processors.str_to_datetime


class DATE(_DateTimeMixin, sqltypes.Date):
    r"""Represent a Python date object in SQLite using a string.

    The default string storage format is::

        "%(year)04d-%(month)02d-%(day)02d"

    e.g.::

        2011-03-15

    The storage format can be customized to some degree using the
    ``storage_format`` and ``regexp`` parameters, such as::

        import re
        from sqlalchemy.dialects.sqlite import DATE

        d = DATE(
            storage_format="%(month)02d/%(day)02d/%(year)04d",
            regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
        )

    :param storage_format: format string which will be applied to the
     dict with keys year, month, and day.

    :param regexp: regular expression which will be applied to
     incoming result rows. If the regexp contains named groups, the
     resulting match dict is applied to the Python date() constructor
     as keyword arguments. Otherwise, if positional groups are used, the
     date() constructor is called with positional arguments via
     ``*map(int, match_obj.groups(0))``.
    """

    _storage_format = "%(year)04d-%(month)02d-%(day)02d"

    def bind_processor(self, dialect):
        datetime_date = datetime.date
        format = self._storage_format

        def process(value):
            if value is None:
                return None
            elif isinstance(value, datetime_date):
                return format % {
                    'year': value.year,
                    'month': value.month,
                    'day': value.day,
                }
            else:
                raise TypeError("SQLite Date type only accepts Python "
                                "date objects as input.")
        return process

    def result_processor(self, dialect, coltype):
        if self._reg:
            return processors.str_to_datetime_processor_factory(
                self._reg, datetime.date)
        else:
            return processors.str_to_date


class TIME(_DateTimeMixin, sqltypes.Time):
    r"""Represent a Python time object in SQLite using a string.

    The default string storage format is::

        "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"

    e.g.::

        12:05:57.10558

    The storage format can be customized to some degree using the
    ``storage_format`` and ``regexp`` parameters, such as::

        import re
        from sqlalchemy.dialects.sqlite import TIME

        t = TIME(
            storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d",
            regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
        )

    :param storage_format: format string which will be applied to the dict
     with keys hour, minute, second, and microsecond.

    :param regexp: regular expression which will be applied to incoming result
     rows.
If the regexp contains named groups, the resulting match dict is + applied to the Python time() constructor as keyword arguments. Otherwise, + if positional groups are used, the time() constructor is called with + positional arguments via ``*map(int, match_obj.groups(0))``. + """ + + _storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" + + def __init__(self, *args, **kwargs): + truncate_microseconds = kwargs.pop('truncate_microseconds', False) + super(TIME, self).__init__(*args, **kwargs) + if truncate_microseconds: + assert 'storage_format' not in kwargs, "You can specify only "\ + "one of truncate_microseconds or storage_format." + assert 'regexp' not in kwargs, "You can specify only one of "\ + "truncate_microseconds or regexp." + self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d" + + def bind_processor(self, dialect): + datetime_time = datetime.time + format = self._storage_format + + def process(value): + if value is None: + return None + elif isinstance(value, datetime_time): + return format % { + 'hour': value.hour, + 'minute': value.minute, + 'second': value.second, + 'microsecond': value.microsecond, + } + else: + raise TypeError("SQLite Time type only accepts Python " + "time objects as input.") + return process + + def result_processor(self, dialect, coltype): + if self._reg: + return processors.str_to_datetime_processor_factory( + self._reg, datetime.time) + else: + return processors.str_to_time + +colspecs = { + sqltypes.Date: DATE, + sqltypes.DateTime: DATETIME, + sqltypes.Time: TIME, +} + +ischema_names = { + 'BIGINT': sqltypes.BIGINT, + 'BLOB': sqltypes.BLOB, + 'BOOL': sqltypes.BOOLEAN, + 'BOOLEAN': sqltypes.BOOLEAN, + 'CHAR': sqltypes.CHAR, + 'DATE': sqltypes.DATE, + 'DATE_CHAR': sqltypes.DATE, + 'DATETIME': sqltypes.DATETIME, + 'DATETIME_CHAR': sqltypes.DATETIME, + 'DOUBLE': sqltypes.FLOAT, + 'DECIMAL': sqltypes.DECIMAL, + 'FLOAT': sqltypes.FLOAT, + 'INT': sqltypes.INTEGER, + 'INTEGER': sqltypes.INTEGER, + 'NUMERIC': sqltypes.NUMERIC, + 'REAL': sqltypes.REAL, + 'SMALLINT': sqltypes.SMALLINT, + 'TEXT': sqltypes.TEXT, + 'TIME': sqltypes.TIME, + 'TIME_CHAR': sqltypes.TIME, + 'TIMESTAMP': sqltypes.TIMESTAMP, + 'VARCHAR': sqltypes.VARCHAR, + 'NVARCHAR': sqltypes.NVARCHAR, + 'NCHAR': sqltypes.NCHAR, +} + + +class SQLiteCompiler(compiler.SQLCompiler): + extract_map = util.update_copy( + compiler.SQLCompiler.extract_map, + { + 'month': '%m', + 'day': '%d', + 'year': '%Y', + 'second': '%S', + 'hour': '%H', + 'doy': '%j', + 'minute': '%M', + 'epoch': '%s', + 'dow': '%w', + 'week': '%W', + }) + + def visit_now_func(self, fn, **kw): + return "CURRENT_TIMESTAMP" + + def visit_localtimestamp_func(self, func, **kw): + return 'DATETIME(CURRENT_TIMESTAMP, "localtime")' + + def visit_true(self, expr, **kw): + return '1' + + def visit_false(self, expr, **kw): + return '0' + + def visit_char_length_func(self, fn, **kw): + return "length%s" % self.function_argspec(fn) + + def visit_cast(self, cast, **kwargs): + if self.dialect.supports_cast: + return super(SQLiteCompiler, self).visit_cast(cast, **kwargs) + else: + return self.process(cast.clause, **kwargs) + + def visit_extract(self, extract, **kw): + try: + return "CAST(STRFTIME('%s', %s) AS INTEGER)" % ( + self.extract_map[extract.field], + self.process(extract.expr, **kw) + ) + except KeyError: + raise exc.CompileError( + "%s is not a valid extract argument." 
% extract.field) + + def limit_clause(self, select, **kw): + text = "" + if select._limit_clause is not None: + text += "\n LIMIT " + self.process(select._limit_clause, **kw) + if select._offset_clause is not None: + if select._limit_clause is None: + text += "\n LIMIT " + self.process(sql.literal(-1)) + text += " OFFSET " + self.process(select._offset_clause, **kw) + else: + text += " OFFSET " + self.process(sql.literal(0), **kw) + return text + + def for_update_clause(self, select, **kw): + # sqlite has no "FOR UPDATE" AFAICT + return '' + + def visit_is_distinct_from_binary(self, binary, operator, **kw): + return "%s IS NOT %s" % (self.process(binary.left), + self.process(binary.right)) + + def visit_isnot_distinct_from_binary(self, binary, operator, **kw): + return "%s IS %s" % (self.process(binary.left), + self.process(binary.right)) + + +class SQLiteDDLCompiler(compiler.DDLCompiler): + + def get_column_specification(self, column, **kwargs): + coltype = self.dialect.type_compiler.process( + column.type, type_expression=column) + colspec = self.preparer.format_column(column) + " " + coltype + default = self.get_column_default_string(column) + if default is not None: + colspec += " DEFAULT " + default + + if not column.nullable: + colspec += " NOT NULL" + + if column.primary_key: + if ( + column.autoincrement is True and + len(column.table.primary_key.columns) != 1 + ): + raise exc.CompileError( + "SQLite does not support autoincrement for " + "composite primary keys") + + if (column.table.dialect_options['sqlite']['autoincrement'] and + len(column.table.primary_key.columns) == 1 and + issubclass(column.type._type_affinity, sqltypes.Integer) and + not column.foreign_keys): + colspec += " PRIMARY KEY AUTOINCREMENT" + + return colspec + + def visit_primary_key_constraint(self, constraint): + # for columns with sqlite_autoincrement=True, + # the PRIMARY KEY constraint can only be inline + # with the column itself. 
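        # (i.e. "x INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT"; SQLite
        # rejects AUTOINCREMENT within a separate PRIMARY KEY clause, so
        # the constraint is suppressed here and rendered inline by
        # get_column_specification() instead.)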
+ if len(constraint.columns) == 1: + c = list(constraint)[0] + if (c.primary_key and + c.table.dialect_options['sqlite']['autoincrement'] and + issubclass(c.type._type_affinity, sqltypes.Integer) and + not c.foreign_keys): + return None + + return super(SQLiteDDLCompiler, self).visit_primary_key_constraint( + constraint) + + def visit_foreign_key_constraint(self, constraint): + + local_table = constraint.elements[0].parent.table + remote_table = constraint.elements[0].column.table + + if local_table.schema != remote_table.schema: + return None + else: + return super( + SQLiteDDLCompiler, + self).visit_foreign_key_constraint(constraint) + + def define_constraint_remote_table(self, constraint, table, preparer): + """Format the remote table clause of a CREATE CONSTRAINT clause.""" + + return preparer.format_table(table, use_schema=False) + + def visit_create_index(self, create, include_schema=False, + include_table_schema=True): + index = create.element + self._verify_index_table(index) + preparer = self.preparer + text = "CREATE " + if index.unique: + text += "UNIQUE " + text += "INDEX %s ON %s (%s)" \ + % ( + self._prepared_index_name(index, + include_schema=True), + preparer.format_table(index.table, + use_schema=False), + ', '.join( + self.sql_compiler.process( + expr, include_table=False, literal_binds=True) for + expr in index.expressions) + ) + + whereclause = index.dialect_options["sqlite"]["where"] + if whereclause is not None: + where_compiled = self.sql_compiler.process( + whereclause, include_table=False, + literal_binds=True) + text += " WHERE " + where_compiled + + return text + + +class SQLiteTypeCompiler(compiler.GenericTypeCompiler): + def visit_large_binary(self, type_, **kw): + return self.visit_BLOB(type_) + + def visit_DATETIME(self, type_, **kw): + if not isinstance(type_, _DateTimeMixin) or \ + type_.format_is_text_affinity: + return super(SQLiteTypeCompiler, self).visit_DATETIME(type_) + else: + return "DATETIME_CHAR" + + def visit_DATE(self, type_, **kw): + if not isinstance(type_, _DateTimeMixin) or \ + type_.format_is_text_affinity: + return super(SQLiteTypeCompiler, self).visit_DATE(type_) + else: + return "DATE_CHAR" + + def visit_TIME(self, type_, **kw): + if not isinstance(type_, _DateTimeMixin) or \ + type_.format_is_text_affinity: + return super(SQLiteTypeCompiler, self).visit_TIME(type_) + else: + return "TIME_CHAR" + + +class SQLiteIdentifierPreparer(compiler.IdentifierPreparer): + reserved_words = set([ + 'add', 'after', 'all', 'alter', 'analyze', 'and', 'as', 'asc', + 'attach', 'autoincrement', 'before', 'begin', 'between', 'by', + 'cascade', 'case', 'cast', 'check', 'collate', 'column', 'commit', + 'conflict', 'constraint', 'create', 'cross', 'current_date', + 'current_time', 'current_timestamp', 'database', 'default', + 'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct', + 'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive', + 'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob', + 'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index', + 'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect', + 'into', 'is', 'isnull', 'join', 'key', 'left', 'like', 'limit', + 'match', 'natural', 'not', 'notnull', 'null', 'of', 'offset', 'on', + 'or', 'order', 'outer', 'plan', 'pragma', 'primary', 'query', + 'raise', 'references', 'reindex', 'rename', 'replace', 'restrict', + 'right', 'rollback', 'row', 'select', 'set', 'table', 'temp', + 'temporary', 'then', 'to', 'transaction', 'trigger', 'true', 'union', + 
'unique', 'update', 'using', 'vacuum', 'values', 'view', 'virtual', + 'when', 'where', + ]) + + def format_index(self, index, use_schema=True, name=None): + """Prepare a quoted index and schema name.""" + + if name is None: + name = index.name + result = self.quote(name, index.quote) + if (not self.omit_schema and + use_schema and + getattr(index.table, "schema", None)): + result = self.quote_schema( + index.table.schema, index.table.quote_schema) + "." + result + return result + + +class SQLiteExecutionContext(default.DefaultExecutionContext): + @util.memoized_property + def _preserve_raw_colnames(self): + return not self.dialect._broken_dotted_colnames or \ + self.execution_options.get("sqlite_raw_colnames", False) + + def _translate_colname(self, colname): + # TODO: detect SQLite version 3.10.0 or greater; + # see [ticket:3633] + + # adjust for dotted column names. SQLite + # in the case of UNION may store col names as + # "tablename.colname", or if using an attached database, + # "database.tablename.colname", in cursor.description + if not self._preserve_raw_colnames and "." in colname: + return colname.split(".")[-1], colname + else: + return colname, None + + +class SQLiteDialect(default.DefaultDialect): + name = 'sqlite' + supports_alter = False + supports_unicode_statements = True + supports_unicode_binds = True + supports_default_values = True + supports_empty_insert = False + supports_cast = True + supports_multivalues_insert = True + + default_paramstyle = 'qmark' + execution_ctx_cls = SQLiteExecutionContext + statement_compiler = SQLiteCompiler + ddl_compiler = SQLiteDDLCompiler + type_compiler = SQLiteTypeCompiler + preparer = SQLiteIdentifierPreparer + ischema_names = ischema_names + colspecs = colspecs + isolation_level = None + + supports_cast = True + supports_default_values = True + + construct_arguments = [ + (sa_schema.Table, { + "autoincrement": False + }), + (sa_schema.Index, { + "where": None, + }), + ] + + _broken_fk_pragma_quotes = False + _broken_dotted_colnames = False + + def __init__(self, isolation_level=None, native_datetime=False, **kwargs): + default.DefaultDialect.__init__(self, **kwargs) + self.isolation_level = isolation_level + + # this flag used by pysqlite dialect, and perhaps others in the + # future, to indicate the driver is handling date/timestamp + # conversions (and perhaps datetime/time as well on some hypothetical + # driver ?) + self.native_datetime = native_datetime + + if self.dbapi is not None: + self.supports_right_nested_joins = ( + self.dbapi.sqlite_version_info >= (3, 7, 16)) + self._broken_dotted_colnames = ( + self.dbapi.sqlite_version_info < (3, 10, 0) + ) + self.supports_default_values = ( + self.dbapi.sqlite_version_info >= (3, 3, 8)) + self.supports_cast = ( + self.dbapi.sqlite_version_info >= (3, 2, 3)) + self.supports_multivalues_insert = ( + # http://www.sqlite.org/releaselog/3_7_11.html + self.dbapi.sqlite_version_info >= (3, 7, 11)) + # see http://www.sqlalchemy.org/trac/ticket/2568 + # as well as http://www.sqlite.org/src/info/600482d161 + self._broken_fk_pragma_quotes = ( + self.dbapi.sqlite_version_info < (3, 6, 14)) + + _isolation_lookup = { + 'READ UNCOMMITTED': 1, + 'SERIALIZABLE': 0, + } + + def set_isolation_level(self, connection, level): + try: + isolation_level = self._isolation_lookup[level.replace('_', ' ')] + except KeyError: + raise exc.ArgumentError( + "Invalid value '%s' for isolation_level. 
" + "Valid isolation levels for %s are %s" % + (level, self.name, ", ".join(self._isolation_lookup)) + ) + cursor = connection.cursor() + cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level) + cursor.close() + + def get_isolation_level(self, connection): + cursor = connection.cursor() + cursor.execute('PRAGMA read_uncommitted') + res = cursor.fetchone() + if res: + value = res[0] + else: + # http://www.sqlite.org/changes.html#version_3_3_3 + # "Optional READ UNCOMMITTED isolation (instead of the + # default isolation level of SERIALIZABLE) and + # table level locking when database connections + # share a common cache."" + # pre-SQLite 3.3.0 default to 0 + value = 0 + cursor.close() + if value == 0: + return "SERIALIZABLE" + elif value == 1: + return "READ UNCOMMITTED" + else: + assert False, "Unknown isolation level %s" % value + + def on_connect(self): + if self.isolation_level is not None: + def connect(conn): + self.set_isolation_level(conn, self.isolation_level) + return connect + else: + return None + + @reflection.cache + def get_schema_names(self, connection, **kw): + s = "PRAGMA database_list" + dl = connection.execute(s) + + return [db[1] for db in dl if db[1] != "temp"] + + @reflection.cache + def get_table_names(self, connection, schema=None, **kw): + if schema is not None: + qschema = self.identifier_preparer.quote_identifier(schema) + master = '%s.sqlite_master' % qschema + else: + master = "sqlite_master" + s = ("SELECT name FROM %s " + "WHERE type='table' ORDER BY name") % (master,) + rs = connection.execute(s) + return [row[0] for row in rs] + + @reflection.cache + def get_temp_table_names(self, connection, **kw): + s = "SELECT name FROM sqlite_temp_master "\ + "WHERE type='table' ORDER BY name " + rs = connection.execute(s) + + return [row[0] for row in rs] + + @reflection.cache + def get_temp_view_names(self, connection, **kw): + s = "SELECT name FROM sqlite_temp_master "\ + "WHERE type='view' ORDER BY name " + rs = connection.execute(s) + + return [row[0] for row in rs] + + def has_table(self, connection, table_name, schema=None): + info = self._get_table_pragma( + connection, "table_info", table_name, schema=schema) + return bool(info) + + @reflection.cache + def get_view_names(self, connection, schema=None, **kw): + if schema is not None: + qschema = self.identifier_preparer.quote_identifier(schema) + master = '%s.sqlite_master' % qschema + else: + master = "sqlite_master" + s = ("SELECT name FROM %s " + "WHERE type='view' ORDER BY name") % (master,) + rs = connection.execute(s) + + return [row[0] for row in rs] + + @reflection.cache + def get_view_definition(self, connection, view_name, schema=None, **kw): + if schema is not None: + qschema = self.identifier_preparer.quote_identifier(schema) + master = '%s.sqlite_master' % qschema + s = ("SELECT sql FROM %s WHERE name = '%s'" + "AND type='view'") % (master, view_name) + rs = connection.execute(s) + else: + try: + s = ("SELECT sql FROM " + " (SELECT * FROM sqlite_master UNION ALL " + " SELECT * FROM sqlite_temp_master) " + "WHERE name = '%s' " + "AND type='view'") % view_name + rs = connection.execute(s) + except exc.DBAPIError: + s = ("SELECT sql FROM sqlite_master WHERE name = '%s' " + "AND type='view'") % view_name + rs = connection.execute(s) + + result = rs.fetchall() + if result: + return result[0].sql + + @reflection.cache + def get_columns(self, connection, table_name, schema=None, **kw): + info = self._get_table_pragma( + connection, "table_info", table_name, schema=schema) + + columns = [] + for 
row in info:
            (name, type_, nullable, default, primary_key) = (
                row[1], row[2].upper(), not row[3], row[4], row[5])

            columns.append(self._get_column_info(name, type_, nullable,
                                                 default, primary_key))
        return columns

    def _get_column_info(self, name, type_, nullable, default, primary_key):
        coltype = self._resolve_type_affinity(type_)

        if default is not None:
            default = util.text_type(default)

        return {
            'name': name,
            'type': coltype,
            'nullable': nullable,
            'default': default,
            'autoincrement': 'auto',
            'primary_key': primary_key,
        }

    def _resolve_type_affinity(self, type_):
        """Return a data type from a reflected column, using affinity rules.

        SQLite's goal for universal compatibility introduces some complexity
        during reflection, as a column's defined type might not actually be a
        type that SQLite understands - or indeed, may not be defined *at all*.
        Internally, SQLite handles this with a 'data type affinity' for each
        column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
        'REAL', or 'NONE' (raw bits). The algorithm that determines this is
        listed in http://www.sqlite.org/datatype3.html section 2.1.

        This method allows SQLAlchemy to support that algorithm, while still
        providing access to smarter reflection utilities by recognizing
        column definitions that SQLite only supports through affinity (like
        DATE and DOUBLE).

        """
        match = re.match(r'([\w ]+)(\(.*?\))?', type_)
        if match:
            coltype = match.group(1)
            args = match.group(2)
        else:
            coltype = ''
            args = ''

        if coltype in self.ischema_names:
            coltype = self.ischema_names[coltype]
        elif 'INT' in coltype:
            coltype = sqltypes.INTEGER
        elif 'CHAR' in coltype or 'CLOB' in coltype or 'TEXT' in coltype:
            coltype = sqltypes.TEXT
        elif 'BLOB' in coltype or not coltype:
            coltype = sqltypes.NullType
        elif 'REAL' in coltype or 'FLOA' in coltype or 'DOUB' in coltype:
            coltype = sqltypes.REAL
        else:
            coltype = sqltypes.NUMERIC

        if args is not None:
            args = re.findall(r'(\d+)', args)
            try:
                coltype = coltype(*[int(a) for a in args])
            except TypeError:
                util.warn(
                    "Could not instantiate type %s with "
                    "reflected arguments %s; using no arguments." %
                    (coltype, args))
                coltype = coltype()
        else:
            coltype = coltype()

        return coltype

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        constraint_name = None
        table_data = self._get_table_sql(connection, table_name, schema=schema)
        if table_data:
            PK_PATTERN = r'CONSTRAINT (\w+) PRIMARY KEY'
            result = re.search(PK_PATTERN, table_data, re.I)
            constraint_name = result.group(1) if result else None

        cols = self.get_columns(connection, table_name, schema, **kw)
        pkeys = []
        for col in cols:
            if col['primary_key']:
                pkeys.append(col['name'])

        return {'constrained_columns': pkeys, 'name': constraint_name}

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        # sqlite makes this *extremely difficult*.
        # First, use the pragma to get the actual FKs.
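        # (PRAGMA foreign_key_list returns one row per constrained column,
        # grouped by a numeric constraint id; it does not include constraint
        # names, which is why the original DDL is parsed afterwards.)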
+ pragma_fks = self._get_table_pragma( + connection, "foreign_key_list", + table_name, schema=schema + ) + + fks = {} + + for row in pragma_fks: + (numerical_id, rtbl, lcol, rcol) = ( + row[0], row[2], row[3], row[4]) + + if rcol is None: + rcol = lcol + + if self._broken_fk_pragma_quotes: + rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl) + + if numerical_id in fks: + fk = fks[numerical_id] + else: + fk = fks[numerical_id] = { + 'name': None, + 'constrained_columns': [], + 'referred_schema': schema, + 'referred_table': rtbl, + 'referred_columns': [], + 'options': {} + } + fks[numerical_id] = fk + + fk['constrained_columns'].append(lcol) + fk['referred_columns'].append(rcol) + + def fk_sig(constrained_columns, referred_table, referred_columns): + return tuple(constrained_columns) + (referred_table,) + \ + tuple(referred_columns) + + # then, parse the actual SQL and attempt to find DDL that matches + # the names as well. SQLite saves the DDL in whatever format + # it was typed in as, so need to be liberal here. + + keys_by_signature = dict( + ( + fk_sig( + fk['constrained_columns'], + fk['referred_table'], fk['referred_columns']), + fk + ) for fk in fks.values() + ) + + table_data = self._get_table_sql(connection, table_name, schema=schema) + if table_data is None: + # system tables, etc. + return [] + + def parse_fks(): + FK_PATTERN = ( + r'(?:CONSTRAINT (\w+) +)?' + r'FOREIGN KEY *\( *(.+?) *\) +' + r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\) *' + r'((?:ON (?:DELETE|UPDATE) ' + r'(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)' + ) + for match in re.finditer(FK_PATTERN, table_data, re.I): + ( + constraint_name, constrained_columns, + referred_quoted_name, referred_name, + referred_columns, onupdatedelete) = \ + match.group(1, 2, 3, 4, 5, 6) + constrained_columns = list( + self._find_cols_in_sig(constrained_columns)) + if not referred_columns: + referred_columns = constrained_columns + else: + referred_columns = list( + self._find_cols_in_sig(referred_columns)) + referred_name = referred_quoted_name or referred_name + options = {} + + for token in re.split(r" *\bON\b *", onupdatedelete.upper()): + if token.startswith("DELETE"): + options['ondelete'] = token[6:].strip() + elif token.startswith("UPDATE"): + options["onupdate"] = token[6:].strip() + yield ( + constraint_name, constrained_columns, + referred_name, referred_columns, options) + fkeys = [] + + for ( + constraint_name, constrained_columns, + referred_name, referred_columns, options) in parse_fks(): + sig = fk_sig( + constrained_columns, referred_name, referred_columns) + if sig not in keys_by_signature: + util.warn( + "WARNING: SQL-parsed foreign key constraint " + "'%s' could not be located in PRAGMA " + "foreign_keys for table %s" % ( + sig, + table_name + )) + continue + key = keys_by_signature.pop(sig) + key['name'] = constraint_name + key['options'] = options + fkeys.append(key) + # assume the remainders are the unnamed, inline constraints, just + # use them as is as it's extremely difficult to parse inline + # constraints + fkeys.extend(keys_by_signature.values()) + return fkeys + + def _find_cols_in_sig(self, sig): + for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I): + yield match.group(1) or match.group(2) + + @reflection.cache + def get_unique_constraints(self, connection, table_name, + schema=None, **kw): + + auto_index_by_sig = {} + for idx in self.get_indexes( + connection, table_name, schema=schema, + include_auto_indexes=True, **kw): + if not 
idx['name'].startswith("sqlite_autoindex"): + continue + sig = tuple(idx['column_names']) + auto_index_by_sig[sig] = idx + + table_data = self._get_table_sql( + connection, table_name, schema=schema, **kw) + if not table_data: + return [] + + unique_constraints = [] + + def parse_uqs(): + UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)' + INLINE_UNIQUE_PATTERN = ( + r'(?:(".+?")|([a-z0-9]+)) ' + r'+[a-z0-9_ ]+? +UNIQUE') + + for match in re.finditer(UNIQUE_PATTERN, table_data, re.I): + name, cols = match.group(1, 2) + yield name, list(self._find_cols_in_sig(cols)) + + # we need to match inlines as well, as we seek to differentiate + # a UNIQUE constraint from a UNIQUE INDEX, even though these + # are kind of the same thing :) + for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I): + cols = list( + self._find_cols_in_sig(match.group(1) or match.group(2))) + yield None, cols + + for name, cols in parse_uqs(): + sig = tuple(cols) + if sig in auto_index_by_sig: + auto_index_by_sig.pop(sig) + parsed_constraint = { + 'name': name, + 'column_names': cols + } + unique_constraints.append(parsed_constraint) + # NOTE: auto_index_by_sig might not be empty here, + # the PRIMARY KEY may have an entry. + return unique_constraints + + @reflection.cache + def get_check_constraints(self, connection, table_name, + schema=None, **kw): + table_data = self._get_table_sql( + connection, table_name, schema=schema, **kw) + if not table_data: + return [] + + CHECK_PATTERN = ( + r'(?:CONSTRAINT (\w+) +)?' + r'CHECK *\( *(.+) *\),? *' + ) + check_constraints = [] + # NOTE: we aren't using re.S here because we actually are + # taking advantage of each CHECK constraint being all on one + # line in the table definition in order to delineate. This + # necessarily makes assumptions as to how the CREATE TABLE + # was emitted. + for match in re.finditer(CHECK_PATTERN, table_data, re.I): + check_constraints.append({ + 'sqltext': match.group(2), + 'name': match.group(1) + }) + + return check_constraints + + @reflection.cache + def get_indexes(self, connection, table_name, schema=None, **kw): + pragma_indexes = self._get_table_pragma( + connection, "index_list", table_name, schema=schema) + indexes = [] + + include_auto_indexes = kw.pop('include_auto_indexes', False) + for row in pragma_indexes: + # ignore implicit primary key index. + # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html + if (not include_auto_indexes and + row[1].startswith('sqlite_autoindex')): + continue + + indexes.append(dict(name=row[1], column_names=[], unique=row[2])) + + # loop thru unique indexes to get the column names. + for idx in indexes: + pragma_index = self._get_table_pragma( + connection, "index_info", idx['name']) + + for row in pragma_index: + idx['column_names'].append(row[2]) + return indexes + + @reflection.cache + def _get_table_sql(self, connection, table_name, schema=None, **kw): + try: + s = ("SELECT sql FROM " + " (SELECT * FROM sqlite_master UNION ALL " + " SELECT * FROM sqlite_temp_master) " + "WHERE name = '%s' " + "AND type = 'table'") % table_name + rs = connection.execute(s) + except exc.DBAPIError: + s = ("SELECT sql FROM sqlite_master WHERE name = '%s' " + "AND type = 'table'") % table_name + rs = connection.execute(s) + return rs.scalar() + + def _get_table_pragma(self, connection, pragma, table_name, schema=None): + quote = self.identifier_preparer.quote_identifier + if schema is not None: + statement = "PRAGMA %s." 
% quote(schema)
        else:
            statement = "PRAGMA "
        qtable = quote(table_name)
        statement = "%s%s(%s)" % (statement, pragma, qtable)
        cursor = connection.execute(statement)
        if not cursor._soft_closed:
            # work around SQLite issue whereby cursor.description
            # is blank when PRAGMA returns no rows:
            # http://www.sqlite.org/cvstrac/tktview?tn=1884
            result = cursor.fetchall()
        else:
            result = []
        return result
diff --git a/app/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py b/app/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py
new file mode 100644
index 0000000..e005d2e
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py
@@ -0,0 +1,130 @@
# sqlite/pysqlcipher.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""
.. dialect:: sqlite+pysqlcipher
    :name: pysqlcipher
    :dbapi: pysqlcipher
    :connectstring: sqlite+pysqlcipher://:passphrase/file_path[?kdf_iter=<iter>]
    :url: https://pypi.python.org/pypi/pysqlcipher

    ``pysqlcipher`` is a fork of the standard ``pysqlite`` driver to make
    use of the `SQLCipher <https://www.zetetic.net/sqlcipher>`_ backend.

    ``pysqlcipher3`` is a fork of ``pysqlcipher`` for Python 3. This dialect
    will attempt to import it if ``pysqlcipher`` is non-present.

    .. versionadded:: 1.1.4 - added fallback import for pysqlcipher3

    .. versionadded:: 0.9.9 - added pysqlcipher dialect

Driver
------

The driver here is the
`pysqlcipher <https://pypi.python.org/pypi/pysqlcipher>`_ driver,
which makes use of the SQLCipher engine.  This system essentially
introduces new PRAGMA commands to SQLite which allow the setting of a
passphrase and other encryption parameters, allowing the database
file to be encrypted.

``pysqlcipher3`` is a fork of ``pysqlcipher`` with support for Python 3;
the driver is otherwise the same.

Connect Strings
---------------

The format of the connect string is in every way the same as that
of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the
"password" field is now accepted, which should contain a passphrase::

    e = create_engine('sqlite+pysqlcipher://:testing@/foo.db')

For an absolute file path, two leading slashes should be used for the
database name::

    e = create_engine('sqlite+pysqlcipher://:testing@//path/to/foo.db')

A selection of additional encryption-related pragmas supported by SQLCipher
as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed
in the query string, and will result in that PRAGMA being called for each
new connection.  Currently, ``cipher``, ``kdf_iter``,
``cipher_page_size`` and ``cipher_use_hmac`` are supported::

    e = create_engine('sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000')


Pooling Behavior
----------------

The driver makes a change to the default pool behavior of pysqlite
as described in :ref:`pysqlite_threading_pooling`.  The pysqlcipher driver
has been observed to be significantly slower on connection than the
pysqlite driver, most likely due to the encryption overhead, so the
dialect here defaults to using the :class:`.SingletonThreadPool`
implementation, instead of the :class:`.NullPool` pool used by pysqlite.
As always, the pool +implementation is entirely configurable using the +:paramref:`.create_engine.poolclass` parameter; the :class:`.StaticPool` may +be more feasible for single-threaded use, or :class:`.NullPool` may be used +to prevent unencrypted connections from being held open for long periods of +time, at the expense of slower startup time for new connections. + + +""" +from __future__ import absolute_import +from .pysqlite import SQLiteDialect_pysqlite +from ...engine import url as _url +from ... import pool + + +class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite): + driver = 'pysqlcipher' + + pragmas = ('kdf_iter', 'cipher', 'cipher_page_size', 'cipher_use_hmac') + + @classmethod + def dbapi(cls): + try: + from pysqlcipher import dbapi2 as sqlcipher + except ImportError as e: + try: + from pysqlcipher3 import dbapi2 as sqlcipher + except ImportError: + raise e + return sqlcipher + + @classmethod + def get_pool_class(cls, url): + return pool.SingletonThreadPool + + def connect(self, *cargs, **cparams): + passphrase = cparams.pop('passphrase', '') + + pragmas = dict( + (key, cparams.pop(key, None)) for key in + self.pragmas + ) + + conn = super(SQLiteDialect_pysqlcipher, self).\ + connect(*cargs, **cparams) + conn.execute('pragma key="%s"' % passphrase) + for prag, value in pragmas.items(): + if value is not None: + conn.execute('pragma %s="%s"' % (prag, value)) + + return conn + + def create_connect_args(self, url): + super_url = _url.URL( + url.drivername, username=url.username, + host=url.host, database=url.database, query=url.query) + c_args, opts = super(SQLiteDialect_pysqlcipher, self).\ + create_connect_args(super_url) + opts['passphrase'] = url.password + return c_args, opts + +dialect = SQLiteDialect_pysqlcipher diff --git a/app/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/app/lib/sqlalchemy/dialects/sqlite/pysqlite.py new file mode 100644 index 0000000..40a7cbb --- /dev/null +++ b/app/lib/sqlalchemy/dialects/sqlite/pysqlite.py @@ -0,0 +1,377 @@ +# sqlite/pysqlite.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +r""" +.. dialect:: sqlite+pysqlite + :name: pysqlite + :dbapi: sqlite3 + :connectstring: sqlite+pysqlite:///file_path + :url: http://docs.python.org/library/sqlite3.html + + Note that ``pysqlite`` is the same driver as the ``sqlite3`` + module included with the Python distribution. + +Driver +------ + +When using Python 2.5 and above, the built in ``sqlite3`` driver is +already installed and no additional installation is needed. Otherwise, +the ``pysqlite2`` driver needs to be present. This is the same driver as +``sqlite3``, just with a different name. + +The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3`` +is loaded. This allows an explicitly installed pysqlite driver to take +precedence over the built in one. As with all dialects, a specific +DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control +this explicitly:: + + from sqlite3 import dbapi2 as sqlite + e = create_engine('sqlite+pysqlite:///file.db', module=sqlite) + + +Connect Strings +--------------- + +The file specification for the SQLite database is taken as the "database" +portion of the URL. Note that the format of a SQLAlchemy url is:: + + driver://user:pass@host/database + +This means that the actual filename to be used starts with the characters to +the **right** of the third slash. 
So connecting to a relative filepath
looks like::

    # relative path
    e = create_engine('sqlite:///path/to/database.db')

An absolute path, which is denoted by starting with a slash, means you
need **four** slashes::

    # absolute path
    e = create_engine('sqlite:////path/to/database.db')

To use a Windows path, regular drive specifications and backslashes can be
used. Double backslashes are probably needed::

    # absolute path on Windows
    e = create_engine('sqlite:///C:\\path\\to\\database.db')

The sqlite ``:memory:`` identifier is the default if no filepath is
present.  Specify ``sqlite://`` and nothing else::

    # in-memory database
    e = create_engine('sqlite://')

Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------

The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect that any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object.  The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not.  Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively.  Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.

Keeping in mind that pysqlite's parsing option is not recommended,
nor should it be necessary, for use with SQLAlchemy, usage of
PARSE_DECLTYPES can be forced if one configures "native_datetime=True"
on create_engine()::

    engine = create_engine('sqlite://',
                           connect_args={'detect_types':
                                         sqlite3.PARSE_DECLTYPES |
                                         sqlite3.PARSE_COLNAMES},
                           native_datetime=True)

With this flag enabled, the DATE and TIMESTAMP types (but note - not the
DATETIME or TIME types...confused yet?) will not perform any bind parameter
or result processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result
processing.

.. _pysqlite_threading_pooling:

Threading/Pooling Behavior
---------------------------

Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread.  This is originally intended to work with older
versions of SQLite that did not support multithreaded operation under
various circumstances.  In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.

Pysqlite does include a now-undocumented flag known as
``check_same_thread`` which will disable this check, however note that
pysqlite connections are still not safe to use concurrently in multiple
threads.  In particular, any statement execution calls would need to be
externally mutexed, as Pysqlite does not provide for thread-safe propagation
of error messages among other things.  So while even ``:memory:`` databases
can be shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
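
As a rough illustration only (this helper is not part of SQLAlchemy or
pysqlite; the names here are invented), externally mutexing a shared
pysqlite connection could look like::

    import sqlite3
    import threading

    conn = sqlite3.connect(":memory:", check_same_thread=False)
    conn_lock = threading.Lock()

    def execute_serialized(statement, params=()):
        # serialize every use of the shared connection across threads
        with conn_lock:
            return conn.execute(statement, params).fetchall()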

SQLAlchemy sets up pooling to work with Pysqlite's default behavior:

* When a ``:memory:`` SQLite database is specified, the dialect by default
  will use :class:`.SingletonThreadPool`. This pool maintains a single
  connection per thread, so that all access to the engine within the current
  thread uses the same ``:memory:`` database - other threads would access a
  different ``:memory:`` database.
* When a file-based database is specified, the dialect will use
  :class:`.NullPool` as the source of connections. This pool closes and
  discards connections which are returned to the pool immediately. SQLite
  file-based connections have extremely low overhead, so pooling is not
  necessary. The scheme also prevents a connection from being used again in
  a different thread and works best with SQLite's coarse-grained file locking.

  .. versionchanged:: 0.7
      Default selection of :class:`.NullPool` for SQLite file-based databases.
      Previous versions selected :class:`.SingletonThreadPool` by
      default for all SQLite databases.


Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

To use a ``:memory:`` database in a multithreaded scenario, the same
connection object must be shared among threads, since the database exists
only within the scope of that connection.  The
:class:`.StaticPool` implementation will maintain a single connection
globally, and the ``check_same_thread`` flag can be passed to Pysqlite
as ``False``::

    from sqlalchemy.pool import StaticPool
    engine = create_engine('sqlite://',
                           connect_args={'check_same_thread': False},
                           poolclass=StaticPool)

Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.

Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Due to the way SQLite deals with temporary tables, if you wish to use a
temporary table in a file-based SQLite database across multiple checkouts
from the connection pool, such as when using an ORM :class:`.Session` where
the temporary table should continue to remain after :meth:`.Session.commit` or
:meth:`.Session.rollback` is called, a pool which maintains a single
connection must be used.  Use :class:`.SingletonThreadPool` if the scope is
only needed within the current thread, or :class:`.StaticPool` if the scope
is needed within multiple threads for this case::

    # maintain the same connection per thread
    from sqlalchemy.pool import SingletonThreadPool
    engine = create_engine('sqlite:///mydb.db',
                           poolclass=SingletonThreadPool)


    # maintain the same connection across all threads
    from sqlalchemy.pool import StaticPool
    engine = create_engine('sqlite:///mydb.db',
                           poolclass=StaticPool)

Note that :class:`.SingletonThreadPool` should be configured for the number
of threads that are to be used; beyond that number, connections will be
closed out in a non-deterministic way.

Unicode
-------

The pysqlite driver only returns Python ``unicode`` objects in result sets,
never plain strings, and accommodates ``unicode`` objects within bound
parameter values in all cases.  Regardless of the SQLAlchemy string type in
use, string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning.  Pysqlite will emit an error if a non-``unicode`` string
is passed containing non-ASCII characters.

.. _pysqlite_serializable:

Serializable isolation / Savepoints / Transactional DDL
--------------------------------------------------------

In the section :ref:`sqlite_concurrency`, we refer to the pysqlite
driver's assortment of issues that prevent several features of SQLite
from working correctly.  The pysqlite DBAPI driver has several
long-standing bugs which impact the correctness of its transactional
behavior.  In its default mode of operation, SQLite features such as
SERIALIZABLE isolation, transactional DDL, and SAVEPOINT support are
non-functional, and in order to use these features, workarounds must
be taken.

The issue is essentially that the driver attempts to second-guess the
user's intent, failing to start transactions and sometimes ending them
prematurely, in an effort to minimize the SQLite database's file locking
behavior, even though SQLite itself uses "shared" locks for read-only
activities.

SQLAlchemy chooses to not alter this behavior by default, as it is the
long-expected behavior of the pysqlite driver; if and when the pysqlite
driver attempts to repair these issues, that will be more of a driver
towards changing the defaults used by SQLAlchemy.

The good news is that with a few events, we can implement transactional
support fully, by disabling pysqlite's feature entirely and emitting BEGIN
ourselves.  This is achieved using two event listeners::

    from sqlalchemy import create_engine, event

    engine = create_engine("sqlite:///myfile.db")

    @event.listens_for(engine, "connect")
    def do_connect(dbapi_connection, connection_record):
        # disable pysqlite's emitting of the BEGIN statement entirely.
        # also stops it from emitting COMMIT before any DDL.
        dbapi_connection.isolation_level = None

    @event.listens_for(engine, "begin")
    def do_begin(conn):
        # emit our own BEGIN
        conn.execute("BEGIN")

Above, we intercept a new pysqlite connection and disable any transactional
integration.  Then, at the point at which SQLAlchemy knows that transaction
scope is to begin, we emit ``"BEGIN"`` ourselves.

When we take control of ``"BEGIN"``, we can also control directly SQLite's
locking modes, introduced at
`BEGIN TRANSACTION <http://sqlite.org/lang_begin.html>`_,
by adding the desired locking mode to our ``"BEGIN"``::

    @event.listens_for(engine, "begin")
    def do_begin(conn):
        conn.execute("BEGIN EXCLUSIVE")

.. seealso::

    `BEGIN TRANSACTION <http://sqlite.org/lang_begin.html>`_ -
    on the SQLite site

    `sqlite3 SELECT does not BEGIN a transaction
    <http://bugs.python.org/issue9924>`_ - on the Python bug tracker

    `sqlite3 module breaks transactions and potentially corrupts data
    <http://bugs.python.org/issue10740>`_ - on the Python bug tracker


"""

from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE
from sqlalchemy import exc, pool
from sqlalchemy import types as sqltypes
from sqlalchemy import util

import os


class _SQLite_pysqliteTimeStamp(DATETIME):
    def bind_processor(self, dialect):
        if dialect.native_datetime:
            return None
        else:
            return DATETIME.bind_processor(self, dialect)

    def result_processor(self, dialect, coltype):
        if dialect.native_datetime:
            return None
        else:
            return DATETIME.result_processor(self, dialect, coltype)


class _SQLite_pysqliteDate(DATE):
    def bind_processor(self, dialect):
        if dialect.native_datetime:
            return None
        else:
            return DATE.bind_processor(self, dialect)

    def result_processor(self, dialect, coltype):
        if dialect.native_datetime:
            return None
        else:
            return DATE.result_processor(self, dialect, coltype)


class SQLiteDialect_pysqlite(SQLiteDialect):
    default_paramstyle = 'qmark'

    colspecs = util.update_copy(
        SQLiteDialect.colspecs,
        {
            sqltypes.Date: _SQLite_pysqliteDate,
            sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp,
        }
    )

    if not util.py2k:
        description_encoding = None

    driver = 'pysqlite'

    def __init__(self, **kwargs):
        SQLiteDialect.__init__(self, **kwargs)

        if self.dbapi is not None:
            sqlite_ver = self.dbapi.version_info
            if sqlite_ver < (2, 1, 3):
                util.warn(
                    ("The installed version of pysqlite2 (%s) is out-dated "
                     "and will cause errors in some cases.  Version 2.1.3 "
                     "or greater is recommended.") %
                    '.'.join([str(subver) for subver in sqlite_ver]))

    @classmethod
    def dbapi(cls):
        try:
            from pysqlite2 import dbapi2 as sqlite
        except ImportError as e:
            try:
                from sqlite3 import dbapi2 as sqlite  # try 2.5+ stdlib name.
            except ImportError:
                raise e
        return sqlite

    @classmethod
    def get_pool_class(cls, url):
        if url.database and url.database != ':memory:':
            return pool.NullPool
        else:
            return pool.SingletonThreadPool

    def _get_server_version_info(self, connection):
        return self.dbapi.sqlite_version_info

    def create_connect_args(self, url):
        if url.username or url.password or url.host or url.port:
            raise exc.ArgumentError(
                "Invalid SQLite URL: %s\n"
                "Valid SQLite URL forms are:\n"
                " sqlite:///:memory: (or, sqlite://)\n"
                " sqlite:///relative/path/to/file.db\n"
                " sqlite:////absolute/path/to/file.db" % (url,))
        filename = url.database or ':memory:'
        if filename != ':memory:':
            filename = os.path.abspath(filename)

        opts = url.query.copy()
        util.coerce_kw_type(opts, 'timeout', float)
        util.coerce_kw_type(opts, 'isolation_level', str)
        util.coerce_kw_type(opts, 'detect_types', int)
        util.coerce_kw_type(opts, 'check_same_thread', bool)
        util.coerce_kw_type(opts, 'cached_statements', int)

        return ([filename], opts)

    def is_disconnect(self, e, connection, cursor):
        return isinstance(e, self.dbapi.ProgrammingError) and \
            "Cannot operate on a closed database." 
in str(e) + +dialect = SQLiteDialect_pysqlite diff --git a/app/lib/sqlalchemy/dialects/sybase/__init__.py b/app/lib/sqlalchemy/dialects/sybase/__init__.py new file mode 100644 index 0000000..1e72790 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/sybase/__init__.py @@ -0,0 +1,28 @@ +# sybase/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from sqlalchemy.dialects.sybase import base, pysybase, pyodbc + +# default dialect +base.dialect = pyodbc.dialect + +from .base import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\ + TEXT, DATE, DATETIME, FLOAT, NUMERIC,\ + BIGINT, INT, INTEGER, SMALLINT, BINARY,\ + VARBINARY, UNITEXT, UNICHAR, UNIVARCHAR,\ + IMAGE, BIT, MONEY, SMALLMONEY, TINYINT,\ + dialect + + +__all__ = ( + 'CHAR', 'VARCHAR', 'TIME', 'NCHAR', 'NVARCHAR', + 'TEXT', 'DATE', 'DATETIME', 'FLOAT', 'NUMERIC', + 'BIGINT', 'INT', 'INTEGER', 'SMALLINT', 'BINARY', + 'VARBINARY', 'UNITEXT', 'UNICHAR', 'UNIVARCHAR', + 'IMAGE', 'BIT', 'MONEY', 'SMALLMONEY', 'TINYINT', + 'dialect' +) diff --git a/app/lib/sqlalchemy/dialects/sybase/base.py b/app/lib/sqlalchemy/dialects/sybase/base.py new file mode 100644 index 0000000..5d2f0f7 --- /dev/null +++ b/app/lib/sqlalchemy/dialects/sybase/base.py @@ -0,0 +1,821 @@ +# sybase/base.py +# Copyright (C) 2010-2017 the SQLAlchemy authors and contributors +# +# get_select_precolumns(), limit_clause() implementation +# copyright (C) 2007 Fisch Asset Management +# AG http://www.fam.ch, with coding by Alexander Houben +# alexander.houben@thor-solutions.ch +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" + +.. dialect:: sybase + :name: Sybase + +.. note:: + + The Sybase dialect functions on current SQLAlchemy versions + but is not regularly tested, and may have many issues and + caveats not currently handled. 
+ +""" +import operator +import re + +from sqlalchemy.sql import compiler, expression, text, bindparam +from sqlalchemy.engine import default, base, reflection +from sqlalchemy import types as sqltypes +from sqlalchemy.sql import operators as sql_operators +from sqlalchemy import schema as sa_schema +from sqlalchemy import util, sql, exc + +from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\ + TEXT, DATE, DATETIME, FLOAT, NUMERIC,\ + BIGINT, INT, INTEGER, SMALLINT, BINARY,\ + VARBINARY, DECIMAL, TIMESTAMP, Unicode,\ + UnicodeText, REAL + +RESERVED_WORDS = set([ + "add", "all", "alter", "and", + "any", "as", "asc", "backup", + "begin", "between", "bigint", "binary", + "bit", "bottom", "break", "by", + "call", "capability", "cascade", "case", + "cast", "char", "char_convert", "character", + "check", "checkpoint", "close", "comment", + "commit", "connect", "constraint", "contains", + "continue", "convert", "create", "cross", + "cube", "current", "current_timestamp", "current_user", + "cursor", "date", "dbspace", "deallocate", + "dec", "decimal", "declare", "default", + "delete", "deleting", "desc", "distinct", + "do", "double", "drop", "dynamic", + "else", "elseif", "encrypted", "end", + "endif", "escape", "except", "exception", + "exec", "execute", "existing", "exists", + "externlogin", "fetch", "first", "float", + "for", "force", "foreign", "forward", + "from", "full", "goto", "grant", + "group", "having", "holdlock", "identified", + "if", "in", "index", "index_lparen", + "inner", "inout", "insensitive", "insert", + "inserting", "install", "instead", "int", + "integer", "integrated", "intersect", "into", + "iq", "is", "isolation", "join", + "key", "lateral", "left", "like", + "lock", "login", "long", "match", + "membership", "message", "mode", "modify", + "natural", "new", "no", "noholdlock", + "not", "notify", "null", "numeric", + "of", "off", "on", "open", + "option", "options", "or", "order", + "others", "out", "outer", "over", + "passthrough", "precision", "prepare", "primary", + "print", "privileges", "proc", "procedure", + "publication", "raiserror", "readtext", "real", + "reference", "references", "release", "remote", + "remove", "rename", "reorganize", "resource", + "restore", "restrict", "return", "revoke", + "right", "rollback", "rollup", "save", + "savepoint", "scroll", "select", "sensitive", + "session", "set", "setuser", "share", + "smallint", "some", "sqlcode", "sqlstate", + "start", "stop", "subtrans", "subtransaction", + "synchronize", "syntax_error", "table", "temporary", + "then", "time", "timestamp", "tinyint", + "to", "top", "tran", "trigger", + "truncate", "tsequal", "unbounded", "union", + "unique", "unknown", "unsigned", "update", + "updating", "user", "using", "validate", + "values", "varbinary", "varchar", "variable", + "varying", "view", "wait", "waitfor", + "when", "where", "while", "window", + "with", "with_cube", "with_lparen", "with_rollup", + "within", "work", "writetext", +]) + + +class _SybaseUnitypeMixin(object): + """these types appear to return a buffer object.""" + + def result_processor(self, dialect, coltype): + def process(value): + if value is not None: + return str(value) # decode("ucs-2") + else: + return None + return process + + +class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode): + __visit_name__ = 'UNICHAR' + + +class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode): + __visit_name__ = 'UNIVARCHAR' + + +class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText): + __visit_name__ = 'UNITEXT' + + +class TINYINT(sqltypes.Integer): 
+ __visit_name__ = 'TINYINT' + + +class BIT(sqltypes.TypeEngine): + __visit_name__ = 'BIT' + + +class MONEY(sqltypes.TypeEngine): + __visit_name__ = "MONEY" + + +class SMALLMONEY(sqltypes.TypeEngine): + __visit_name__ = "SMALLMONEY" + + +class UNIQUEIDENTIFIER(sqltypes.TypeEngine): + __visit_name__ = "UNIQUEIDENTIFIER" + + +class IMAGE(sqltypes.LargeBinary): + __visit_name__ = 'IMAGE' + + +class SybaseTypeCompiler(compiler.GenericTypeCompiler): + def visit_large_binary(self, type_, **kw): + return self.visit_IMAGE(type_) + + def visit_boolean(self, type_, **kw): + return self.visit_BIT(type_) + + def visit_unicode(self, type_, **kw): + return self.visit_NVARCHAR(type_) + + def visit_UNICHAR(self, type_, **kw): + return "UNICHAR(%d)" % type_.length + + def visit_UNIVARCHAR(self, type_, **kw): + return "UNIVARCHAR(%d)" % type_.length + + def visit_UNITEXT(self, type_, **kw): + return "UNITEXT" + + def visit_TINYINT(self, type_, **kw): + return "TINYINT" + + def visit_IMAGE(self, type_, **kw): + return "IMAGE" + + def visit_BIT(self, type_, **kw): + return "BIT" + + def visit_MONEY(self, type_, **kw): + return "MONEY" + + def visit_SMALLMONEY(self, type_, **kw): + return "SMALLMONEY" + + def visit_UNIQUEIDENTIFIER(self, type_, **kw): + return "UNIQUEIDENTIFIER" + +ischema_names = { + 'bigint': BIGINT, + 'int': INTEGER, + 'integer': INTEGER, + 'smallint': SMALLINT, + 'tinyint': TINYINT, + 'unsigned bigint': BIGINT, # TODO: unsigned flags + 'unsigned int': INTEGER, # TODO: unsigned flags + 'unsigned smallint': SMALLINT, # TODO: unsigned flags + 'numeric': NUMERIC, + 'decimal': DECIMAL, + 'dec': DECIMAL, + 'float': FLOAT, + 'double': NUMERIC, # TODO + 'double precision': NUMERIC, # TODO + 'real': REAL, + 'smallmoney': SMALLMONEY, + 'money': MONEY, + 'smalldatetime': DATETIME, + 'datetime': DATETIME, + 'date': DATE, + 'time': TIME, + 'char': CHAR, + 'character': CHAR, + 'varchar': VARCHAR, + 'character varying': VARCHAR, + 'char varying': VARCHAR, + 'unichar': UNICHAR, + 'unicode character': UNIVARCHAR, + 'nchar': NCHAR, + 'national char': NCHAR, + 'national character': NCHAR, + 'nvarchar': NVARCHAR, + 'nchar varying': NVARCHAR, + 'national char varying': NVARCHAR, + 'national character varying': NVARCHAR, + 'text': TEXT, + 'unitext': UNITEXT, + 'binary': BINARY, + 'varbinary': VARBINARY, + 'image': IMAGE, + 'bit': BIT, + + # not in documentation for ASE 15.7 + 'long varchar': TEXT, # TODO + 'timestamp': TIMESTAMP, + 'uniqueidentifier': UNIQUEIDENTIFIER, + +} + + +class SybaseInspector(reflection.Inspector): + + def __init__(self, conn): + reflection.Inspector.__init__(self, conn) + + def get_table_id(self, table_name, schema=None): + """Return the table id from `table_name` and `schema`.""" + + return self.dialect.get_table_id(self.bind, table_name, schema, + info_cache=self.info_cache) + + +class SybaseExecutionContext(default.DefaultExecutionContext): + _enable_identity_insert = False + + def set_ddl_autocommit(self, connection, value): + """Must be implemented by subclasses to accommodate DDL executions. + + "connection" is the raw unwrapped DBAPI connection. "value" + is True or False. when True, the connection should be configured + such that a DDL can take place subsequently. when False, + a DDL has taken place and the connection should be resumed + into non-autocommit mode. 
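+
+        As a rough sketch of an implementation (the pyodbc subclass
+        later in this diff does essentially this, by toggling the raw
+        connection's ``autocommit`` attribute)::
+
+            def set_ddl_autocommit(self, connection, value):
+                # "connection" is the raw DBAPI connection here
+                connection.autocommit = value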
+ + """ + raise NotImplementedError() + + def pre_exec(self): + if self.isinsert: + tbl = self.compiled.statement.table + seq_column = tbl._autoincrement_column + insert_has_sequence = seq_column is not None + + if insert_has_sequence: + self._enable_identity_insert = \ + seq_column.key in self.compiled_parameters[0] + else: + self._enable_identity_insert = False + + if self._enable_identity_insert: + self.cursor.execute( + "SET IDENTITY_INSERT %s ON" % + self.dialect.identifier_preparer.format_table(tbl)) + + if self.isddl: + # TODO: to enhance this, we can detect "ddl in tran" on the + # database settings. this error message should be improved to + # include a note about that. + if not self.should_autocommit: + raise exc.InvalidRequestError( + "The Sybase dialect only supports " + "DDL in 'autocommit' mode at this time.") + + self.root_connection.engine.logger.info( + "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')") + + self.set_ddl_autocommit( + self.root_connection.connection.connection, + True) + + def post_exec(self): + if self.isddl: + self.set_ddl_autocommit(self.root_connection, False) + + if self._enable_identity_insert: + self.cursor.execute( + "SET IDENTITY_INSERT %s OFF" % + self.dialect.identifier_preparer. + format_table(self.compiled.statement.table) + ) + + def get_lastrowid(self): + cursor = self.create_cursor() + cursor.execute("SELECT @@identity AS lastrowid") + lastrowid = cursor.fetchone()[0] + cursor.close() + return lastrowid + + +class SybaseSQLCompiler(compiler.SQLCompiler): + ansi_bind_rules = True + + extract_map = util.update_copy( + compiler.SQLCompiler.extract_map, + { + 'doy': 'dayofyear', + 'dow': 'weekday', + 'milliseconds': 'millisecond' + }) + + def get_select_precolumns(self, select, **kw): + s = select._distinct and "DISTINCT " or "" + # TODO: don't think Sybase supports + # bind params for FIRST / TOP + limit = select._limit + if limit: + # if select._limit == 1: + # s += "FIRST " + # else: + # s += "TOP %s " % (select._limit,) + s += "TOP %s " % (limit,) + offset = select._offset + if offset: + raise NotImplementedError("Sybase ASE does not support OFFSET") + return s + + def get_from_hint_text(self, table, text): + return text + + def limit_clause(self, select, **kw): + # Limit in sybase is after the select keyword + return "" + + def visit_extract(self, extract, **kw): + field = self.extract_map.get(extract.field, extract.field) + return 'DATEPART("%s", %s)' % ( + field, self.process(extract.expr, **kw)) + + def visit_now_func(self, fn, **kw): + return "GETDATE()" + + def for_update_clause(self, select): + # "FOR UPDATE" is only allowed on "DECLARE CURSOR" + # which SQLAlchemy doesn't use + return '' + + def order_by_clause(self, select, **kw): + kw['literal_binds'] = True + order_by = self.process(select._order_by_clause, **kw) + + # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT + if order_by and (not self.is_subquery() or select._limit): + return " ORDER BY " + order_by + else: + return "" + + +class SybaseDDLCompiler(compiler.DDLCompiler): + def get_column_specification(self, column, **kwargs): + colspec = self.preparer.format_column(column) + " " + \ + self.dialect.type_compiler.process( + column.type, type_expression=column) + + if column.table is None: + raise exc.CompileError( + "The Sybase dialect requires Table-bound " + "columns in order to generate DDL") + seq_col = column.table._autoincrement_column + + # install a IDENTITY Sequence if we have an implicit IDENTITY column + if seq_col is column: + sequence = 
isinstance(column.default, sa_schema.Sequence) \ + and column.default + if sequence: + start, increment = sequence.start or 1, \ + sequence.increment or 1 + else: + start, increment = 1, 1 + if (start, increment) == (1, 1): + colspec += " IDENTITY" + else: + # TODO: need correct syntax for this + colspec += " IDENTITY(%s,%s)" % (start, increment) + else: + default = self.get_column_default_string(column) + if default is not None: + colspec += " DEFAULT " + default + + if column.nullable is not None: + if not column.nullable or column.primary_key: + colspec += " NOT NULL" + else: + colspec += " NULL" + + return colspec + + def visit_drop_index(self, drop): + index = drop.element + return "\nDROP INDEX %s.%s" % ( + self.preparer.quote_identifier(index.table.name), + self._prepared_index_name(drop.element, + include_schema=False) + ) + + +class SybaseIdentifierPreparer(compiler.IdentifierPreparer): + reserved_words = RESERVED_WORDS + + +class SybaseDialect(default.DefaultDialect): + name = 'sybase' + supports_unicode_statements = False + supports_sane_rowcount = False + supports_sane_multi_rowcount = False + + supports_native_boolean = False + supports_unicode_binds = False + postfetch_lastrowid = True + + colspecs = {} + ischema_names = ischema_names + + type_compiler = SybaseTypeCompiler + statement_compiler = SybaseSQLCompiler + ddl_compiler = SybaseDDLCompiler + preparer = SybaseIdentifierPreparer + inspector = SybaseInspector + + construct_arguments = [] + + def _get_default_schema_name(self, connection): + return connection.scalar( + text("SELECT user_name() as user_name", + typemap={'user_name': Unicode}) + ) + + def initialize(self, connection): + super(SybaseDialect, self).initialize(connection) + if self.server_version_info is not None and\ + self.server_version_info < (15, ): + self.max_identifier_length = 30 + else: + self.max_identifier_length = 255 + + def get_table_id(self, connection, table_name, schema=None, **kw): + """Fetch the id for schema.table_name. + + Several reflection methods require the table id. The idea for using + this method is that it can be fetched one time and cached for + subsequent calls. 
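+
+        For example, the :class:`.SybaseInspector` defined above passes
+        its shared ``info_cache`` for exactly this reason; a usage sketch
+        (the table and schema names are placeholders)::
+
+            insp = SybaseInspector(engine)
+            table_id = insp.get_table_id("some_table", schema="dbo")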
+
+        """
+
+        table_id = None
+        if schema is None:
+            schema = self.default_schema_name
+
+        TABLEID_SQL = text("""
+          SELECT o.id AS id
+          FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
+          WHERE u.name = :schema_name
+            AND o.name = :table_name
+            AND o.type in ('U', 'V')
+        """)
+
+        if util.py2k:
+            if isinstance(schema, unicode):
+                schema = schema.encode("ascii")
+            if isinstance(table_name, unicode):
+                table_name = table_name.encode("ascii")
+        result = connection.execute(TABLEID_SQL,
+                                    schema_name=schema,
+                                    table_name=table_name)
+        table_id = result.scalar()
+        if table_id is None:
+            raise exc.NoSuchTableError(table_name)
+        return table_id
+
+    @reflection.cache
+    def get_columns(self, connection, table_name, schema=None, **kw):
+        table_id = self.get_table_id(connection, table_name, schema,
+                                     info_cache=kw.get("info_cache"))
+
+        COLUMN_SQL = text("""
+          SELECT col.name AS name,
+                 t.name AS type,
+                 (col.status & 8) AS nullable,
+                 (col.status & 128) AS autoincrement,
+                 com.text AS 'default',
+                 col.prec AS precision,
+                 col.scale AS scale,
+                 col.length AS length
+          FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
+              col.cdefault = com.id
+          WHERE col.usertype = t.usertype
+              AND col.id = :table_id
+          ORDER BY col.colid
+        """)
+
+        results = connection.execute(COLUMN_SQL, table_id=table_id)
+
+        columns = []
+        for (name, type_, nullable, autoincrement, default, precision, scale,
+             length) in results:
+            col_info = self._get_column_info(name, type_, bool(nullable),
+                                             bool(autoincrement),
+                                             default, precision, scale,
+                                             length)
+            columns.append(col_info)
+
+        return columns
+
+    def _get_column_info(self, name, type_, nullable, autoincrement, default,
+                         precision, scale, length):
+
+        coltype = self.ischema_names.get(type_, None)
+
+        kwargs = {}
+
+        if coltype in (NUMERIC, DECIMAL):
+            args = (precision, scale)
+        elif coltype == FLOAT:
+            args = (precision,)
+        elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
+            args = (length,)
+        else:
+            args = ()
+
+        if coltype:
+            coltype = coltype(*args, **kwargs)
+            # is this necessary
+            # if is_array:
+            #     coltype = ARRAY(coltype)
+        else:
+            util.warn("Did not recognize type '%s' of column '%s'" %
+                      (type_, name))
+            coltype = sqltypes.NULLTYPE
+
+        if default:
+            default = default.replace("DEFAULT", "").strip()
+            default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
+        else:
+            default = None
+
+        column_info = dict(name=name, type=coltype, nullable=nullable,
+                           default=default, autoincrement=autoincrement)
+        return column_info
+
+    @reflection.cache
+    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
+
+        table_id = self.get_table_id(connection, table_name, schema,
+                                     info_cache=kw.get("info_cache"))
+
+        table_cache = {}
+        column_cache = {}
+        foreign_keys = []
+
+        table_cache[table_id] = {"name": table_name, "schema": schema}
+
+        COLUMN_SQL = text("""
+          SELECT c.colid AS id, c.name AS name
+          FROM syscolumns c
+          WHERE c.id = :table_id
+        """)
+
+        results = connection.execute(COLUMN_SQL, table_id=table_id)
+        columns = {}
+        for col in results:
+            columns[col["id"]] = col["name"]
+        column_cache[table_id] = columns
+
+        REFCONSTRAINT_SQL = text("""
+          SELECT o.name AS name, r.reftabid AS reftable_id,
+            r.keycnt AS 'count',
+            r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
+            r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
+            r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
+            r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
+            r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
+            r.fokey16 AS fokey16,
+            r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
+            r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
+            r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
+            r.refkey10 AS refkey10, r.refkey11 AS refkey11,
+            r.refkey12 AS refkey12, r.refkey13 AS refkey13,
+            r.refkey14 AS refkey14, r.refkey15 AS refkey15,
+            r.refkey16 AS refkey16
+          FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
+          WHERE r.tableid = :table_id
+        """)
+        referential_constraints = connection.execute(
+            REFCONSTRAINT_SQL, table_id=table_id).fetchall()
+
+        REFTABLE_SQL = text("""
+          SELECT o.name AS name, u.name AS 'schema'
+          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
+          WHERE o.id = :table_id
+        """)
+
+        for r in referential_constraints:
+            reftable_id = r["reftable_id"]
+
+            if reftable_id not in table_cache:
+                c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
+                reftable = c.fetchone()
+                c.close()
+                table_info = {"name": reftable["name"], "schema": None}
+                if (schema is not None or
+                        reftable["schema"] != self.default_schema_name):
+                    table_info["schema"] = reftable["schema"]
+
+                table_cache[reftable_id] = table_info
+                results = connection.execute(COLUMN_SQL, table_id=reftable_id)
+                reftable_columns = {}
+                for col in results:
+                    reftable_columns[col["id"]] = col["name"]
+                column_cache[reftable_id] = reftable_columns
+
+            reftable = table_cache[reftable_id]
+            reftable_columns = column_cache[reftable_id]
+
+            constrained_columns = []
+            referred_columns = []
+            for i in range(1, r["count"] + 1):
+                constrained_columns.append(columns[r["fokey%i" % i]])
+                referred_columns.append(reftable_columns[r["refkey%i" % i]])
+
+            fk_info = {
+                "constrained_columns": constrained_columns,
+                "referred_schema": reftable["schema"],
+                "referred_table": reftable["name"],
+                "referred_columns": referred_columns,
+                "name": r["name"]
+            }
+
+            foreign_keys.append(fk_info)
+
+        return foreign_keys
+
+    @reflection.cache
+    def get_indexes(self, connection, table_name, schema=None, **kw):
+        table_id = self.get_table_id(connection, table_name, schema,
+                                     info_cache=kw.get("info_cache"))
+
+        INDEX_SQL = text("""
+          SELECT object_name(i.id) AS table_name,
+                 i.keycnt AS 'count',
+                 i.name AS name,
+                 (i.status & 0x2) AS 'unique',
+                 index_col(object_name(i.id), i.indid, 1) AS col_1,
+                 index_col(object_name(i.id), i.indid, 2) AS col_2,
+                 index_col(object_name(i.id), i.indid, 3) AS col_3,
+                 index_col(object_name(i.id), i.indid, 4) AS col_4,
+                 index_col(object_name(i.id), i.indid, 5) AS col_5,
+                 index_col(object_name(i.id), i.indid, 6) AS col_6,
+                 index_col(object_name(i.id), i.indid, 7) AS col_7,
+                 index_col(object_name(i.id), i.indid, 8) AS col_8,
+                 index_col(object_name(i.id), i.indid, 9) AS col_9,
+                 index_col(object_name(i.id), i.indid, 10) AS col_10,
+                 index_col(object_name(i.id), i.indid, 11) AS col_11,
+                 index_col(object_name(i.id), i.indid, 12) AS col_12,
+                 index_col(object_name(i.id), i.indid, 13) AS col_13,
+                 index_col(object_name(i.id), i.indid, 14) AS col_14,
+                 index_col(object_name(i.id), i.indid, 15) AS col_15,
+                 index_col(object_name(i.id), i.indid, 16) AS col_16
+          FROM sysindexes i, sysobjects o
+          WHERE o.id = i.id
+            AND o.id = :table_id
+            AND (i.status & 2048) = 0
+            AND i.indid BETWEEN 1 AND 254
+        """)
+
+        results = connection.execute(INDEX_SQL, table_id=table_id)
+        indexes = []
+        for r in results:
+            column_names = []
+            for i in range(1, r["count"]):
+                column_names.append(r["col_%i" % (i,)])
+            index_info = {"name": r["name"],
+                          "unique": bool(r["unique"]),
"column_names": column_names} + indexes.append(index_info) + + return indexes + + @reflection.cache + def get_pk_constraint(self, connection, table_name, schema=None, **kw): + table_id = self.get_table_id(connection, table_name, schema, + info_cache=kw.get("info_cache")) + + PK_SQL = text(""" + SELECT object_name(i.id) AS table_name, + i.keycnt AS 'count', + i.name AS name, + index_col(object_name(i.id), i.indid, 1) AS pk_1, + index_col(object_name(i.id), i.indid, 2) AS pk_2, + index_col(object_name(i.id), i.indid, 3) AS pk_3, + index_col(object_name(i.id), i.indid, 4) AS pk_4, + index_col(object_name(i.id), i.indid, 5) AS pk_5, + index_col(object_name(i.id), i.indid, 6) AS pk_6, + index_col(object_name(i.id), i.indid, 7) AS pk_7, + index_col(object_name(i.id), i.indid, 8) AS pk_8, + index_col(object_name(i.id), i.indid, 9) AS pk_9, + index_col(object_name(i.id), i.indid, 10) AS pk_10, + index_col(object_name(i.id), i.indid, 11) AS pk_11, + index_col(object_name(i.id), i.indid, 12) AS pk_12, + index_col(object_name(i.id), i.indid, 13) AS pk_13, + index_col(object_name(i.id), i.indid, 14) AS pk_14, + index_col(object_name(i.id), i.indid, 15) AS pk_15, + index_col(object_name(i.id), i.indid, 16) AS pk_16 + FROM sysindexes i, sysobjects o + WHERE o.id = i.id + AND o.id = :table_id + AND (i.status & 2048) = 2048 + AND i.indid BETWEEN 1 AND 254 + """) + + results = connection.execute(PK_SQL, table_id=table_id) + pks = results.fetchone() + results.close() + + constrained_columns = [] + if pks: + for i in range(1, pks["count"] + 1): + constrained_columns.append(pks["pk_%i" % (i,)]) + return {"constrained_columns": constrained_columns, + "name": pks["name"]} + else: + return {"constrained_columns": [], "name": None} + + @reflection.cache + def get_schema_names(self, connection, **kw): + + SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u") + + schemas = connection.execute(SCHEMA_SQL) + + return [s["name"] for s in schemas] + + @reflection.cache + def get_table_names(self, connection, schema=None, **kw): + if schema is None: + schema = self.default_schema_name + + TABLE_SQL = text(""" + SELECT o.name AS name + FROM sysobjects o JOIN sysusers u ON o.uid = u.uid + WHERE u.name = :schema_name + AND o.type = 'U' + """) + + if util.py2k: + if isinstance(schema, unicode): + schema = schema.encode("ascii") + + tables = connection.execute(TABLE_SQL, schema_name=schema) + + return [t["name"] for t in tables] + + @reflection.cache + def get_view_definition(self, connection, view_name, schema=None, **kw): + if schema is None: + schema = self.default_schema_name + + VIEW_DEF_SQL = text(""" + SELECT c.text + FROM syscomments c JOIN sysobjects o ON c.id = o.id + WHERE o.name = :view_name + AND o.type = 'V' + """) + + if util.py2k: + if isinstance(view_name, unicode): + view_name = view_name.encode("ascii") + + view = connection.execute(VIEW_DEF_SQL, view_name=view_name) + + return view.scalar() + + @reflection.cache + def get_view_names(self, connection, schema=None, **kw): + if schema is None: + schema = self.default_schema_name + + VIEW_SQL = text(""" + SELECT o.name AS name + FROM sysobjects o JOIN sysusers u ON o.uid = u.uid + WHERE u.name = :schema_name + AND o.type = 'V' + """) + + if util.py2k: + if isinstance(schema, unicode): + schema = schema.encode("ascii") + views = connection.execute(VIEW_SQL, schema_name=schema) + + return [v["name"] for v in views] + + def has_table(self, connection, table_name, schema=None): + try: + self.get_table_id(connection, table_name, schema) + except 
exc.NoSuchTableError:
+            return False
+        else:
+            return True
diff --git a/app/lib/sqlalchemy/dialects/sybase/mxodbc.py b/app/lib/sqlalchemy/dialects/sybase/mxodbc.py
new file mode 100644
index 0000000..1e77edc
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sybase/mxodbc.py
@@ -0,0 +1,33 @@
+# sybase/mxodbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+"""
+
+.. dialect:: sybase+mxodbc
+    :name: mxODBC
+    :dbapi: mxodbc
+    :connectstring: sybase+mxodbc://<username>:<password>@<dsnname>
+    :url: http://www.egenix.com/
+
+.. note::
+
+    This dialect is a stub only and is likely non-functional at this time.
+
+
+"""
+from sqlalchemy.dialects.sybase.base import SybaseDialect
+from sqlalchemy.dialects.sybase.base import SybaseExecutionContext
+from sqlalchemy.connectors.mxodbc import MxODBCConnector
+
+
+class SybaseExecutionContext_mxodbc(SybaseExecutionContext):
+    pass
+
+
+class SybaseDialect_mxodbc(MxODBCConnector, SybaseDialect):
+    execution_ctx_cls = SybaseExecutionContext_mxodbc
+
+dialect = SybaseDialect_mxodbc
diff --git a/app/lib/sqlalchemy/dialects/sybase/pyodbc.py b/app/lib/sqlalchemy/dialects/sybase/pyodbc.py
new file mode 100644
index 0000000..9690f49
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sybase/pyodbc.py
@@ -0,0 +1,86 @@
+# sybase/pyodbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: sybase+pyodbc
+    :name: PyODBC
+    :dbapi: pyodbc
+    :connectstring: sybase+pyodbc://<username>:<password>@<dsnname>\
+[/<database>]
+    :url: http://pypi.python.org/pypi/pyodbc/
+
+
+Unicode Support
+---------------
+
+The pyodbc driver currently supports usage of these Sybase types with
+Unicode or multibyte strings::
+
+    CHAR
+    NCHAR
+    NVARCHAR
+    TEXT
+    VARCHAR
+
+Currently *not* supported are::
+
+    UNICHAR
+    UNITEXT
+    UNIVARCHAR
+
+"""
+
+from sqlalchemy.dialects.sybase.base import SybaseDialect,\
+    SybaseExecutionContext
+from sqlalchemy.connectors.pyodbc import PyODBCConnector
+from sqlalchemy import types as sqltypes, processors
+import decimal
+
+
+class _SybNumeric_pyodbc(sqltypes.Numeric):
+    """Turns Decimals with adjusted() < -6 into floats.
+
+    It's not yet known how to get decimals with many
+    significant digits or very large adjusted() into Sybase
+    via pyodbc.
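+
+    For example, ``Decimal("0.0000001").adjusted()`` is -7, so such a
+    value would be coerced to a float on the way in, while
+    ``Decimal("123.45").adjusted()`` is 2 and is passed through as a
+    Decimal (an illustrative sketch of the bind processor below)::
+
+        from decimal import Decimal
+
+        Decimal("0.0000001").adjusted()   # -7: converted via to_float
+        Decimal("123.45").adjusted()      # 2: handled as a Decimal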
+
+    """
+
+    def bind_processor(self, dialect):
+        super_process = super(_SybNumeric_pyodbc, self).\
+            bind_processor(dialect)
+
+        def process(value):
+            if self.asdecimal and \
+                    isinstance(value, decimal.Decimal):
+
+                if value.adjusted() < -6:
+                    return processors.to_float(value)
+
+            if super_process:
+                return super_process(value)
+            else:
+                return value
+        return process
+
+
+class SybaseExecutionContext_pyodbc(SybaseExecutionContext):
+    def set_ddl_autocommit(self, connection, value):
+        if value:
+            connection.autocommit = True
+        else:
+            connection.autocommit = False
+
+
+class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect):
+    execution_ctx_cls = SybaseExecutionContext_pyodbc
+
+    colspecs = {
+        sqltypes.Numeric: _SybNumeric_pyodbc,
+    }
+
+dialect = SybaseDialect_pyodbc
diff --git a/app/lib/sqlalchemy/dialects/sybase/pysybase.py b/app/lib/sqlalchemy/dialects/sybase/pysybase.py
new file mode 100644
index 0000000..00a7ca3
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sybase/pysybase.py
@@ -0,0 +1,102 @@
+# sybase/pysybase.py
+# Copyright (C) 2010-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: sybase+pysybase
+    :name: Python-Sybase
+    :dbapi: Sybase
+    :connectstring: sybase+pysybase://<username>:<password>@<dsn>/\
+[database name]
+    :url: http://python-sybase.sourceforge.net/
+
+Unicode Support
+---------------
+
+The python-sybase driver does not appear to support non-ASCII strings of any
+kind at this time.
+
+"""
+
+from sqlalchemy import types as sqltypes, processors
+from sqlalchemy.dialects.sybase.base import SybaseDialect, \
+    SybaseExecutionContext, SybaseSQLCompiler
+
+
+class _SybNumeric(sqltypes.Numeric):
+    def result_processor(self, dialect, type_):
+        if not self.asdecimal:
+            return processors.to_float
+        else:
+            return sqltypes.Numeric.result_processor(self, dialect, type_)
+
+
+class SybaseExecutionContext_pysybase(SybaseExecutionContext):
+
+    def set_ddl_autocommit(self, dbapi_connection, value):
+        if value:
+            # call commit() on the Sybase connection directly,
+            # to avoid any side effects of calling a Connection
+            # transactional method inside of pre_exec()
+            dbapi_connection.commit()
+
+    def pre_exec(self):
+        SybaseExecutionContext.pre_exec(self)
+
+        for param in self.parameters:
+            for key in list(param):
+                param["@" + key] = param[key]
+                del param[key]
+
+
+class SybaseSQLCompiler_pysybase(SybaseSQLCompiler):
+    def bindparam_string(self, name, **kw):
+        return "@" + name
+
+
+class SybaseDialect_pysybase(SybaseDialect):
+    driver = 'pysybase'
+    execution_ctx_cls = SybaseExecutionContext_pysybase
+    statement_compiler = SybaseSQLCompiler_pysybase
+
+    colspecs = {
+        sqltypes.Numeric: _SybNumeric,
+        sqltypes.Float: sqltypes.Float
+    }
+
+    @classmethod
+    def dbapi(cls):
+        import Sybase
+        return Sybase
+
+    def create_connect_args(self, url):
+        opts = url.translate_connect_args(username='user', password='passwd')
+
+        return ([opts.pop('host')], opts)
+
+    def do_executemany(self, cursor, statement, parameters, context=None):
+        # calling python-sybase executemany yields:
+        # TypeError: string too long for buffer
+        for param in parameters:
+            cursor.execute(statement, param)
+
+    def _get_server_version_info(self, connection):
+        vers = connection.scalar("select @@version_number")
+        # i.e.
15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0), + # (12, 5, 0, 0) + return (vers / 1000, vers % 1000 / 100, vers % 100 / 10, vers % 10) + + def is_disconnect(self, e, connection, cursor): + if isinstance(e, (self.dbapi.OperationalError, + self.dbapi.ProgrammingError)): + msg = str(e) + return ('Unable to complete network request to host' in msg or + 'Invalid connection state' in msg or + 'Invalid cursor state' in msg) + else: + return False + +dialect = SybaseDialect_pysybase diff --git a/app/lib/sqlalchemy/engine/__init__.py b/app/lib/sqlalchemy/engine/__init__.py new file mode 100644 index 0000000..2a6c68d --- /dev/null +++ b/app/lib/sqlalchemy/engine/__init__.py @@ -0,0 +1,434 @@ +# engine/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""SQL connections, SQL execution and high-level DB-API interface. + +The engine package defines the basic components used to interface +DB-API modules with higher-level statement construction, +connection-management, execution and result contexts. The primary +"entry point" class into this package is the Engine and its public +constructor ``create_engine()``. + +This package includes: + +base.py + Defines interface classes and some implementation classes which + comprise the basic components used to interface between a DB-API, + constructed and plain-text statements, connections, transactions, + and results. + +default.py + Contains default implementations of some of the components defined + in base.py. All current database dialects use the classes in + default.py as base classes for their own database-specific + implementations. + +strategies.py + The mechanics of constructing ``Engine`` objects are represented + here. Defines the ``EngineStrategy`` class which represents how + to go from arguments specified to the ``create_engine()`` + function, to a fully constructed ``Engine``, including + initialization of connection pooling, dialects, and specific + subclasses of ``Engine``. + +threadlocal.py + The ``TLEngine`` class is defined here, which is a subclass of + the generic ``Engine`` and tracks ``Connection`` and + ``Transaction`` objects against the identity of the current + thread. This allows certain programming patterns based around + the concept of a "thread-local connection" to be possible. + The ``TLEngine`` is created by using the "threadlocal" engine + strategy in conjunction with the ``create_engine()`` function. + +url.py + Defines the ``URL`` class which represents the individual + components of a string URL passed to ``create_engine()``. Also + defines a basic module-loading strategy for the dialect specifier + within a URL. +""" + +from .interfaces import ( + Connectable, + CreateEnginePlugin, + Dialect, + ExecutionContext, + ExceptionContext, + + # backwards compat + Compiled, + TypeCompiler +) + +from .base import ( + Connection, + Engine, + NestedTransaction, + RootTransaction, + Transaction, + TwoPhaseTransaction, +) + +from .result import ( + BaseRowProxy, + BufferedColumnResultProxy, + BufferedColumnRow, + BufferedRowResultProxy, + FullyBufferedResultProxy, + ResultProxy, + RowProxy, +) + +from .util import ( + connection_memoize +) + + +from . import util, strategies + +# backwards compat +from ..sql import ddl + +default_strategy = 'plain' + + +def create_engine(*args, **kwargs): + """Create a new :class:`.Engine` instance. 
+ + The standard calling form is to send the URL as the + first positional argument, usually a string + that indicates database dialect and connection arguments:: + + + engine = create_engine("postgresql://scott:tiger@localhost/test") + + Additional keyword arguments may then follow it which + establish various options on the resulting :class:`.Engine` + and its underlying :class:`.Dialect` and :class:`.Pool` + constructs:: + + engine = create_engine("mysql://scott:tiger@hostname/dbname", + encoding='latin1', echo=True) + + The string form of the URL is + ``dialect[+driver]://user:password@host/dbname[?key=value..]``, where + ``dialect`` is a database name such as ``mysql``, ``oracle``, + ``postgresql``, etc., and ``driver`` the name of a DBAPI, such as + ``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively, + the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`. + + ``**kwargs`` takes a wide variety of options which are routed + towards their appropriate components. Arguments may be specific to + the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the + :class:`.Pool`. Specific dialects also accept keyword arguments that + are unique to that dialect. Here, we describe the parameters + that are common to most :func:`.create_engine()` usage. + + Once established, the newly resulting :class:`.Engine` will + request a connection from the underlying :class:`.Pool` once + :meth:`.Engine.connect` is called, or a method which depends on it + such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn + will establish the first actual DBAPI connection when this request + is received. The :func:`.create_engine` call itself does **not** + establish any actual DBAPI connections directly. + + .. seealso:: + + :doc:`/core/engines` + + :doc:`/dialects/index` + + :ref:`connections_toplevel` + + :param case_sensitive=True: if False, result column names + will match in a case-insensitive fashion, that is, + ``row['SomeColumn']``. + + .. versionchanged:: 0.8 + By default, result row names match case-sensitively. + In version 0.7 and prior, all matches were case-insensitive. + + :param connect_args: a dictionary of options which will be + passed directly to the DBAPI's ``connect()`` method as + additional keyword arguments. See the example + at :ref:`custom_dbapi_args`. + + :param convert_unicode=False: if set to True, sets + the default behavior of ``convert_unicode`` on the + :class:`.String` type to ``True``, regardless + of a setting of ``False`` on an individual + :class:`.String` type, thus causing all :class:`.String` + -based columns + to accommodate Python ``unicode`` objects. This flag + is useful as an engine-wide setting when using a + DBAPI that does not natively support Python + ``unicode`` objects and raises an error when + one is received (such as pyodbc with FreeTDS). + + See :class:`.String` for further details on + what this flag indicates. + + :param creator: a callable which returns a DBAPI connection. + This creation function will be passed to the underlying + connection pool and will be used to create all new database + connections. Usage of this function causes connection + parameters specified in the URL argument to be bypassed. + + :param echo=False: if True, the Engine will log all statements + as well as a repr() of their parameter lists to the engines + logger, which defaults to sys.stdout. The ``echo`` attribute of + ``Engine`` can be modified at any time to turn logging on and + off. 
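+
+      For example (a minimal sketch)::
+
+          engine = create_engine("sqlite://", echo=True)
+          engine.echo = False   # statement logging may be toggled later
+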
If set to the string ``"debug"``, result rows will be + printed to the standard output as well. This flag ultimately + controls a Python logger; see :ref:`dbengine_logging` for + information on how to configure logging directly. + + :param echo_pool=False: if True, the connection pool will log + all checkouts/checkins to the logging stream, which defaults to + sys.stdout. This flag ultimately controls a Python logger; see + :ref:`dbengine_logging` for information on how to configure logging + directly. + + :param encoding: Defaults to ``utf-8``. This is the string + encoding used by SQLAlchemy for string encode/decode + operations which occur within SQLAlchemy, **outside of + the DBAPI.** Most modern DBAPIs feature some degree of + direct support for Python ``unicode`` objects, + what you see in Python 2 as a string of the form + ``u'some string'``. For those scenarios where the + DBAPI is detected as not supporting a Python ``unicode`` + object, this encoding is used to determine the + source/destination encoding. It is **not used** + for those cases where the DBAPI handles unicode + directly. + + To properly configure a system to accommodate Python + ``unicode`` objects, the DBAPI should be + configured to handle unicode to the greatest + degree as is appropriate - see + the notes on unicode pertaining to the specific + target database in use at :ref:`dialect_toplevel`. + + Areas where string encoding may need to be accommodated + outside of the DBAPI include zero or more of: + + * the values passed to bound parameters, corresponding to + the :class:`.Unicode` type or the :class:`.String` type + when ``convert_unicode`` is ``True``; + * the values returned in result set columns corresponding + to the :class:`.Unicode` type or the :class:`.String` + type when ``convert_unicode`` is ``True``; + * the string SQL statement passed to the DBAPI's + ``cursor.execute()`` method; + * the string names of the keys in the bound parameter + dictionary passed to the DBAPI's ``cursor.execute()`` + as well as ``cursor.setinputsizes()`` methods; + * the string column names retrieved from the DBAPI's + ``cursor.description`` attribute. + + When using Python 3, the DBAPI is required to support + *all* of the above values as Python ``unicode`` objects, + which in Python 3 are just known as ``str``. In Python 2, + the DBAPI does not specify unicode behavior at all, + so SQLAlchemy must make decisions for each of the above + values on a per-DBAPI basis - implementations are + completely inconsistent in their behavior. + + :param execution_options: Dictionary execution options which will + be applied to all connections. See + :meth:`~sqlalchemy.engine.Connection.execution_options` + + :param implicit_returning=True: When ``True``, a RETURNING- + compatible construct, if available, will be used to + fetch newly generated primary key values when a single row + INSERT statement is emitted with no existing returning() + clause. This applies to those backends which support RETURNING + or a compatible construct, including PostgreSQL, Firebird, Oracle, + Microsoft SQL Server. Set this to ``False`` to disable + the automatic usage of RETURNING. + + :param isolation_level: this string parameter is interpreted by various + dialects in order to affect the transaction isolation level of the + database connection. The parameter essentially accepts some subset of + these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``, + ``"READ_COMMITTED"``, ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``. 
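+
+      For example (a sketch; which of these levels is accepted varies
+      by backend)::
+
+          engine = create_engine(
+              "postgresql://scott:tiger@localhost/test",
+              isolation_level="READ_COMMITTED")
+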
+ Behavior here varies per backend, and + individual dialects should be consulted directly. + + Note that the isolation level can also be set on a per-:class:`.Connection` + basis as well, using the + :paramref:`.Connection.execution_options.isolation_level` + feature. + + .. seealso:: + + :attr:`.Connection.default_isolation_level` - view default level + + :paramref:`.Connection.execution_options.isolation_level` + - set per :class:`.Connection` isolation level + + :ref:`SQLite Transaction Isolation ` + + :ref:`PostgreSQL Transaction Isolation ` + + :ref:`MySQL Transaction Isolation ` + + :ref:`session_transaction_isolation` - for the ORM + + :param label_length=None: optional integer value which limits + the size of dynamically generated column labels to that many + characters. If less than 6, labels are generated as + "_(counter)". If ``None``, the value of + ``dialect.max_identifier_length`` is used instead. + + :param listeners: A list of one or more + :class:`~sqlalchemy.interfaces.PoolListener` objects which will + receive connection pool events. + + :param logging_name: String identifier which will be used within + the "name" field of logging records generated within the + "sqlalchemy.engine" logger. Defaults to a hexstring of the + object's id. + + :param max_overflow=10: the number of connections to allow in + connection pool "overflow", that is connections that can be + opened above and beyond the pool_size setting, which defaults + to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`. + + :param module=None: reference to a Python module object (the module + itself, not its string name). Specifies an alternate DBAPI module to + be used by the engine's dialect. Each sub-dialect references a + specific DBAPI which will be imported before first connect. This + parameter causes the import to be bypassed, and the given module to + be used instead. Can be used for testing of DBAPIs as well as to + inject "mock" DBAPI implementations into the :class:`.Engine`. + + :param paramstyle=None: The `paramstyle `_ + to use when rendering bound parameters. This style defaults to the + one recommended by the DBAPI itself, which is retrieved from the + ``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept + more than one paramstyle, and in particular it may be desirable + to change a "named" paramstyle into a "positional" one, or vice versa. + When this attribute is passed, it should be one of the values + ``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or + ``"pyformat"``, and should correspond to a parameter style known + to be supported by the DBAPI in use. + + :param pool=None: an already-constructed instance of + :class:`~sqlalchemy.pool.Pool`, such as a + :class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this + pool will be used directly as the underlying connection pool + for the engine, bypassing whatever connection parameters are + present in the URL argument. For information on constructing + connection pools manually, see :ref:`pooling_toplevel`. + + :param poolclass=None: a :class:`~sqlalchemy.pool.Pool` + subclass, which will be used to create a connection pool + instance using the connection parameters given in the URL. Note + this differs from ``pool`` in that you don't actually + instantiate the pool in this case, you just indicate what type + of pool to be used. + + :param pool_logging_name: String identifier which will be used within + the "name" field of logging records generated within the + "sqlalchemy.pool" logger. 
Defaults to a hexstring of the object's + id. + + :param pool_size=5: the number of connections to keep open + inside the connection pool. This used with + :class:`~sqlalchemy.pool.QueuePool` as + well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With + :class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting + of 0 indicates no limit; to disable pooling, set ``poolclass`` to + :class:`~sqlalchemy.pool.NullPool` instead. + + :param pool_recycle=-1: this setting causes the pool to recycle + connections after the given number of seconds has passed. It + defaults to -1, or no timeout. For example, setting to 3600 + means connections will be recycled after one hour. Note that + MySQL in particular will disconnect automatically if no + activity is detected on a connection for eight hours (although + this is configurable with the MySQLDB connection itself and the + server configuration as well). + + :param pool_reset_on_return='rollback': set the "reset on return" + behavior of the pool, which is whether ``rollback()``, + ``commit()``, or nothing is called upon connections + being returned to the pool. See the docstring for + ``reset_on_return`` at :class:`.Pool`. + + .. versionadded:: 0.7.6 + + :param pool_timeout=30: number of seconds to wait before giving + up on getting a connection from the pool. This is only used + with :class:`~sqlalchemy.pool.QueuePool`. + + :param strategy='plain': selects alternate engine implementations. + Currently available are: + + * the ``threadlocal`` strategy, which is described in + :ref:`threadlocal_strategy`; + * the ``mock`` strategy, which dispatches all statement + execution to a function passed as the argument ``executor``. + See `example in the FAQ + `_. + + :param executor=None: a function taking arguments + ``(sql, *multiparams, **params)``, to which the ``mock`` strategy will + dispatch all statement execution. Used only by ``strategy='mock'``. + + """ + + strategy = kwargs.pop('strategy', default_strategy) + strategy = strategies.strategies[strategy] + return strategy.create(*args, **kwargs) + + +def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs): + """Create a new Engine instance using a configuration dictionary. + + The dictionary is typically produced from a config file. + + The keys of interest to ``engine_from_config()`` should be prefixed, e.g. + ``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument + indicates the prefix to be searched for. Each matching key (after the + prefix is stripped) is treated as though it were the corresponding keyword + argument to a :func:`.create_engine` call. + + The only required key is (assuming the default prefix) ``sqlalchemy.url``, + which provides the :ref:`database URL `. + + A select set of keyword arguments will be "coerced" to their + expected type based on string values. The set of arguments + is extensible per-dialect using the ``engine_config_types`` accessor. + + :param configuration: A dictionary (typically produced from a config file, + but this is not a requirement). Items whose keys start with the value + of 'prefix' will have that prefix stripped, and will then be passed to + :ref:`create_engine`. + + :param prefix: Prefix to match and then strip from keys + in 'configuration'. + + :param kwargs: Each keyword argument to ``engine_from_config()`` itself + overrides the corresponding item taken from the 'configuration' + dictionary. Keyword arguments should *not* be prefixed. 
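+
+    For example (a minimal sketch; the key values shown are
+    illustrative)::
+
+        config = {
+            'sqlalchemy.url': 'postgresql://scott:tiger@localhost/test',
+            'sqlalchemy.echo': 'true',
+            'sqlalchemy.pool_size': '5',
+        }
+        engine = engine_from_config(config, prefix='sqlalchemy.')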
+
+    """
+
+    options = dict((key[len(prefix):], configuration[key])
+                   for key in configuration
+                   if key.startswith(prefix))
+    options['_coerce_config'] = True
+    options.update(kwargs)
+    url = options.pop('url')
+    return create_engine(url, **options)
+
+
+__all__ = (
+    'create_engine',
+    'engine_from_config',
+)
diff --git a/app/lib/sqlalchemy/engine/base.py b/app/lib/sqlalchemy/engine/base.py
new file mode 100644
index 0000000..91f4493
--- /dev/null
+++ b/app/lib/sqlalchemy/engine/base.py
@@ -0,0 +1,2207 @@
+# engine/base.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+from __future__ import with_statement
+
+"""Defines :class:`.Connection` and :class:`.Engine`.
+
+"""
+
+
+import sys
+from .. import exc, util, log, interfaces
+from ..sql import util as sql_util
+from ..sql import schema
+from .interfaces import Connectable, ExceptionContext
+from .util import _distill_params
+import contextlib
+
+
+class Connection(Connectable):
+    """Provides high-level functionality for a wrapped DB-API connection.
+
+    Provides execution support for string-based SQL statements as well as
+    :class:`.ClauseElement`, :class:`.Compiled` and :class:`.DefaultGenerator`
+    objects. Provides a :meth:`begin` method to return :class:`.Transaction`
+    objects.
+
+    The Connection object is **not** thread-safe. While a Connection can be
+    shared among threads using properly synchronized access, it is still
+    possible that the underlying DBAPI connection may not support shared
+    access between threads. Check the DBAPI documentation for details.
+
+    The Connection object represents a single dbapi connection checked out
+    from the connection pool. In this state, the connection pool has no
+    effect upon the connection, including its expiration or timeout state.
+    For the connection pool to properly manage connections, connections
+    should be returned to the connection pool (i.e. ``connection.close()``)
+    whenever the connection is not in use.
+
+    .. index::
+      single: thread safety; Connection
+
+    """
+
+    schema_for_object = schema._schema_getter(None)
+    """Return the ".schema" attribute for an object.
+
+    Used for :class:`.Table`, :class:`.Sequence` and similar objects,
+    and takes into account
+    the :paramref:`.Connection.execution_options.schema_translate_map`
+    parameter.
+
+    .. versionadded:: 1.1
+
+    .. seealso::
+
+        :ref:`schema_translating`
+
+    """
+
+    def __init__(self, engine, connection=None, close_with_result=False,
+                 _branch_from=None, _execution_options=None,
+                 _dispatch=None,
+                 _has_events=None):
+        """Construct a new Connection.
+
+        The constructor here is not public and is called only by an
+        :class:`.Engine`. See :meth:`.Engine.connect` and
+        :meth:`.Engine.contextual_connect` methods.
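+
+        A sketch of the public route to obtaining a
+        :class:`.Connection` (rather than this constructor)::
+
+            engine = create_engine("sqlite://")
+            conn = engine.connect()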
+ + """ + self.engine = engine + self.dialect = engine.dialect + self.__branch_from = _branch_from + self.__branch = _branch_from is not None + + if _branch_from: + self.__connection = connection + self._execution_options = _execution_options + self._echo = _branch_from._echo + self.should_close_with_result = False + self.dispatch = _dispatch + self._has_events = _branch_from._has_events + self.schema_for_object = _branch_from.schema_for_object + else: + self.__connection = connection \ + if connection is not None else engine.raw_connection() + self.__transaction = None + self.__savepoint_seq = 0 + self.should_close_with_result = close_with_result + self.__invalid = False + self.__can_reconnect = True + self._echo = self.engine._should_log_info() + + if _has_events is None: + # if _has_events is sent explicitly as False, + # then don't join the dispatch of the engine; we don't + # want to handle any of the engine's events in that case. + self.dispatch = self.dispatch._join(engine.dispatch) + self._has_events = _has_events or ( + _has_events is None and engine._has_events) + + assert not _execution_options + self._execution_options = engine._execution_options + + if self._has_events or self.engine._has_events: + self.dispatch.engine_connect(self, self.__branch) + + def _branch(self): + """Return a new Connection which references this Connection's + engine and connection; but does not have close_with_result enabled, + and also whose close() method does nothing. + + The Core uses this very sparingly, only in the case of + custom SQL default functions that are to be INSERTed as the + primary key of a row where we need to get the value back, so we have + to invoke it distinctly - this is a very uncommon case. + + Userland code accesses _branch() when the connect() or + contextual_connect() methods are called. The branched connection + acts as much as possible like the parent, except that it stays + connected when a close() event occurs. + + """ + if self.__branch_from: + return self.__branch_from._branch() + else: + return self.engine._connection_cls( + self.engine, + self.__connection, + _branch_from=self, + _execution_options=self._execution_options, + _has_events=self._has_events, + _dispatch=self.dispatch) + + @property + def _root(self): + """return the 'root' connection. + + Returns 'self' if this connection is not a branch, else + returns the root connection from which we ultimately branched. + + """ + + if self.__branch_from: + return self.__branch_from + else: + return self + + def _clone(self): + """Create a shallow copy of this Connection. + + """ + c = self.__class__.__new__(self.__class__) + c.__dict__ = self.__dict__.copy() + return c + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def execution_options(self, **opt): + r""" Set non-SQL options for the connection which take effect + during execution. + + The method returns a copy of this :class:`.Connection` which references + the same underlying DBAPI connection, but also defines the given + execution options which will take effect for a call to + :meth:`execute`. 
As the new :class:`.Connection` references the same + underlying resource, it's usually a good idea to ensure that the copies + will be discarded immediately, which is implicit if used as in:: + + result = connection.execution_options(stream_results=True).\ + execute(stmt) + + Note that any key/value can be passed to + :meth:`.Connection.execution_options`, and it will be stored in the + ``_execution_options`` dictionary of the :class:`.Connection`. It + is suitable for usage by end-user schemes to communicate with + event listeners, for example. + + The keywords that are currently recognized by SQLAlchemy itself + include all those listed under :meth:`.Executable.execution_options`, + as well as others that are specific to :class:`.Connection`. + + :param autocommit: Available on: Connection, statement. + When True, a COMMIT will be invoked after execution + when executed in 'autocommit' mode, i.e. when an explicit + transaction is not begun on the connection. Note that DBAPI + connections by default are always in a transaction - SQLAlchemy uses + rules applied to different kinds of statements to determine if + COMMIT will be invoked in order to provide its "autocommit" feature. + Typically, all INSERT/UPDATE/DELETE statements as well as + CREATE/DROP statements have autocommit behavior enabled; SELECT + constructs do not. Use this option when invoking a SELECT or other + specific SQL construct where COMMIT is desired (typically when + calling stored procedures and such), and an explicit + transaction is not in progress. + + :param compiled_cache: Available on: Connection. + A dictionary where :class:`.Compiled` objects + will be cached when the :class:`.Connection` compiles a clause + expression into a :class:`.Compiled` object. + It is the user's responsibility to + manage the size of this dictionary, which will have keys + corresponding to the dialect, clause element, the column + names within the VALUES or SET clause of an INSERT or UPDATE, + as well as the "batch" mode for an INSERT or UPDATE statement. + The format of this dictionary is not guaranteed to stay the + same in future releases. + + Note that the ORM makes use of its own "compiled" caches for + some operations, including flush operations. The caching + used by the ORM internally supersedes a cache dictionary + specified here. + + :param isolation_level: Available on: :class:`.Connection`. + Set the transaction isolation level for + the lifespan of this :class:`.Connection` object (*not* the + underlying DBAPI connection, for which the level is reset + to its original setting upon termination of this + :class:`.Connection` object). + + Valid values include + those string values accepted by the + :paramref:`.create_engine.isolation_level` + parameter passed to :func:`.create_engine`. These levels are + semi-database specific; see individual dialect documentation for + valid levels. + + Note that this option necessarily affects the underlying + DBAPI connection for the lifespan of the originating + :class:`.Connection`, and is not per-execution. This + setting is not removed until the underlying DBAPI connection + is returned to the connection pool, i.e. + the :meth:`.Connection.close` method is called. + + .. warning:: The ``isolation_level`` execution option should + **not** be used when a transaction is already established, that + is, the :meth:`.Connection.begin` method or similar has been + called. 
A database cannot change the isolation level on a + transaction in progress, and different DBAPIs and/or + SQLAlchemy dialects may implicitly roll back or commit + the transaction, or not affect the connection at all. + + .. versionchanged:: 0.9.9 A warning is emitted when the + ``isolation_level`` execution option is used after a + transaction has been started with :meth:`.Connection.begin` + or similar. + + .. note:: The ``isolation_level`` execution option is implicitly + reset if the :class:`.Connection` is invalidated, e.g. via + the :meth:`.Connection.invalidate` method, or if a + disconnection error occurs. The new connection produced after + the invalidation will not have the isolation level re-applied + to it automatically. + + .. seealso:: + + :paramref:`.create_engine.isolation_level` + - set per :class:`.Engine` isolation level + + :meth:`.Connection.get_isolation_level` - view current level + + :ref:`SQLite Transaction Isolation ` + + :ref:`PostgreSQL Transaction Isolation ` + + :ref:`MySQL Transaction Isolation ` + + :ref:`SQL Server Transaction Isolation ` + + :ref:`session_transaction_isolation` - for the ORM + + :param no_parameters: When ``True``, if the final parameter + list or dictionary is totally empty, will invoke the + statement on the cursor as ``cursor.execute(statement)``, + not passing the parameter collection at all. + Some DBAPIs such as psycopg2 and mysql-python consider + percent signs as significant only when parameters are + present; this option allows code to generate SQL + containing percent signs (and possibly other characters) + that is neutral regarding whether it's executed by the DBAPI + or piped into a script that's later invoked by + command line tools. + + .. versionadded:: 0.7.6 + + :param stream_results: Available on: Connection, statement. + Indicate to the dialect that results should be + "streamed" and not pre-buffered, if possible. This is a limitation + of many DBAPIs. The flag is currently understood only by the + psycopg2, mysqldb and pymysql dialects. + + :param schema_translate_map: Available on: Connection, Engine. + A dictionary mapping schema names to schema names, that will be + applied to the :paramref:`.Table.schema` element of each + :class:`.Table` encountered when SQL or DDL expression elements + are compiled into strings; the resulting schema name will be + converted based on presence in the map of the original name. + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`schema_translating` + + """ + c = self._clone() + c._execution_options = c._execution_options.union(opt) + if self._has_events or self.engine._has_events: + self.dispatch.set_connection_execution_options(c, opt) + self.dialect.set_connection_execution_options(c, opt) + return c + + @property + def closed(self): + """Return True if this connection is closed.""" + + return '_Connection__connection' not in self.__dict__ \ + and not self.__can_reconnect + + @property + def invalidated(self): + """Return True if this connection was invalidated.""" + + return self._root.__invalid + + @property + def connection(self): + """The underlying DB-API connection managed by this Connection. + + .. seealso:: + + + :ref:`dbapi_connections` + + """ + + try: + return self.__connection + except AttributeError: + try: + return self._revalidate_connection() + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + + def get_isolation_level(self): + """Return the current isolation level assigned to this + :class:`.Connection`. 
+ + This will typically be the default isolation level as determined + by the dialect, unless if the + :paramref:`.Connection.execution_options.isolation_level` + feature has been used to alter the isolation level on a + per-:class:`.Connection` basis. + + This attribute will typically perform a live SQL operation in order + to procure the current isolation level, so the value returned is the + actual level on the underlying DBAPI connection regardless of how + this state was set. Compare to the + :attr:`.Connection.default_isolation_level` accessor + which returns the dialect-level setting without performing a SQL + query. + + .. versionadded:: 0.9.9 + + .. seealso:: + + :attr:`.Connection.default_isolation_level` - view default level + + :paramref:`.create_engine.isolation_level` + - set per :class:`.Engine` isolation level + + :paramref:`.Connection.execution_options.isolation_level` + - set per :class:`.Connection` isolation level + + """ + try: + return self.dialect.get_isolation_level(self.connection) + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + + @property + def default_isolation_level(self): + """The default isolation level assigned to this :class:`.Connection`. + + This is the isolation level setting that the :class:`.Connection` + has when first procured via the :meth:`.Engine.connect` method. + This level stays in place until the + :paramref:`.Connection.execution_options.isolation_level` is used + to change the setting on a per-:class:`.Connection` basis. + + Unlike :meth:`.Connection.get_isolation_level`, this attribute is set + ahead of time from the first connection procured by the dialect, + so SQL query is not invoked when this accessor is called. + + .. versionadded:: 0.9.9 + + .. seealso:: + + :meth:`.Connection.get_isolation_level` - view current level + + :paramref:`.create_engine.isolation_level` + - set per :class:`.Engine` isolation level + + :paramref:`.Connection.execution_options.isolation_level` + - set per :class:`.Connection` isolation level + + """ + return self.dialect.default_isolation_level + + def _revalidate_connection(self): + if self.__branch_from: + return self.__branch_from._revalidate_connection() + if self.__can_reconnect and self.__invalid: + if self.__transaction is not None: + raise exc.InvalidRequestError( + "Can't reconnect until invalid " + "transaction is rolled back") + self.__connection = self.engine.raw_connection(_connection=self) + self.__invalid = False + return self.__connection + raise exc.ResourceClosedError("This Connection is closed") + + @property + def _connection_is_valid(self): + # use getattr() for is_valid to support exceptions raised in + # dialect initializer, where the connection is not wrapped in + # _ConnectionFairy + + return getattr(self.__connection, 'is_valid', False) + + @property + def _still_open_and_connection_is_valid(self): + return \ + not self.closed and \ + not self.invalidated and \ + getattr(self.__connection, 'is_valid', False) + + @property + def info(self): + """Info dictionary associated with the underlying DBAPI connection + referred to by this :class:`.Connection`, allowing user-defined + data to be associated with the connection. + + The data here will follow along with the DBAPI connection including + after it is returned to the connection pool and used again + in subsequent instances of :class:`.Connection`. + + """ + + return self.connection.info + + def connect(self): + """Returns a branched version of this :class:`.Connection`. 
+
+        The :meth:`.Connection.close` method on the returned
+        :class:`.Connection` can be called and this
+        :class:`.Connection` will remain open.
+
+        This method provides usage symmetry with
+        :meth:`.Engine.connect`, including for usage
+        with context managers.
+
+        """
+
+        return self._branch()
+
+    def contextual_connect(self, **kwargs):
+        """Returns a branched version of this :class:`.Connection`.
+
+        The :meth:`.Connection.close` method on the returned
+        :class:`.Connection` can be called and this
+        :class:`.Connection` will remain open.
+
+        This method provides usage symmetry with
+        :meth:`.Engine.contextual_connect`, including for usage
+        with context managers.
+
+        """
+
+        return self._branch()
+
+    def invalidate(self, exception=None):
+        """Invalidate the underlying DBAPI connection associated with
+        this :class:`.Connection`.
+
+        The underlying DBAPI connection is literally closed (if
+        possible), and is discarded.  Its source connection pool will
+        typically lazily create a new connection to replace it.
+
+        Upon the next use (where "use" typically means using the
+        :meth:`.Connection.execute` method or similar),
+        this :class:`.Connection` will attempt to
+        procure a new DBAPI connection using the services of the
+        :class:`.Pool` as a source of connectivity (e.g. a "reconnection").
+
+        If a transaction was in progress (e.g. the
+        :meth:`.Connection.begin` method has been called) when the
+        :meth:`.Connection.invalidate` method is called, at the DBAPI
+        level all state associated with this transaction is lost, as
+        the DBAPI connection is closed.  The :class:`.Connection`
+        will not allow a reconnection to proceed until the
+        :class:`.Transaction` object is ended, by calling the
+        :meth:`.Transaction.rollback` method; until that point, any attempt at
+        continuing to use the :class:`.Connection` will raise an
+        :class:`~sqlalchemy.exc.InvalidRequestError`.
+        This is to prevent applications from accidentally
+        continuing ongoing transactional operations despite the
+        fact that the transaction has been lost due to an
+        invalidation.
+
+        The :meth:`.Connection.invalidate` method, just like auto-invalidation,
+        will at the connection pool level invoke the
+        :meth:`.PoolEvents.invalidate` event.
+
+        .. seealso::
+
+            :ref:`pool_connection_invalidation`
+
+        """
+
+        if self.invalidated:
+            return
+
+        if self.closed:
+            raise exc.ResourceClosedError("This Connection is closed")
+
+        if self._root._connection_is_valid:
+            self._root.__connection.invalidate(exception)
+            del self._root.__connection
+        self._root.__invalid = True
+
+    def detach(self):
+        """Detach the underlying DB-API connection from its connection pool.
+
+        E.g.::
+
+            with engine.connect() as conn:
+                conn.detach()
+                conn.execute("SET search_path TO schema1, schema2")
+
+                # work with connection
+
+            # connection is fully closed (since we used "with:", can
+            # also call .close())
+
+        This :class:`.Connection` instance will remain usable.  When closed
+        (or exited from a context manager context as above),
+        the DB-API connection will be literally closed and not
+        returned to its originating pool.
+
+        This method can be used to insulate the rest of an application
+        from a modified state on a connection (such as a transaction
+        isolation level or similar).
+
+        """
+
+        self.__connection.detach()
+
+    def begin(self):
+        """Begin a transaction and return a transaction handle.
+
+        The returned object is an instance of :class:`.Transaction`.
+        This object represents the "scope" of the transaction,
+        which completes when either the :meth:`.Transaction.rollback`
+        or :meth:`.Transaction.commit` method is called.
+
+        Nested calls to :meth:`.begin` on the same :class:`.Connection`
+        will return new :class:`.Transaction` objects that represent
+        an emulated transaction within the scope of the enclosing
+        transaction, that is::
+
+            trans = conn.begin()   # outermost transaction
+            trans2 = conn.begin()  # "nested"
+            trans2.commit()        # does nothing
+            trans.commit()         # actually commits
+
+        Calls to :meth:`.Transaction.commit` only have an effect
+        when invoked via the outermost :class:`.Transaction` object, though the
+        :meth:`.Transaction.rollback` method of any of the
+        :class:`.Transaction` objects will roll back the
+        transaction.
+
+        See also:
+
+        :meth:`.Connection.begin_nested` - use a SAVEPOINT
+
+        :meth:`.Connection.begin_twophase` - use a two-phase / XID transaction
+
+        :meth:`.Engine.begin` - context manager available from
+        :class:`.Engine`.
+
+        """
+        if self.__branch_from:
+            return self.__branch_from.begin()
+
+        if self.__transaction is None:
+            self.__transaction = RootTransaction(self)
+            return self.__transaction
+        else:
+            return Transaction(self, self.__transaction)
+
+    def begin_nested(self):
+        """Begin a nested transaction and return a transaction handle.
+
+        The returned object is an instance of :class:`.NestedTransaction`.
+
+        Nested transactions require SAVEPOINT support in the
+        underlying database.  Any transaction in the hierarchy may
+        ``commit`` and ``rollback``, however the outermost transaction
+        still controls the overall ``commit`` or ``rollback`` of the
+        transaction as a whole.
+
+        See also :meth:`.Connection.begin`,
+        :meth:`.Connection.begin_twophase`.
+        """
+        if self.__branch_from:
+            return self.__branch_from.begin_nested()
+
+        if self.__transaction is None:
+            self.__transaction = RootTransaction(self)
+        else:
+            self.__transaction = NestedTransaction(self, self.__transaction)
+        return self.__transaction
+
+    def begin_twophase(self, xid=None):
+        """Begin a two-phase or XA transaction and return a transaction
+        handle.
+
+        The returned object is an instance of :class:`.TwoPhaseTransaction`,
+        which in addition to the methods provided by
+        :class:`.Transaction`, also provides a
+        :meth:`~.TwoPhaseTransaction.prepare` method.
+
+        :param xid: the two-phase transaction id.  If not supplied, a
+          random id will be generated.
+
+        See also :meth:`.Connection.begin`,
+        :meth:`.Connection.begin_nested`.
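+
+        E.g. (an illustrative sketch; requires a dialect/DBAPI with
+        two-phase support)::
+
+            xa = conn.begin_twophase()
+            conn.execute("insert into x (a, b) values (1, 2)")
+            xa.prepare()
+            xa.commit()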
+ + """ + + if self.__branch_from: + return self.__branch_from.begin_twophase(xid=xid) + + if self.__transaction is not None: + raise exc.InvalidRequestError( + "Cannot start a two phase transaction when a transaction " + "is already in progress.") + if xid is None: + xid = self.engine.dialect.create_xid() + self.__transaction = TwoPhaseTransaction(self, xid) + return self.__transaction + + def recover_twophase(self): + return self.engine.dialect.do_recover_twophase(self) + + def rollback_prepared(self, xid, recover=False): + self.engine.dialect.do_rollback_twophase(self, xid, recover=recover) + + def commit_prepared(self, xid, recover=False): + self.engine.dialect.do_commit_twophase(self, xid, recover=recover) + + def in_transaction(self): + """Return True if a transaction is in progress.""" + return self._root.__transaction is not None + + def _begin_impl(self, transaction): + assert not self.__branch_from + + if self._echo: + self.engine.logger.info("BEGIN (implicit)") + + if self._has_events or self.engine._has_events: + self.dispatch.begin(self) + + try: + self.engine.dialect.do_begin(self.connection) + if self.connection._reset_agent is None: + self.connection._reset_agent = transaction + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + + def _rollback_impl(self): + assert not self.__branch_from + + if self._has_events or self.engine._has_events: + self.dispatch.rollback(self) + + if self._still_open_and_connection_is_valid: + if self._echo: + self.engine.logger.info("ROLLBACK") + try: + self.engine.dialect.do_rollback(self.connection) + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + finally: + if not self.__invalid and \ + self.connection._reset_agent is self.__transaction: + self.connection._reset_agent = None + self.__transaction = None + else: + self.__transaction = None + + def _commit_impl(self, autocommit=False): + assert not self.__branch_from + + if self._has_events or self.engine._has_events: + self.dispatch.commit(self) + + if self._echo: + self.engine.logger.info("COMMIT") + try: + self.engine.dialect.do_commit(self.connection) + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + finally: + if not self.__invalid and \ + self.connection._reset_agent is self.__transaction: + self.connection._reset_agent = None + self.__transaction = None + + def _savepoint_impl(self, name=None): + assert not self.__branch_from + + if self._has_events or self.engine._has_events: + self.dispatch.savepoint(self, name) + + if name is None: + self.__savepoint_seq += 1 + name = 'sa_savepoint_%s' % self.__savepoint_seq + if self._still_open_and_connection_is_valid: + self.engine.dialect.do_savepoint(self, name) + return name + + def _rollback_to_savepoint_impl(self, name, context): + assert not self.__branch_from + + if self._has_events or self.engine._has_events: + self.dispatch.rollback_savepoint(self, name, context) + + if self._still_open_and_connection_is_valid: + self.engine.dialect.do_rollback_to_savepoint(self, name) + self.__transaction = context + + def _release_savepoint_impl(self, name, context): + assert not self.__branch_from + + if self._has_events or self.engine._has_events: + self.dispatch.release_savepoint(self, name, context) + + if self._still_open_and_connection_is_valid: + self.engine.dialect.do_release_savepoint(self, name) + self.__transaction = context + + def _begin_twophase_impl(self, transaction): + assert not self.__branch_from + + if self._echo: + 
self.engine.logger.info("BEGIN TWOPHASE (implicit)") + if self._has_events or self.engine._has_events: + self.dispatch.begin_twophase(self, transaction.xid) + + if self._still_open_and_connection_is_valid: + self.engine.dialect.do_begin_twophase(self, transaction.xid) + + if self.connection._reset_agent is None: + self.connection._reset_agent = transaction + + def _prepare_twophase_impl(self, xid): + assert not self.__branch_from + + if self._has_events or self.engine._has_events: + self.dispatch.prepare_twophase(self, xid) + + if self._still_open_and_connection_is_valid: + assert isinstance(self.__transaction, TwoPhaseTransaction) + self.engine.dialect.do_prepare_twophase(self, xid) + + def _rollback_twophase_impl(self, xid, is_prepared): + assert not self.__branch_from + + if self._has_events or self.engine._has_events: + self.dispatch.rollback_twophase(self, xid, is_prepared) + + if self._still_open_and_connection_is_valid: + assert isinstance(self.__transaction, TwoPhaseTransaction) + try: + self.engine.dialect.do_rollback_twophase( + self, xid, is_prepared) + finally: + if self.connection._reset_agent is self.__transaction: + self.connection._reset_agent = None + self.__transaction = None + else: + self.__transaction = None + + def _commit_twophase_impl(self, xid, is_prepared): + assert not self.__branch_from + + if self._has_events or self.engine._has_events: + self.dispatch.commit_twophase(self, xid, is_prepared) + + if self._still_open_and_connection_is_valid: + assert isinstance(self.__transaction, TwoPhaseTransaction) + try: + self.engine.dialect.do_commit_twophase(self, xid, is_prepared) + finally: + if self.connection._reset_agent is self.__transaction: + self.connection._reset_agent = None + self.__transaction = None + else: + self.__transaction = None + + def _autorollback(self): + if not self._root.in_transaction(): + self._root._rollback_impl() + + def close(self): + """Close this :class:`.Connection`. + + This results in a release of the underlying database + resources, that is, the DBAPI connection referenced + internally. The DBAPI connection is typically restored + back to the connection-holding :class:`.Pool` referenced + by the :class:`.Engine` that produced this + :class:`.Connection`. Any transactional state present on + the DBAPI connection is also unconditionally released via + the DBAPI connection's ``rollback()`` method, regardless + of any :class:`.Transaction` object that may be + outstanding with regards to this :class:`.Connection`. + + After :meth:`~.Connection.close` is called, the + :class:`.Connection` is permanently in a closed state, + and will allow no further operations. + + """ + if self.__branch_from: + try: + del self.__connection + except AttributeError: + pass + finally: + self.__can_reconnect = False + return + try: + conn = self.__connection + except AttributeError: + pass + else: + + conn.close() + if conn._reset_agent is self.__transaction: + conn._reset_agent = None + + # the close() process can end up invalidating us, + # as the pool will call our transaction as the "reset_agent" + # for rollback(), which can then cause an invalidation + if not self.__invalid: + del self.__connection + self.__can_reconnect = False + self.__transaction = None + + def scalar(self, object, *multiparams, **params): + """Executes and returns the first column of the first row. + + The underlying result/cursor is closed after execution. 
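+
+        E.g. (an illustrative sketch; the table name is a placeholder)::
+
+            count = conn.scalar("select count(*) from some_table")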
+ """ + + return self.execute(object, *multiparams, **params).scalar() + + def execute(self, object, *multiparams, **params): + r"""Executes a SQL statement construct and returns a + :class:`.ResultProxy`. + + :param object: The statement to be executed. May be + one of: + + * a plain string + * any :class:`.ClauseElement` construct that is also + a subclass of :class:`.Executable`, such as a + :func:`~.expression.select` construct + * a :class:`.FunctionElement`, such as that generated + by :data:`.func`, will be automatically wrapped in + a SELECT statement, which is then executed. + * a :class:`.DDLElement` object + * a :class:`.DefaultGenerator` object + * a :class:`.Compiled` object + + :param \*multiparams/\**params: represent bound parameter + values to be used in the execution. Typically, + the format is either a collection of one or more + dictionaries passed to \*multiparams:: + + conn.execute( + table.insert(), + {"id":1, "value":"v1"}, + {"id":2, "value":"v2"} + ) + + ...or individual key/values interpreted by \**params:: + + conn.execute( + table.insert(), id=1, value="v1" + ) + + In the case that a plain SQL string is passed, and the underlying + DBAPI accepts positional bind parameters, a collection of tuples + or individual values in \*multiparams may be passed:: + + conn.execute( + "INSERT INTO table (id, value) VALUES (?, ?)", + (1, "v1"), (2, "v2") + ) + + conn.execute( + "INSERT INTO table (id, value) VALUES (?, ?)", + 1, "v1" + ) + + Note above, the usage of a question mark "?" or other + symbol is contingent upon the "paramstyle" accepted by the DBAPI + in use, which may be any of "qmark", "named", "pyformat", "format", + "numeric". See `pep-249 `_ + for details on paramstyle. + + To execute a textual SQL statement which uses bound parameters in a + DBAPI-agnostic way, use the :func:`~.expression.text` construct. 
+ + """ + if isinstance(object, util.string_types[0]): + return self._execute_text(object, multiparams, params) + try: + meth = object._execute_on_connection + except AttributeError: + raise exc.ObjectNotExecutableError(object) + else: + return meth(self, multiparams, params) + + def _execute_function(self, func, multiparams, params): + """Execute a sql.FunctionElement object.""" + + return self._execute_clauseelement(func.select(), + multiparams, params) + + def _execute_default(self, default, multiparams, params): + """Execute a schema.ColumnDefault object.""" + + if self._has_events or self.engine._has_events: + for fn in self.dispatch.before_execute: + default, multiparams, params = \ + fn(self, default, multiparams, params) + + try: + try: + conn = self.__connection + except AttributeError: + conn = self._revalidate_connection() + + dialect = self.dialect + ctx = dialect.execution_ctx_cls._init_default( + dialect, self, conn) + except BaseException as e: + self._handle_dbapi_exception(e, None, None, None, None) + + ret = ctx._exec_default(default, None) + if self.should_close_with_result: + self.close() + + if self._has_events or self.engine._has_events: + self.dispatch.after_execute(self, + default, multiparams, params, ret) + + return ret + + def _execute_ddl(self, ddl, multiparams, params): + """Execute a schema.DDL object.""" + + if self._has_events or self.engine._has_events: + for fn in self.dispatch.before_execute: + ddl, multiparams, params = \ + fn(self, ddl, multiparams, params) + + dialect = self.dialect + + compiled = ddl.compile( + dialect=dialect, + schema_translate_map=self.schema_for_object + if not self.schema_for_object.is_default else None) + ret = self._execute_context( + dialect, + dialect.execution_ctx_cls._init_ddl, + compiled, + None, + compiled + ) + if self._has_events or self.engine._has_events: + self.dispatch.after_execute(self, + ddl, multiparams, params, ret) + return ret + + def _execute_clauseelement(self, elem, multiparams, params): + """Execute a sql.ClauseElement object.""" + + if self._has_events or self.engine._has_events: + for fn in self.dispatch.before_execute: + elem, multiparams, params = \ + fn(self, elem, multiparams, params) + + distilled_params = _distill_params(multiparams, params) + if distilled_params: + # note this is usually dict but we support RowProxy + # as well; but dict.keys() as an iterable is OK + keys = distilled_params[0].keys() + else: + keys = [] + + dialect = self.dialect + if 'compiled_cache' in self._execution_options: + key = ( + dialect, elem, tuple(sorted(keys)), + self.schema_for_object.hash_key, + len(distilled_params) > 1 + ) + compiled_sql = self._execution_options['compiled_cache'].get(key) + if compiled_sql is None: + compiled_sql = elem.compile( + dialect=dialect, column_keys=keys, + inline=len(distilled_params) > 1, + schema_translate_map=self.schema_for_object + if not self.schema_for_object.is_default else None + ) + self._execution_options['compiled_cache'][key] = compiled_sql + else: + compiled_sql = elem.compile( + dialect=dialect, column_keys=keys, + inline=len(distilled_params) > 1, + schema_translate_map=self.schema_for_object + if not self.schema_for_object.is_default else None) + + ret = self._execute_context( + dialect, + dialect.execution_ctx_cls._init_compiled, + compiled_sql, + distilled_params, + compiled_sql, distilled_params + ) + if self._has_events or self.engine._has_events: + self.dispatch.after_execute(self, + elem, multiparams, params, ret) + return ret + + def _execute_compiled(self, 
compiled, multiparams, params): + """Execute a sql.Compiled object.""" + + if self._has_events or self.engine._has_events: + for fn in self.dispatch.before_execute: + compiled, multiparams, params = \ + fn(self, compiled, multiparams, params) + + dialect = self.dialect + parameters = _distill_params(multiparams, params) + ret = self._execute_context( + dialect, + dialect.execution_ctx_cls._init_compiled, + compiled, + parameters, + compiled, parameters + ) + if self._has_events or self.engine._has_events: + self.dispatch.after_execute(self, + compiled, multiparams, params, ret) + return ret + + def _execute_text(self, statement, multiparams, params): + """Execute a string SQL statement.""" + + if self._has_events or self.engine._has_events: + for fn in self.dispatch.before_execute: + statement, multiparams, params = \ + fn(self, statement, multiparams, params) + + dialect = self.dialect + parameters = _distill_params(multiparams, params) + ret = self._execute_context( + dialect, + dialect.execution_ctx_cls._init_statement, + statement, + parameters, + statement, parameters + ) + if self._has_events or self.engine._has_events: + self.dispatch.after_execute(self, + statement, multiparams, params, ret) + return ret + + def _execute_context(self, dialect, constructor, + statement, parameters, + *args): + """Create an :class:`.ExecutionContext` and execute, returning + a :class:`.ResultProxy`.""" + + try: + try: + conn = self.__connection + except AttributeError: + conn = self._revalidate_connection() + + context = constructor(dialect, self, conn, *args) + except BaseException as e: + self._handle_dbapi_exception( + e, + util.text_type(statement), parameters, + None, None) + + if context.compiled: + context.pre_exec() + + cursor, statement, parameters = context.cursor, \ + context.statement, \ + context.parameters + + if not context.executemany: + parameters = parameters[0] + + if self._has_events or self.engine._has_events: + for fn in self.dispatch.before_cursor_execute: + statement, parameters = \ + fn(self, cursor, statement, parameters, + context, context.executemany) + + if self._echo: + self.engine.logger.info(statement) + self.engine.logger.info( + "%r", + sql_util._repr_params(parameters, batches=10) + ) + + evt_handled = False + try: + if context.executemany: + if self.dialect._has_events: + for fn in self.dialect.dispatch.do_executemany: + if fn(cursor, statement, parameters, context): + evt_handled = True + break + if not evt_handled: + self.dialect.do_executemany( + cursor, + statement, + parameters, + context) + elif not parameters and context.no_parameters: + if self.dialect._has_events: + for fn in self.dialect.dispatch.do_execute_no_params: + if fn(cursor, statement, context): + evt_handled = True + break + if not evt_handled: + self.dialect.do_execute_no_params( + cursor, + statement, + context) + else: + if self.dialect._has_events: + for fn in self.dialect.dispatch.do_execute: + if fn(cursor, statement, parameters, context): + evt_handled = True + break + if not evt_handled: + self.dialect.do_execute( + cursor, + statement, + parameters, + context) + except BaseException as e: + self._handle_dbapi_exception( + e, + statement, + parameters, + cursor, + context) + + if self._has_events or self.engine._has_events: + self.dispatch.after_cursor_execute(self, cursor, + statement, + parameters, + context, + context.executemany) + + if context.compiled: + context.post_exec() + + if context.is_crud or context.is_text: + result = context._setup_crud_result_proxy() + else: + result = 
context.get_result_proxy() + if result._metadata is None: + result._soft_close() + + if context.should_autocommit and self._root.__transaction is None: + self._root._commit_impl(autocommit=True) + + # for "connectionless" execution, we have to close this + # Connection after the statement is complete. + if self.should_close_with_result: + # ResultProxy already exhausted rows / has no rows. + # close us now + if result._soft_closed: + self.close() + else: + # ResultProxy will close this Connection when no more + # rows to fetch. + result._autoclose_connection = True + return result + + def _cursor_execute(self, cursor, statement, parameters, context=None): + """Execute a statement + params on the given cursor. + + Adds appropriate logging and exception handling. + + This method is used by DefaultDialect for special-case + executions, such as for sequences and column defaults. + The path of statement execution in the majority of cases + terminates at _execute_context(). + + """ + if self._has_events or self.engine._has_events: + for fn in self.dispatch.before_cursor_execute: + statement, parameters = \ + fn(self, cursor, statement, parameters, + context, + False) + + if self._echo: + self.engine.logger.info(statement) + self.engine.logger.info("%r", parameters) + try: + for fn in () if not self.dialect._has_events \ + else self.dialect.dispatch.do_execute: + if fn(cursor, statement, parameters, context): + break + else: + self.dialect.do_execute( + cursor, + statement, + parameters, + context) + except BaseException as e: + self._handle_dbapi_exception( + e, + statement, + parameters, + cursor, + context) + + if self._has_events or self.engine._has_events: + self.dispatch.after_cursor_execute(self, cursor, + statement, + parameters, + context, + False) + + def _safe_close_cursor(self, cursor): + """Close the given cursor, catching exceptions + and turning into log warnings. + + """ + try: + cursor.close() + except Exception: + # log the error through the connection pool's logger. 
+ self.engine.pool.logger.error( + "Error closing cursor", exc_info=True) + + _reentrant_error = False + _is_disconnect = False + + def _handle_dbapi_exception(self, + e, + statement, + parameters, + cursor, + context): + exc_info = sys.exc_info() + + if context and context.exception is None: + context.exception = e + + is_exit_exception = not isinstance(e, Exception) + + if not self._is_disconnect: + self._is_disconnect = ( + isinstance(e, self.dialect.dbapi.Error) and + not self.closed and + self.dialect.is_disconnect( + e, + self.__connection if not self.invalidated else None, + cursor) + ) or ( + is_exit_exception and not self.closed + ) + + if context: + context.is_disconnect = self._is_disconnect + + invalidate_pool_on_disconnect = not is_exit_exception + + if self._reentrant_error: + util.raise_from_cause( + exc.DBAPIError.instance(statement, + parameters, + e, + self.dialect.dbapi.Error, + dialect=self.dialect), + exc_info + ) + self._reentrant_error = True + try: + # non-DBAPI error - if we already got a context, + # or there's no string statement, don't wrap it + should_wrap = isinstance(e, self.dialect.dbapi.Error) or \ + (statement is not None + and context is None and not is_exit_exception) + + if should_wrap: + sqlalchemy_exception = exc.DBAPIError.instance( + statement, + parameters, + e, + self.dialect.dbapi.Error, + connection_invalidated=self._is_disconnect, + dialect=self.dialect) + else: + sqlalchemy_exception = None + + newraise = None + + if (self._has_events or self.engine._has_events) and \ + not self._execution_options.get( + 'skip_user_error_events', False): + # legacy dbapi_error event + if should_wrap and context: + self.dispatch.dbapi_error(self, + cursor, + statement, + parameters, + context, + e) + + # new handle_error event + ctx = ExceptionContextImpl( + e, sqlalchemy_exception, self.engine, + self, cursor, statement, + parameters, context, self._is_disconnect, + invalidate_pool_on_disconnect) + + for fn in self.dispatch.handle_error: + try: + # handler returns an exception; + # call next handler in a chain + per_fn = fn(ctx) + if per_fn is not None: + ctx.chained_exception = newraise = per_fn + except Exception as _raised: + # handler raises an exception - stop processing + newraise = _raised + break + + if self._is_disconnect != ctx.is_disconnect: + self._is_disconnect = ctx.is_disconnect + if sqlalchemy_exception: + sqlalchemy_exception.connection_invalidated = \ + ctx.is_disconnect + + # set up potentially user-defined value for + # invalidate pool. 
+ invalidate_pool_on_disconnect = \ + ctx.invalidate_pool_on_disconnect + + if should_wrap and context: + context.handle_dbapi_exception(e) + + if not self._is_disconnect: + if cursor: + self._safe_close_cursor(cursor) + with util.safe_reraise(warn_only=True): + self._autorollback() + + if newraise: + util.raise_from_cause(newraise, exc_info) + elif should_wrap: + util.raise_from_cause( + sqlalchemy_exception, + exc_info + ) + else: + util.reraise(*exc_info) + + finally: + del self._reentrant_error + if self._is_disconnect: + del self._is_disconnect + if not self.invalidated: + dbapi_conn_wrapper = self.__connection + if invalidate_pool_on_disconnect: + self.engine.pool._invalidate(dbapi_conn_wrapper, e) + self.invalidate(e) + if self.should_close_with_result: + self.close() + + @classmethod + def _handle_dbapi_exception_noconnection(cls, e, dialect, engine): + exc_info = sys.exc_info() + + is_disconnect = dialect.is_disconnect(e, None, None) + + should_wrap = isinstance(e, dialect.dbapi.Error) + + if should_wrap: + sqlalchemy_exception = exc.DBAPIError.instance( + None, + None, + e, + dialect.dbapi.Error, + connection_invalidated=is_disconnect) + else: + sqlalchemy_exception = None + + newraise = None + + if engine._has_events: + ctx = ExceptionContextImpl( + e, sqlalchemy_exception, engine, None, None, None, + None, None, is_disconnect, True) + for fn in engine.dispatch.handle_error: + try: + # handler returns an exception; + # call next handler in a chain + per_fn = fn(ctx) + if per_fn is not None: + ctx.chained_exception = newraise = per_fn + except Exception as _raised: + # handler raises an exception - stop processing + newraise = _raised + break + + if sqlalchemy_exception and \ + is_disconnect != ctx.is_disconnect: + sqlalchemy_exception.connection_invalidated = \ + is_disconnect = ctx.is_disconnect + + if newraise: + util.raise_from_cause(newraise, exc_info) + elif should_wrap: + util.raise_from_cause( + sqlalchemy_exception, + exc_info + ) + else: + util.reraise(*exc_info) + + def transaction(self, callable_, *args, **kwargs): + r"""Execute the given function within a transaction boundary. + + The function is passed this :class:`.Connection` + as the first argument, followed by the given \*args and \**kwargs, + e.g.:: + + def do_something(conn, x, y): + conn.execute("some statement", {'x':x, 'y':y}) + + conn.transaction(do_something, 5, 10) + + The operations inside the function are all invoked within the + context of a single :class:`.Transaction`. + Upon success, the transaction is committed. If an + exception is raised, the transaction is rolled back + before propagating the exception. + + .. note:: + + The :meth:`.transaction` method is superseded by + the usage of the Python ``with:`` statement, which can + be used with :meth:`.Connection.begin`:: + + with conn.begin(): + conn.execute("some statement", {'x':5, 'y':10}) + + As well as with :meth:`.Engine.begin`:: + + with engine.begin() as conn: + conn.execute("some statement", {'x':5, 'y':10}) + + See also: + + :meth:`.Engine.begin` - engine-level transactional + context + + :meth:`.Engine.transaction` - engine-level version of + :meth:`.Connection.transaction` + + """ + + trans = self.begin() + try: + ret = self.run_callable(callable_, *args, **kwargs) + trans.commit() + return ret + except: + with util.safe_reraise(): + trans.rollback() + + def run_callable(self, callable_, *args, **kwargs): + r"""Given a callable object or function, execute it, passing + a :class:`.Connection` as the first argument. 
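+
+        E.g. (an illustrative sketch; ``do_something`` is a hypothetical
+        function)::
+
+            def do_something(conn, x, y):
+                return conn.execute("some statement", {'x': x, 'y': y})
+
+            conn.run_callable(do_something, 5, 10)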
+ + The given \*args and \**kwargs are passed subsequent + to the :class:`.Connection` argument. + + This function, along with :meth:`.Engine.run_callable`, + allows a function to be run with a :class:`.Connection` + or :class:`.Engine` object without the need to know + which one is being dealt with. + + """ + return callable_(self, *args, **kwargs) + + def _run_visitor(self, visitorcallable, element, **kwargs): + visitorcallable(self.dialect, self, + **kwargs).traverse_single(element) + + +class ExceptionContextImpl(ExceptionContext): + """Implement the :class:`.ExceptionContext` interface.""" + + def __init__(self, exception, sqlalchemy_exception, + engine, connection, cursor, statement, parameters, + context, is_disconnect, invalidate_pool_on_disconnect): + self.engine = engine + self.connection = connection + self.sqlalchemy_exception = sqlalchemy_exception + self.original_exception = exception + self.execution_context = context + self.statement = statement + self.parameters = parameters + self.is_disconnect = is_disconnect + self.invalidate_pool_on_disconnect = invalidate_pool_on_disconnect + + +class Transaction(object): + """Represent a database transaction in progress. + + The :class:`.Transaction` object is procured by + calling the :meth:`~.Connection.begin` method of + :class:`.Connection`:: + + from sqlalchemy import create_engine + engine = create_engine("postgresql://scott:tiger@localhost/test") + connection = engine.connect() + trans = connection.begin() + connection.execute("insert into x (a, b) values (1, 2)") + trans.commit() + + The object provides :meth:`.rollback` and :meth:`.commit` + methods in order to control transaction boundaries. It + also implements a context manager interface so that + the Python ``with`` statement can be used with the + :meth:`.Connection.begin` method:: + + with connection.begin(): + connection.execute("insert into x (a, b) values (1, 2)") + + The Transaction object is **not** threadsafe. + + See also: :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`, + :meth:`.Connection.begin_nested`. + + .. index:: + single: thread safety; Transaction + """ + + def __init__(self, connection, parent): + self.connection = connection + self._actual_parent = parent + self.is_active = True + + @property + def _parent(self): + return self._actual_parent or self + + def close(self): + """Close this :class:`.Transaction`. + + If this transaction is the base transaction in a begin/commit + nesting, the transaction will rollback(). Otherwise, the + method returns. + + This is used to cancel a Transaction without affecting the scope of + an enclosing transaction. + + """ + if not self._parent.is_active: + return + if self._parent is self: + self.rollback() + + def rollback(self): + """Roll back this :class:`.Transaction`. 
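+
+        E.g. (an illustrative sketch of explicit rollback on error)::
+
+            trans = connection.begin()
+            try:
+                connection.execute("insert into x (a, b) values (1, 2)")
+                trans.commit()
+            except:
+                trans.rollback()
+                raise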
+ + """ + if not self._parent.is_active: + return + self._do_rollback() + self.is_active = False + + def _do_rollback(self): + self._parent.rollback() + + def commit(self): + """Commit this :class:`.Transaction`.""" + + if not self._parent.is_active: + raise exc.InvalidRequestError("This transaction is inactive") + self._do_commit() + self.is_active = False + + def _do_commit(self): + pass + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + if type is None and self.is_active: + try: + self.commit() + except: + with util.safe_reraise(): + self.rollback() + else: + self.rollback() + + +class RootTransaction(Transaction): + def __init__(self, connection): + super(RootTransaction, self).__init__(connection, None) + self.connection._begin_impl(self) + + def _do_rollback(self): + if self.is_active: + self.connection._rollback_impl() + + def _do_commit(self): + if self.is_active: + self.connection._commit_impl() + + +class NestedTransaction(Transaction): + """Represent a 'nested', or SAVEPOINT transaction. + + A new :class:`.NestedTransaction` object may be procured + using the :meth:`.Connection.begin_nested` method. + + The interface is the same as that of :class:`.Transaction`. + + """ + + def __init__(self, connection, parent): + super(NestedTransaction, self).__init__(connection, parent) + self._savepoint = self.connection._savepoint_impl() + + def _do_rollback(self): + if self.is_active: + self.connection._rollback_to_savepoint_impl( + self._savepoint, self._parent) + + def _do_commit(self): + if self.is_active: + self.connection._release_savepoint_impl( + self._savepoint, self._parent) + + +class TwoPhaseTransaction(Transaction): + """Represent a two-phase transaction. + + A new :class:`.TwoPhaseTransaction` object may be procured + using the :meth:`.Connection.begin_twophase` method. + + The interface is the same as that of :class:`.Transaction` + with the addition of the :meth:`prepare` method. + + """ + + def __init__(self, connection, xid): + super(TwoPhaseTransaction, self).__init__(connection, None) + self._is_prepared = False + self.xid = xid + self.connection._begin_twophase_impl(self) + + def prepare(self): + """Prepare this :class:`.TwoPhaseTransaction`. + + After a PREPARE, the transaction can be committed. + + """ + if not self._parent.is_active: + raise exc.InvalidRequestError("This transaction is inactive") + self.connection._prepare_twophase_impl(self.xid) + self._is_prepared = True + + def _do_rollback(self): + self.connection._rollback_twophase_impl(self.xid, self._is_prepared) + + def _do_commit(self): + self.connection._commit_twophase_impl(self.xid, self._is_prepared) + + +class Engine(Connectable, log.Identified): + """ + Connects a :class:`~sqlalchemy.pool.Pool` and + :class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a + source of database connectivity and behavior. + + An :class:`.Engine` object is instantiated publicly using the + :func:`~sqlalchemy.create_engine` function. + + See also: + + :doc:`/core/engines` + + :ref:`connections_toplevel` + + """ + + _execution_options = util.immutabledict() + _has_events = False + _connection_cls = Connection + + schema_for_object = schema._schema_getter(None) + """Return the ".schema" attribute for an object. + + Used for :class:`.Table`, :class:`.Sequence` and similar objects, + and takes into account + the :paramref:`.Connection.execution_options.schema_translate_map` + parameter. + + .. versionadded:: 1.1 + + .. 
seealso::
+
+        :ref:`schema_translating`
+
+    """
+
+    def __init__(self, pool, dialect, url,
+                 logging_name=None, echo=None, proxy=None,
+                 execution_options=None
+                 ):
+        self.pool = pool
+        self.url = url
+        self.dialect = dialect
+        if logging_name:
+            self.logging_name = logging_name
+        self.echo = echo
+        self.engine = self
+        log.instance_logger(self, echoflag=echo)
+        if proxy:
+            interfaces.ConnectionProxy._adapt_listener(self, proxy)
+        if execution_options:
+            self.update_execution_options(**execution_options)
+
+    def update_execution_options(self, **opt):
+        r"""Update the default execution_options dictionary
+        of this :class:`.Engine`.
+
+        The given keys/values in \**opt are added to the
+        default execution options that will be used for
+        all connections.  The initial contents of this dictionary
+        can be sent via the ``execution_options`` parameter
+        to :func:`.create_engine`.
+
+        .. seealso::
+
+            :meth:`.Connection.execution_options`
+
+            :meth:`.Engine.execution_options`
+
+        """
+        self._execution_options = \
+            self._execution_options.union(opt)
+        self.dispatch.set_engine_execution_options(self, opt)
+        self.dialect.set_engine_execution_options(self, opt)
+
+    def execution_options(self, **opt):
+        """Return a new :class:`.Engine` that will provide
+        :class:`.Connection` objects with the given execution options.
+
+        The returned :class:`.Engine` remains related to the original
+        :class:`.Engine` in that it shares the same connection pool and
+        other state:
+
+        * The :class:`.Pool` used by the new :class:`.Engine` is the
+          same instance.  The :meth:`.Engine.dispose` method will replace
+          the connection pool instance for the parent engine as well
+          as this one.
+        * Event listeners are "cascaded" - meaning, the new :class:`.Engine`
+          inherits the events of the parent, and new events can be associated
+          with the new :class:`.Engine` individually.
+        * The logging configuration and logging_name is copied from the parent
+          :class:`.Engine`.
+
+        The intent of the :meth:`.Engine.execution_options` method is
+        to implement "sharding" schemes where multiple :class:`.Engine`
+        objects refer to the same connection pool, but are differentiated
+        by options that would be consumed by a custom event::
+
+            primary_engine = create_engine("mysql://")
+            shard1 = primary_engine.execution_options(shard_id="shard1")
+            shard2 = primary_engine.execution_options(shard_id="shard2")
+
+        Above, the ``shard1`` engine serves as a factory for
+        :class:`.Connection` objects that will contain the execution option
+        ``shard_id=shard1``, and ``shard2`` will produce :class:`.Connection`
+        objects that contain the execution option ``shard_id=shard2``.
+
+        An event handler can consume the above execution option to perform
+        a schema switch or other operation, given a connection.  Below
+        we emit a MySQL ``use`` statement to switch databases, at the same
+        time keeping track of which database we've established using the
+        :attr:`.Connection.info` dictionary, which gives us a persistent
+        storage space that follows the DBAPI connection::
+
+            from sqlalchemy import event
+            from sqlalchemy.engine import Engine
+
+            shards = {"default": "base", "shard1": "db1", "shard2": "db2"}
+
+            @event.listens_for(Engine, "before_cursor_execute")
+            def _switch_shard(conn, cursor, stmt,
+                              params, context, executemany):
+                shard_id = conn._execution_options.get('shard_id', "default")
+                current_shard = conn.info.get("current_shard", None)
+
+                if current_shard != shard_id:
+                    cursor.execute("use %s" % shards[shard_id])
+                    conn.info["current_shard"] = shard_id
+
+        ..
versionadded:: 0.8 + + .. seealso:: + + :meth:`.Connection.execution_options` - update execution options + on a :class:`.Connection` object. + + :meth:`.Engine.update_execution_options` - update the execution + options for a given :class:`.Engine` in place. + + """ + return OptionEngine(self, opt) + + @property + def name(self): + """String name of the :class:`~sqlalchemy.engine.interfaces.Dialect` + in use by this :class:`Engine`.""" + + return self.dialect.name + + @property + def driver(self): + """Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect` + in use by this :class:`Engine`.""" + + return self.dialect.driver + + echo = log.echo_property() + + def __repr__(self): + return 'Engine(%r)' % self.url + + def dispose(self): + """Dispose of the connection pool used by this :class:`.Engine`. + + This has the effect of fully closing all **currently checked in** + database connections. Connections that are still checked out + will **not** be closed, however they will no longer be associated + with this :class:`.Engine`, so when they are closed individually, + eventually the :class:`.Pool` which they are associated with will + be garbage collected and they will be closed out fully, if + not already closed on checkin. + + A new connection pool is created immediately after the old one has + been disposed. This new pool, like all SQLAlchemy connection pools, + does not make any actual connections to the database until one is + first requested, so as long as the :class:`.Engine` isn't used again, + no new connections will be made. + + .. seealso:: + + :ref:`engine_disposal` + + """ + self.pool.dispose() + self.pool = self.pool.recreate() + self.dispatch.engine_disposed(self) + + def _execute_default(self, default): + with self.contextual_connect() as conn: + return conn._execute_default(default, (), {}) + + @contextlib.contextmanager + def _optional_conn_ctx_manager(self, connection=None): + if connection is None: + with self.contextual_connect() as conn: + yield conn + else: + yield connection + + def _run_visitor(self, visitorcallable, element, + connection=None, **kwargs): + with self._optional_conn_ctx_manager(connection) as conn: + conn._run_visitor(visitorcallable, element, **kwargs) + + class _trans_ctx(object): + def __init__(self, conn, transaction, close_with_result): + self.conn = conn + self.transaction = transaction + self.close_with_result = close_with_result + + def __enter__(self): + return self.conn + + def __exit__(self, type, value, traceback): + if type is not None: + self.transaction.rollback() + else: + self.transaction.commit() + if not self.close_with_result: + self.conn.close() + + def begin(self, close_with_result=False): + """Return a context manager delivering a :class:`.Connection` + with a :class:`.Transaction` established. + + E.g.:: + + with engine.begin() as conn: + conn.execute("insert into table (x, y, z) values (1, 2, 3)") + conn.execute("my_special_procedure(5)") + + Upon successful operation, the :class:`.Transaction` + is committed. If an error is raised, the :class:`.Transaction` + is rolled back. + + The ``close_with_result`` flag is normally ``False``, and indicates + that the :class:`.Connection` will be closed when the operation + is complete. When set to ``True``, it indicates the + :class:`.Connection` is in "single use" mode, where the + :class:`.ResultProxy` returned by the first call to + :meth:`.Connection.execute` will close the :class:`.Connection` when + that :class:`.ResultProxy` has exhausted all result rows. + + .. 
versionadded:: 0.7.6 + + See also: + + :meth:`.Engine.connect` - procure a :class:`.Connection` from + an :class:`.Engine`. + + :meth:`.Connection.begin` - start a :class:`.Transaction` + for a particular :class:`.Connection`. + + """ + conn = self.contextual_connect(close_with_result=close_with_result) + try: + trans = conn.begin() + except: + with util.safe_reraise(): + conn.close() + return Engine._trans_ctx(conn, trans, close_with_result) + + def transaction(self, callable_, *args, **kwargs): + r"""Execute the given function within a transaction boundary. + + The function is passed a :class:`.Connection` newly procured + from :meth:`.Engine.contextual_connect` as the first argument, + followed by the given \*args and \**kwargs. + + e.g.:: + + def do_something(conn, x, y): + conn.execute("some statement", {'x':x, 'y':y}) + + engine.transaction(do_something, 5, 10) + + The operations inside the function are all invoked within the + context of a single :class:`.Transaction`. + Upon success, the transaction is committed. If an + exception is raised, the transaction is rolled back + before propagating the exception. + + .. note:: + + The :meth:`.transaction` method is superseded by + the usage of the Python ``with:`` statement, which can + be used with :meth:`.Engine.begin`:: + + with engine.begin() as conn: + conn.execute("some statement", {'x':5, 'y':10}) + + See also: + + :meth:`.Engine.begin` - engine-level transactional + context + + :meth:`.Connection.transaction` - connection-level version of + :meth:`.Engine.transaction` + + """ + + with self.contextual_connect() as conn: + return conn.transaction(callable_, *args, **kwargs) + + def run_callable(self, callable_, *args, **kwargs): + r"""Given a callable object or function, execute it, passing + a :class:`.Connection` as the first argument. + + The given \*args and \**kwargs are passed subsequent + to the :class:`.Connection` argument. + + This function, along with :meth:`.Connection.run_callable`, + allows a function to be run with a :class:`.Connection` + or :class:`.Engine` object without the need to know + which one is being dealt with. + + """ + with self.contextual_connect() as conn: + return conn.run_callable(callable_, *args, **kwargs) + + def execute(self, statement, *multiparams, **params): + """Executes the given construct and returns a :class:`.ResultProxy`. + + The arguments are the same as those used by + :meth:`.Connection.execute`. + + Here, a :class:`.Connection` is acquired using the + :meth:`~.Engine.contextual_connect` method, and the statement executed + with that connection. The returned :class:`.ResultProxy` is flagged + such that when the :class:`.ResultProxy` is exhausted and its + underlying cursor is closed, the :class:`.Connection` created here + will also be closed, which allows its associated DBAPI connection + resource to be returned to the connection pool. 
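+
+        E.g. (an illustrative sketch; the table name is a placeholder)::
+
+            result = engine.execute("select * from some_table")
+            for row in result:
+                print(row)
+            # once all rows are exhausted, the underlying Connection
+            # is returned to the connection pool automatically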
+ + """ + + connection = self.contextual_connect(close_with_result=True) + return connection.execute(statement, *multiparams, **params) + + def scalar(self, statement, *multiparams, **params): + return self.execute(statement, *multiparams, **params).scalar() + + def _execute_clauseelement(self, elem, multiparams=None, params=None): + connection = self.contextual_connect(close_with_result=True) + return connection._execute_clauseelement(elem, multiparams, params) + + def _execute_compiled(self, compiled, multiparams, params): + connection = self.contextual_connect(close_with_result=True) + return connection._execute_compiled(compiled, multiparams, params) + + def connect(self, **kwargs): + """Return a new :class:`.Connection` object. + + The :class:`.Connection` object is a facade that uses a DBAPI + connection internally in order to communicate with the database. This + connection is procured from the connection-holding :class:`.Pool` + referenced by this :class:`.Engine`. When the + :meth:`~.Connection.close` method of the :class:`.Connection` object + is called, the underlying DBAPI connection is then returned to the + connection pool, where it may be used again in a subsequent call to + :meth:`~.Engine.connect`. + + """ + + return self._connection_cls(self, **kwargs) + + def contextual_connect(self, close_with_result=False, **kwargs): + """Return a :class:`.Connection` object which may be part of some + ongoing context. + + By default, this method does the same thing as :meth:`.Engine.connect`. + Subclasses of :class:`.Engine` may override this method + to provide contextual behavior. + + :param close_with_result: When True, the first :class:`.ResultProxy` + created by the :class:`.Connection` will call the + :meth:`.Connection.close` method of that connection as soon as any + pending result rows are exhausted. This is used to supply the + "connectionless execution" behavior provided by the + :meth:`.Engine.execute` method. + + """ + + return self._connection_cls( + self, + self._wrap_pool_connect(self.pool.connect, None), + close_with_result=close_with_result, + **kwargs) + + def table_names(self, schema=None, connection=None): + """Return a list of all table names available in the database. + + :param schema: Optional, retrieve names from a non-default schema. + + :param connection: Optional, use a specified connection. Default is + the ``contextual_connect`` for this ``Engine``. + """ + + with self._optional_conn_ctx_manager(connection) as conn: + if not schema: + schema = self.dialect.default_schema_name + return self.dialect.get_table_names(conn, schema) + + def has_table(self, table_name, schema=None): + """Return True if the given backend has a table of the given name. + + .. seealso:: + + :ref:`metadata_reflection_inspector` - detailed schema inspection + using the :class:`.Inspector` interface. + + :class:`.quoted_name` - used to pass quoting information along + with a schema identifier. + + """ + return self.run_callable(self.dialect.has_table, table_name, schema) + + def _wrap_pool_connect(self, fn, connection): + dialect = self.dialect + try: + return fn() + except dialect.dbapi.Error as e: + if connection is None: + Connection._handle_dbapi_exception_noconnection( + e, dialect, self) + else: + util.reraise(*sys.exc_info()) + + def raw_connection(self, _connection=None): + """Return a "raw" DBAPI connection from the connection pool. + + The returned object is a proxied version of the DBAPI + connection object used by the underlying driver in use. 
+ The object will have all the same behavior as the real DBAPI + connection, except that its ``close()`` method will result in the + connection being returned to the pool, rather than being closed + for real. + + This method provides direct DBAPI connection access for + special situations when the API provided by :class:`.Connection` + is not needed. When a :class:`.Connection` object is already + present, the DBAPI connection is available using + the :attr:`.Connection.connection` accessor. + + .. seealso:: + + :ref:`dbapi_connections` + + """ + return self._wrap_pool_connect( + self.pool.unique_connection, _connection) + + +class OptionEngine(Engine): + def __init__(self, proxied, execution_options): + self._proxied = proxied + self.url = proxied.url + self.dialect = proxied.dialect + self.logging_name = proxied.logging_name + self.echo = proxied.echo + log.instance_logger(self, echoflag=self.echo) + self.dispatch = self.dispatch._join(proxied.dispatch) + self._execution_options = proxied._execution_options + self.update_execution_options(**execution_options) + + def _get_pool(self): + return self._proxied.pool + + def _set_pool(self, pool): + self._proxied.pool = pool + + pool = property(_get_pool, _set_pool) + + def _get_has_events(self): + return self._proxied._has_events or \ + self.__dict__.get('_has_events', False) + + def _set_has_events(self, value): + self.__dict__['_has_events'] = value + + _has_events = property(_get_has_events, _set_has_events) diff --git a/app/lib/sqlalchemy/engine/default.py b/app/lib/sqlalchemy/engine/default.py new file mode 100644 index 0000000..bcc78be --- /dev/null +++ b/app/lib/sqlalchemy/engine/default.py @@ -0,0 +1,1119 @@ +# engine/default.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Default implementations of per-dialect sqlalchemy.engine classes. + +These are semi-private implementation classes which are only of importance +to database dialect authors; dialects will usually use the classes here +as the base class for their own corresponding classes. + +""" + +import re +import random +from . import reflection, interfaces, result +from ..sql import compiler, expression, schema +from .. import types as sqltypes +from .. import exc, util, pool, processors +import codecs +import weakref +from .. import event + +AUTOCOMMIT_REGEXP = re.compile( + r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER)', + re.I | re.UNICODE) + +# When we're handed literal SQL, ensure it's a SELECT query +SERVER_SIDE_CURSOR_RE = re.compile( + r'\s*SELECT', + re.I | re.UNICODE) + + +class DefaultDialect(interfaces.Dialect): + """Default implementation of Dialect""" + + statement_compiler = compiler.SQLCompiler + ddl_compiler = compiler.DDLCompiler + type_compiler = compiler.GenericTypeCompiler + preparer = compiler.IdentifierPreparer + supports_alter = True + + # the first value we'd get for an autoincrement + # column. + default_sequence_base = 1 + + # most DBAPIs happy with this for execute(). + # not cx_oracle. 
execute_sequence_format = tuple
+
+    supports_views = True
+    supports_sequences = False
+    sequences_optional = False
+    preexecute_autoincrement_sequences = False
+    postfetch_lastrowid = True
+    implicit_returning = False
+
+    supports_right_nested_joins = True
+
+    supports_native_enum = False
+    supports_native_boolean = False
+
+    supports_simple_order_by_label = True
+
+    engine_config_types = util.immutabledict([
+        ('convert_unicode', util.bool_or_str('force')),
+        ('pool_timeout', util.asint),
+        ('echo', util.bool_or_str('debug')),
+        ('echo_pool', util.bool_or_str('debug')),
+        ('pool_recycle', util.asint),
+        ('pool_size', util.asint),
+        ('max_overflow', util.asint),
+        ('pool_threadlocal', util.asbool),
+    ])
+
+    # if the NUMERIC type
+    # returns decimal.Decimal.
+    # *not* the FLOAT type however.
+    supports_native_decimal = False
+
+    if util.py3k:
+        supports_unicode_statements = True
+        supports_unicode_binds = True
+        returns_unicode_strings = True
+        description_encoding = None
+    else:
+        supports_unicode_statements = False
+        supports_unicode_binds = False
+        returns_unicode_strings = False
+        description_encoding = 'use_encoding'
+
+    name = 'default'
+
+    # length at which to truncate
+    # any identifier.
+    max_identifier_length = 9999
+
+    # length at which to truncate
+    # the name of an index.
+    # Usually None to indicate
+    # 'use max_identifier_length'.
+    # thanks to MySQL, sigh
+    max_index_name_length = None
+
+    supports_sane_rowcount = True
+    supports_sane_multi_rowcount = True
+    dbapi_type_map = {}
+    colspecs = {}
+    default_paramstyle = 'named'
+    supports_default_values = False
+    supports_empty_insert = True
+    supports_multivalues_insert = False
+
+    supports_server_side_cursors = False
+
+    server_version_info = None
+
+    construct_arguments = None
+    """Optional set of argument specifiers for various SQLAlchemy
+    constructs, typically schema items.
+
+    To implement, establish as a series of tuples, as in::
+
+        construct_arguments = [
+            (schema.Index, {
+                "using": False,
+                "where": None,
+                "ops": None
+            })
+        ]
+
+    If the above construct is established on the PostgreSQL dialect,
+    the :class:`.Index` construct will now accept the keyword arguments
+    ``postgresql_using``, ``postgresql_where``, and ``postgresql_ops``.
+    Any other argument specified to the constructor of :class:`.Index`
+    which is prefixed with ``postgresql_`` will raise :class:`.ArgumentError`.
+
+    A dialect which does not include a ``construct_arguments`` member will
+    not participate in the argument validation system.  For such a dialect,
+    any argument name is accepted by all participating constructs, within
+    the namespace of arguments prefixed with that dialect name.  The rationale
+    here is so that third-party dialects that haven't yet implemented this
+    feature continue to function in the old way.
+
+    .. versionadded:: 0.9.2
+
+    .. seealso::
+
+        :class:`.DialectKWArgs` - implementing base class which consumes
+        :attr:`.DefaultDialect.construct_arguments`
+
+
+    """
+
+    # indicates symbol names are
+    # UPPERCASEd if they are case insensitive
+    # within the database.
+    # if this is True, the methods normalize_name()
+    # and denormalize_name() must be provided.
+    requires_name_normalize = False
+
+    reflection_options = ()
+
+    dbapi_exception_translation_map = util.immutabledict()
+    """mapping used in the extremely unusual case that a DBAPI's
+    published exceptions don't actually have the __name__ that they
+    are linked towards.
+
+    ..
versionadded:: 1.0.5 + + """ + + def __init__(self, convert_unicode=False, + encoding='utf-8', paramstyle=None, dbapi=None, + implicit_returning=None, + supports_right_nested_joins=None, + case_sensitive=True, + supports_native_boolean=None, + label_length=None, **kwargs): + + if not getattr(self, 'ported_sqla_06', True): + util.warn( + "The %s dialect is not yet ported to the 0.6 format" % + self.name) + + self.convert_unicode = convert_unicode + self.encoding = encoding + self.positional = False + self._ischema = None + self.dbapi = dbapi + if paramstyle is not None: + self.paramstyle = paramstyle + elif self.dbapi is not None: + self.paramstyle = self.dbapi.paramstyle + else: + self.paramstyle = self.default_paramstyle + if implicit_returning is not None: + self.implicit_returning = implicit_returning + self.positional = self.paramstyle in ('qmark', 'format', 'numeric') + self.identifier_preparer = self.preparer(self) + self.type_compiler = self.type_compiler(self) + if supports_right_nested_joins is not None: + self.supports_right_nested_joins = supports_right_nested_joins + if supports_native_boolean is not None: + self.supports_native_boolean = supports_native_boolean + self.case_sensitive = case_sensitive + + if label_length and label_length > self.max_identifier_length: + raise exc.ArgumentError( + "Label length of %d is greater than this dialect's" + " maximum identifier length of %d" % + (label_length, self.max_identifier_length)) + self.label_length = label_length + + if self.description_encoding == 'use_encoding': + self._description_decoder = \ + processors.to_unicode_processor_factory( + encoding + ) + elif self.description_encoding is not None: + self._description_decoder = \ + processors.to_unicode_processor_factory( + self.description_encoding + ) + self._encoder = codecs.getencoder(self.encoding) + self._decoder = processors.to_unicode_processor_factory(self.encoding) + + @util.memoized_property + def _type_memos(self): + return weakref.WeakKeyDictionary() + + @property + def dialect_description(self): + return self.name + "+" + self.driver + + @classmethod + def get_pool_class(cls, url): + return getattr(cls, 'poolclass', pool.QueuePool) + + def initialize(self, connection): + try: + self.server_version_info = \ + self._get_server_version_info(connection) + except NotImplementedError: + self.server_version_info = None + try: + self.default_schema_name = \ + self._get_default_schema_name(connection) + except NotImplementedError: + self.default_schema_name = None + + try: + self.default_isolation_level = \ + self.get_isolation_level(connection.connection) + except NotImplementedError: + self.default_isolation_level = None + + self.returns_unicode_strings = self._check_unicode_returns(connection) + + if self.description_encoding is not None and \ + self._check_unicode_description(connection): + self._description_decoder = self.description_encoding = None + + self.do_rollback(connection.connection) + + def on_connect(self): + """return a callable which sets up a newly created DBAPI connection. + + This is used to set dialect-wide per-connection options such as + isolation modes, unicode modes, etc. + + If a callable is returned, it will be assembled into a pool listener + that receives the direct DBAPI connection, with all wrappers removed. + + If None is returned, no listener will be generated. 
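+
+        E.g. (an illustrative sketch of a dialect-level override; the
+        session variable shown is hypothetical and backend-specific)::
+
+            def on_connect(self):
+                def do_on_connect(dbapi_connection):
+                    cursor = dbapi_connection.cursor()
+                    cursor.execute("SET some_session_variable = 'value'")
+                    cursor.close()
+                return do_on_connect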
+ + """ + return None + + def _check_unicode_returns(self, connection, additional_tests=None): + if util.py2k and not self.supports_unicode_statements: + cast_to = util.binary_type + else: + cast_to = util.text_type + + if self.positional: + parameters = self.execute_sequence_format() + else: + parameters = {} + + def check_unicode(test): + statement = cast_to( + expression.select([test]).compile(dialect=self)) + try: + cursor = connection.connection.cursor() + connection._cursor_execute(cursor, statement, parameters) + row = cursor.fetchone() + cursor.close() + except exc.DBAPIError as de: + # note that _cursor_execute() will have closed the cursor + # if an exception is thrown. + util.warn("Exception attempting to " + "detect unicode returns: %r" % de) + return False + else: + return isinstance(row[0], util.text_type) + + tests = [ + # detect plain VARCHAR + expression.cast( + expression.literal_column("'test plain returns'"), + sqltypes.VARCHAR(60) + ), + # detect if there's an NVARCHAR type with different behavior + # available + expression.cast( + expression.literal_column("'test unicode returns'"), + sqltypes.Unicode(60) + ), + ] + + if additional_tests: + tests += additional_tests + + results = set([check_unicode(test) for test in tests]) + + if results.issuperset([True, False]): + return "conditional" + else: + return results == set([True]) + + def _check_unicode_description(self, connection): + # all DBAPIs on Py2K return cursor.description as encoded, + # until pypy2.1beta2 with sqlite, so let's just check it - + # it's likely others will start doing this too in Py2k. + + if util.py2k and not self.supports_unicode_statements: + cast_to = util.binary_type + else: + cast_to = util.text_type + + cursor = connection.connection.cursor() + try: + cursor.execute( + cast_to( + expression.select([ + expression.literal_column("'x'").label("some_label") + ]).compile(dialect=self) + ) + ) + return isinstance(cursor.description[0][0], util.text_type) + finally: + cursor.close() + + def type_descriptor(self, typeobj): + """Provide a database-specific :class:`.TypeEngine` object, given + the generic object which comes from the types module. + + This method looks for a dictionary called + ``colspecs`` as a class or instance-level variable, + and passes on to :func:`.types.adapt_type`. + + """ + return sqltypes.adapt_type(typeobj, self.colspecs) + + def reflecttable( + self, connection, table, include_columns, exclude_columns, **opts): + insp = reflection.Inspector.from_engine(connection) + return insp.reflecttable( + table, include_columns, exclude_columns, **opts) + + def get_pk_constraint(self, conn, table_name, schema=None, **kw): + """Compatibility method, adapts the result of get_primary_keys() + for those dialects which don't implement get_pk_constraint(). 
+ + """ + return { + 'constrained_columns': + self.get_primary_keys(conn, table_name, + schema=schema, **kw) + } + + def validate_identifier(self, ident): + if len(ident) > self.max_identifier_length: + raise exc.IdentifierError( + "Identifier '%s' exceeds maximum length of %d characters" % + (ident, self.max_identifier_length) + ) + + def connect(self, *cargs, **cparams): + return self.dbapi.connect(*cargs, **cparams) + + def create_connect_args(self, url): + opts = url.translate_connect_args() + opts.update(url.query) + return [[], opts] + + def set_engine_execution_options(self, engine, opts): + if 'isolation_level' in opts: + isolation_level = opts['isolation_level'] + + @event.listens_for(engine, "engine_connect") + def set_isolation(connection, branch): + if not branch: + self._set_connection_isolation(connection, isolation_level) + + if 'schema_translate_map' in opts: + getter = schema._schema_getter(opts['schema_translate_map']) + engine.schema_for_object = getter + + @event.listens_for(engine, "engine_connect") + def set_schema_translate_map(connection, branch): + connection.schema_for_object = getter + + def set_connection_execution_options(self, connection, opts): + if 'isolation_level' in opts: + self._set_connection_isolation(connection, opts['isolation_level']) + + if 'schema_translate_map' in opts: + getter = schema._schema_getter(opts['schema_translate_map']) + connection.schema_for_object = getter + + def _set_connection_isolation(self, connection, level): + if connection.in_transaction(): + util.warn( + "Connection is already established with a Transaction; " + "setting isolation_level may implicitly rollback or commit " + "the existing transaction, or have no effect until " + "next transaction") + self.set_isolation_level(connection.connection, level) + connection.connection._connection_record.\ + finalize_callback.append(self.reset_isolation_level) + + def do_begin(self, dbapi_connection): + pass + + def do_rollback(self, dbapi_connection): + dbapi_connection.rollback() + + def do_commit(self, dbapi_connection): + dbapi_connection.commit() + + def do_close(self, dbapi_connection): + dbapi_connection.close() + + def create_xid(self): + """Create a random two-phase transaction ID. + + This id will be passed to do_begin_twophase(), do_rollback_twophase(), + do_commit_twophase(). Its format is unspecified. + """ + + return "_sa_%032x" % random.randint(0, 2 ** 128) + + def do_savepoint(self, connection, name): + connection.execute(expression.SavepointClause(name)) + + def do_rollback_to_savepoint(self, connection, name): + connection.execute(expression.RollbackToSavepointClause(name)) + + def do_release_savepoint(self, connection, name): + connection.execute(expression.ReleaseSavepointClause(name)) + + def do_executemany(self, cursor, statement, parameters, context=None): + cursor.executemany(statement, parameters) + + def do_execute(self, cursor, statement, parameters, context=None): + cursor.execute(statement, parameters) + + def do_execute_no_params(self, cursor, statement, context=None): + cursor.execute(statement) + + def is_disconnect(self, e, connection, cursor): + return False + + def reset_isolation_level(self, dbapi_conn): + # default_isolation_level is read from the first connection + # after the initial set of 'isolation_level', if any, so is + # the configured default of this dialect. 
+ self.set_isolation_level(dbapi_conn, self.default_isolation_level) + + +class StrCompileDialect(DefaultDialect): + + statement_compiler = compiler.StrSQLCompiler + ddl_compiler = compiler.DDLCompiler + type_compiler = compiler.StrSQLTypeCompiler + preparer = compiler.IdentifierPreparer + + supports_sequences = True + sequences_optional = True + preexecute_autoincrement_sequences = False + implicit_returning = False + + supports_native_boolean = True + + supports_simple_order_by_label = True + + +class DefaultExecutionContext(interfaces.ExecutionContext): + isinsert = False + isupdate = False + isdelete = False + is_crud = False + is_text = False + isddl = False + executemany = False + compiled = None + statement = None + result_column_struct = None + returned_defaults = None + _is_implicit_returning = False + _is_explicit_returning = False + + # a hook for SQLite's translation of + # result column names + _translate_colname = None + + @classmethod + def _init_ddl(cls, dialect, connection, dbapi_connection, compiled_ddl): + """Initialize execution context for a DDLElement construct.""" + + self = cls.__new__(cls) + self.root_connection = connection + self._dbapi_connection = dbapi_connection + self.dialect = connection.dialect + + self.compiled = compiled = compiled_ddl + self.isddl = True + + self.execution_options = compiled.execution_options + if connection._execution_options: + self.execution_options = dict(self.execution_options) + self.execution_options.update(connection._execution_options) + + if not dialect.supports_unicode_statements: + self.unicode_statement = util.text_type(compiled) + self.statement = dialect._encoder(self.unicode_statement)[0] + else: + self.statement = self.unicode_statement = util.text_type(compiled) + + self.cursor = self.create_cursor() + self.compiled_parameters = [] + + if dialect.positional: + self.parameters = [dialect.execute_sequence_format()] + else: + self.parameters = [{}] + + return self + + @classmethod + def _init_compiled(cls, dialect, connection, dbapi_connection, + compiled, parameters): + """Initialize execution context for a Compiled construct.""" + + self = cls.__new__(cls) + self.root_connection = connection + self._dbapi_connection = dbapi_connection + self.dialect = connection.dialect + + self.compiled = compiled + + # this should be caught in the engine before + # we get here + assert compiled.can_execute + + self.execution_options = compiled.execution_options.union( + connection._execution_options) + + self.result_column_struct = ( + compiled._result_columns, compiled._ordered_columns, + compiled._textual_ordered_columns) + + self.unicode_statement = util.text_type(compiled) + if not dialect.supports_unicode_statements: + self.statement = self.unicode_statement.encode( + self.dialect.encoding) + else: + self.statement = self.unicode_statement + + self.isinsert = compiled.isinsert + self.isupdate = compiled.isupdate + self.isdelete = compiled.isdelete + self.is_text = compiled.isplaintext + + if not parameters: + self.compiled_parameters = [compiled.construct_params()] + else: + self.compiled_parameters = \ + [compiled.construct_params(m, _group_number=grp) for + grp, m in enumerate(parameters)] + + self.executemany = len(parameters) > 1 + + self.cursor = self.create_cursor() + + if self.isinsert or self.isupdate or self.isdelete: + self.is_crud = True + self._is_explicit_returning = bool(compiled.statement._returning) + self._is_implicit_returning = bool( + compiled.returning and not compiled.statement._returning) + + if 
self.compiled.insert_prefetch or self.compiled.update_prefetch: + if self.executemany: + self._process_executemany_defaults() + else: + self._process_executesingle_defaults() + + processors = compiled._bind_processors + + # Convert the dictionary of bind parameter values + # into a dict or list to be sent to the DBAPI's + # execute() or executemany() method. + parameters = [] + if dialect.positional: + for compiled_params in self.compiled_parameters: + param = [] + for key in self.compiled.positiontup: + if key in processors: + param.append(processors[key](compiled_params[key])) + else: + param.append(compiled_params[key]) + parameters.append(dialect.execute_sequence_format(param)) + else: + encode = not dialect.supports_unicode_statements + for compiled_params in self.compiled_parameters: + + if encode: + param = dict( + ( + dialect._encoder(key)[0], + processors[key](compiled_params[key]) + if key in processors + else compiled_params[key] + ) + for key in compiled_params + ) + else: + param = dict( + ( + key, + processors[key](compiled_params[key]) + if key in processors + else compiled_params[key] + ) + for key in compiled_params + ) + + parameters.append(param) + self.parameters = dialect.execute_sequence_format(parameters) + + return self + + @classmethod + def _init_statement(cls, dialect, connection, dbapi_connection, + statement, parameters): + """Initialize execution context for a string SQL statement.""" + + self = cls.__new__(cls) + self.root_connection = connection + self._dbapi_connection = dbapi_connection + self.dialect = connection.dialect + self.is_text = True + + # plain text statement + self.execution_options = connection._execution_options + + if not parameters: + if self.dialect.positional: + self.parameters = [dialect.execute_sequence_format()] + else: + self.parameters = [{}] + elif isinstance(parameters[0], dialect.execute_sequence_format): + self.parameters = parameters + elif isinstance(parameters[0], dict): + if dialect.supports_unicode_statements: + self.parameters = parameters + else: + self.parameters = [ + dict((dialect._encoder(k)[0], d[k]) for k in d) + for d in parameters + ] or [{}] + else: + self.parameters = [dialect.execute_sequence_format(p) + for p in parameters] + + self.executemany = len(parameters) > 1 + + if not dialect.supports_unicode_statements and \ + isinstance(statement, util.text_type): + self.unicode_statement = statement + self.statement = dialect._encoder(statement)[0] + else: + self.statement = self.unicode_statement = statement + + self.cursor = self.create_cursor() + return self + + @classmethod + def _init_default(cls, dialect, connection, dbapi_connection): + """Initialize execution context for a ColumnDefault construct.""" + + self = cls.__new__(cls) + self.root_connection = connection + self._dbapi_connection = dbapi_connection + self.dialect = connection.dialect + self.execution_options = connection._execution_options + self.cursor = self.create_cursor() + return self + + @util.memoized_property + def engine(self): + return self.root_connection.engine + + @util.memoized_property + def postfetch_cols(self): + return self.compiled.postfetch + + @util.memoized_property + def prefetch_cols(self): + if self.isinsert: + return self.compiled.insert_prefetch + elif self.isupdate: + return self.compiled.update_prefetch + else: + return () + + @util.memoized_property + def returning_cols(self): + return self.compiled.returning + + @util.memoized_property + def no_parameters(self): + return self.execution_options.get("no_parameters", False) + + 
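+    # Illustrative note: "no_parameters" is typically enabled via
+    # execution options on a connection, e.g.
+    #
+    #     conn.execution_options(no_parameters=True).execute("select 1")
+    #
+    # in which case a statement carrying no bind parameters is passed to
+    # cursor.execute() without any parameter collection at all
+    # (see do_execute_no_params()).
+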
@util.memoized_property + def should_autocommit(self): + autocommit = self.execution_options.get('autocommit', + not self.compiled and + self.statement and + expression.PARSE_AUTOCOMMIT + or False) + + if autocommit is expression.PARSE_AUTOCOMMIT: + return self.should_autocommit_text(self.unicode_statement) + else: + return autocommit + + def _execute_scalar(self, stmt, type_): + """Execute a string statement on the current cursor, returning a + scalar result. + + Used to fire off sequences, default phrases, and "select lastrowid" + types of statements individually or in the context of a parent INSERT + or UPDATE statement. + + """ + + conn = self.root_connection + if isinstance(stmt, util.text_type) and \ + not self.dialect.supports_unicode_statements: + stmt = self.dialect._encoder(stmt)[0] + + if self.dialect.positional: + default_params = self.dialect.execute_sequence_format() + else: + default_params = {} + + conn._cursor_execute(self.cursor, stmt, default_params, context=self) + r = self.cursor.fetchone()[0] + if type_ is not None: + # apply type post processors to the result + proc = type_._cached_result_processor( + self.dialect, + self.cursor.description[0][1] + ) + if proc: + return proc(r) + return r + + @property + def connection(self): + return self.root_connection._branch() + + def should_autocommit_text(self, statement): + return AUTOCOMMIT_REGEXP.match(statement) + + def _use_server_side_cursor(self): + if not self.dialect.supports_server_side_cursors: + return False + + if self.dialect.server_side_cursors: + use_server_side = \ + self.execution_options.get('stream_results', True) and ( + (self.compiled and isinstance(self.compiled.statement, + expression.Selectable) + or + ( + (not self.compiled or + isinstance(self.compiled.statement, + expression.TextClause)) + and self.statement and SERVER_SIDE_CURSOR_RE.match( + self.statement)) + ) + ) + else: + use_server_side = \ + self.execution_options.get('stream_results', False) + + return use_server_side + + def create_cursor(self): + if self._use_server_side_cursor(): + self._is_server_side = True + return self.create_server_side_cursor() + else: + self._is_server_side = False + return self._dbapi_connection.cursor() + + def create_server_side_cursor(self): + raise NotImplementedError() + + def pre_exec(self): + pass + + def post_exec(self): + pass + + def get_result_processor(self, type_, colname, coltype): + """Return a 'result processor' for a given type as present in + cursor.description. + + This has a default implementation that dialects can override + for context-sensitive result type handling. + + """ + return type_._cached_result_processor(self.dialect, coltype) + + def get_lastrowid(self): + """return self.cursor.lastrowid, or equivalent, after an INSERT. + + This may involve calling special cursor functions, + issuing a new SELECT on the cursor (or a new one), + or returning a stored value that was + calculated within post_exec(). + + This function will only be called for dialects + which support "implicit" primary key generation, + keep preexecute_autoincrement_sequences set to False, + and when no explicit id value was bound to the + statement. + + The function is called once, directly after + post_exec() and before the transaction is committed + or ResultProxy is generated. If the post_exec() + method assigns a value to `self._lastrowid`, the + value is used in place of calling get_lastrowid(). 
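+
+        A dialect that must run a follow-up statement could, as a sketch
+        (the SELECT shown is a hypothetical example), implement::
+
+            def get_lastrowid(self):
+                self.cursor.execute("SELECT LAST_INSERT_ID()")
+                return self.cursor.fetchone()[0]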
+ + Note that this method is *not* equivalent to the + ``lastrowid`` method on ``ResultProxy``, which is a + direct proxy to the DBAPI ``lastrowid`` accessor + in all cases. + + """ + return self.cursor.lastrowid + + def handle_dbapi_exception(self, e): + pass + + def get_result_proxy(self): + if self._is_server_side: + return result.BufferedRowResultProxy(self) + else: + return result.ResultProxy(self) + + @property + def rowcount(self): + return self.cursor.rowcount + + def supports_sane_rowcount(self): + return self.dialect.supports_sane_rowcount + + def supports_sane_multi_rowcount(self): + return self.dialect.supports_sane_multi_rowcount + + def _setup_crud_result_proxy(self): + if self.isinsert and \ + not self.executemany: + if not self._is_implicit_returning and \ + not self.compiled.inline and \ + self.dialect.postfetch_lastrowid: + + self._setup_ins_pk_from_lastrowid() + + elif not self._is_implicit_returning: + self._setup_ins_pk_from_empty() + + result = self.get_result_proxy() + + if self.isinsert: + if self._is_implicit_returning: + row = result.fetchone() + self.returned_defaults = row + self._setup_ins_pk_from_implicit_returning(row) + result._soft_close() + result._metadata = None + elif not self._is_explicit_returning: + result._soft_close() + result._metadata = None + elif self.isupdate and self._is_implicit_returning: + row = result.fetchone() + self.returned_defaults = row + result._soft_close() + result._metadata = None + + elif result._metadata is None: + # no results, get rowcount + # (which requires open cursor on some drivers + # such as kinterbasdb, mxodbc) + result.rowcount + result._soft_close() + return result + + def _setup_ins_pk_from_lastrowid(self): + key_getter = self.compiled._key_getters_for_crud_column[2] + table = self.compiled.statement.table + compiled_params = self.compiled_parameters[0] + + lastrowid = self.get_lastrowid() + if lastrowid is not None: + autoinc_col = table._autoincrement_column + if autoinc_col is not None: + # apply type post processors to the lastrowid + proc = autoinc_col.type._cached_result_processor( + self.dialect, None) + if proc is not None: + lastrowid = proc(lastrowid) + self.inserted_primary_key = [ + lastrowid if c is autoinc_col else + compiled_params.get(key_getter(c), None) + for c in table.primary_key + ] + else: + # don't have a usable lastrowid, so + # do the same as _setup_ins_pk_from_empty + self.inserted_primary_key = [ + compiled_params.get(key_getter(c), None) + for c in table.primary_key + ] + + def _setup_ins_pk_from_empty(self): + key_getter = self.compiled._key_getters_for_crud_column[2] + table = self.compiled.statement.table + compiled_params = self.compiled_parameters[0] + self.inserted_primary_key = [ + compiled_params.get(key_getter(c), None) + for c in table.primary_key + ] + + def _setup_ins_pk_from_implicit_returning(self, row): + if row is None: + self.inserted_primary_key = None + return + + key_getter = self.compiled._key_getters_for_crud_column[2] + table = self.compiled.statement.table + compiled_params = self.compiled_parameters[0] + self.inserted_primary_key = [ + row[col] if value is None else value + for col, value in [ + (col, compiled_params.get(key_getter(col), None)) + for col in table.primary_key + ] + ] + + def lastrow_has_defaults(self): + return (self.isinsert or self.isupdate) and \ + bool(self.compiled.postfetch) + + def set_input_sizes(self, translate=None, exclude_types=None): + """Given a cursor and ClauseParameters, call the appropriate + style of ``setinputsizes()`` on the cursor, using DB-API types + from the bind parameter's ``TypeEngine`` objects. + + This method is only called by those dialects which require it, + currently cx_oracle. + + """ + + if not hasattr(self.compiled, 'bind_names'): + return + + types = dict( + (self.compiled.bind_names[bindparam], bindparam.type) + for bindparam in self.compiled.bind_names) + + if self.dialect.positional: + inputsizes = [] + for key in self.compiled.positiontup: + typeengine = types[key] + dbtype = typeengine.dialect_impl(self.dialect).\ + get_dbapi_type(self.dialect.dbapi) + if dbtype is not None and \ + (not exclude_types or dbtype not in exclude_types): + inputsizes.append(dbtype) + try: + self.cursor.setinputsizes(*inputsizes) + except BaseException as e: + self.root_connection._handle_dbapi_exception( + e, None, None, None, self) + else: + inputsizes = {} + for key in self.compiled.bind_names.values(): + typeengine = types[key] + dbtype = typeengine.dialect_impl(self.dialect).\ + get_dbapi_type(self.dialect.dbapi) + if dbtype is not None and \ + (not exclude_types or dbtype not in exclude_types): + if translate: + key = translate.get(key, key) + if not self.dialect.supports_unicode_binds: + key = self.dialect._encoder(key)[0] + inputsizes[key] = dbtype + try: + self.cursor.setinputsizes(**inputsizes) + except BaseException as e: + self.root_connection._handle_dbapi_exception( + e, None, None, None, self) + + def _exec_default(self, default, type_): + if default.is_sequence: + return self.fire_sequence(default, type_) + elif default.is_callable: + return default.arg(self) + elif default.is_clause_element: + # TODO: expensive branching here should be + # pulled into _exec_scalar() + conn = self.connection + c = expression.select([default.arg]).compile(bind=conn) + return conn._execute_compiled(c, (), {}).scalar() + else: + return default.arg + + def get_insert_default(self, column): + if column.default is None: + return None + else: + return self._exec_default(column.default, column.type) + + def get_update_default(self, column): + if column.onupdate is None: + return None + else: + return self._exec_default(column.onupdate, column.type) + + def _process_executemany_defaults(self): + key_getter = self.compiled._key_getters_for_crud_column[2] + + scalar_defaults = {} + + insert_prefetch = self.compiled.insert_prefetch + update_prefetch = self.compiled.update_prefetch + + # pre-determine scalar Python-side defaults + # to avoid many calls of get_insert_default()/ + # get_update_default() + for c in insert_prefetch: + if c.default and c.default.is_scalar: + scalar_defaults[c] = c.default.arg + for c in update_prefetch: + if c.onupdate and c.onupdate.is_scalar: + scalar_defaults[c] = c.onupdate.arg + + for param in self.compiled_parameters: + self.current_parameters = param + for c in insert_prefetch: + if c in scalar_defaults: + val = scalar_defaults[c] + else: + val = self.get_insert_default(c) + if val is not None: + param[key_getter(c)] = val + for c in update_prefetch: + if c in scalar_defaults: + val = scalar_defaults[c] + else: + val = self.get_update_default(c) + if val is not None: + param[key_getter(c)] = val + + del self.current_parameters + + def _process_executesingle_defaults(self): + key_getter = self.compiled._key_getters_for_crud_column[2] + self.current_parameters = compiled_parameters = \ + self.compiled_parameters[0] + + for c in self.compiled.insert_prefetch: + if c.default and \ + not c.default.is_sequence and c.default.is_scalar: + val = c.default.arg + else: + val = self.get_insert_default(c) 
+ + if val is not None: + compiled_parameters[key_getter(c)] = val + + for c in self.compiled.update_prefetch: + val = self.get_update_default(c) + + if val is not None: + compiled_parameters[key_getter(c)] = val + del self.current_parameters + + +DefaultDialect.execution_ctx_cls = DefaultExecutionContext diff --git a/app/lib/sqlalchemy/engine/interfaces.py b/app/lib/sqlalchemy/engine/interfaces.py new file mode 100644 index 0000000..d0eff1c --- /dev/null +++ b/app/lib/sqlalchemy/engine/interfaces.py @@ -0,0 +1,1286 @@ +# engine/interfaces.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Define core interfaces used by the engine system.""" + +from .. import util + +# backwards compat +from ..sql.compiler import Compiled, TypeCompiler + + +class Dialect(object): + """Define the behavior of a specific database and DB-API combination. + + Any aspect of metadata definition, SQL query generation, + execution, result-set handling, or anything else which varies + between databases is defined under the general category of the + Dialect. The Dialect acts as a factory for other + database-specific object implementations including + ExecutionContext, Compiled, DefaultGenerator, and TypeEngine. + + All Dialects implement the following attributes: + + name + identifying name for the dialect from a DBAPI-neutral point of view + (i.e. 'sqlite') + + driver + identifying name for the dialect's DBAPI + + positional + True if the paramstyle for this Dialect is positional. + + paramstyle + the paramstyle to be used (some DB-APIs support multiple + paramstyles). + + convert_unicode + True if Unicode conversion should be applied to all ``str`` + types. + + encoding + type of encoding to use for unicode, usually defaults to + 'utf-8'. + + statement_compiler + a :class:`.Compiled` class used to compile SQL statements + + ddl_compiler + a :class:`.Compiled` class used to compile DDL statements + + server_version_info + a tuple containing a version number for the DB backend in use. + This value is only available for supporting dialects, and is + typically populated during the initial connection to the database. + + default_schema_name + the name of the default schema. This value is only available for + supporting dialects, and is typically populated during the + initial connection to the database. + + execution_ctx_cls + a :class:`.ExecutionContext` class used to handle statement execution + + execute_sequence_format + either the 'tuple' or 'list' type, depending on what cursor.execute() + accepts for the second argument (they vary). + + preparer + a :class:`~sqlalchemy.sql.compiler.IdentifierPreparer` class used to + quote identifiers. + + supports_alter + ``True`` if the database supports ``ALTER TABLE``. + + max_identifier_length + The maximum length of identifier names. + + supports_unicode_statements + Indicate whether the DB-API can receive SQL statements as Python + unicode strings + + supports_unicode_binds + Indicate whether the DB-API can receive string bind parameters + as Python unicode strings + + supports_sane_rowcount + Indicate whether the dialect properly implements rowcount for + ``UPDATE`` and ``DELETE`` statements. + + supports_sane_multi_rowcount + Indicate whether the dialect properly implements rowcount for + ``UPDATE`` and ``DELETE`` statements when executed via + executemany. 
+ + preexecute_autoincrement_sequences + True if 'implicit' primary key functions must be executed separately + in order to get their value. This is currently oriented towards + PostgreSQL. + + implicit_returning + use RETURNING or equivalent during INSERT execution in order to load + newly generated primary keys and other column defaults in one execution, + which are then available via inserted_primary_key. + If an insert statement has returning() specified explicitly, + the "implicit" functionality is not used and inserted_primary_key + will not be available. + + dbapi_type_map + A mapping of DB-API type objects present in this Dialect's + DB-API implementation mapped to TypeEngine implementations used + by the dialect. + + This is used to apply types to result sets based on the DB-API + types present in cursor.description; it only takes effect for + result sets against textual statements where no explicit + typemap was present. + + colspecs + A dictionary of TypeEngine classes from sqlalchemy.types mapped + to subclasses that are specific to the dialect class. This + dictionary is class-level only and is not accessed from the + dialect instance itself. + + supports_default_values + Indicates if the construct ``INSERT INTO tablename DEFAULT + VALUES`` is supported + + supports_sequences + Indicates if the dialect supports CREATE SEQUENCE or similar. + + sequences_optional + If True, indicates if the "optional" flag on the Sequence() construct + should signal to not generate a CREATE SEQUENCE. Applies only to + dialects that support sequences. Currently used only to allow PostgreSQL + SERIAL to be used on a column that specifies Sequence() for usage on + other backends. + + supports_native_enum + Indicates if the dialect supports a native ENUM construct. + This will prevent types.Enum from generating a CHECK + constraint when that type is used. + + supports_native_boolean + Indicates if the dialect supports a native boolean construct. + This will prevent types.Boolean from generating a CHECK + constraint when that type is used. + + dbapi_exception_translation_map + A dictionary of names that will contain as values the names of + pep-249 exceptions ("IntegrityError", "OperationalError", etc) + keyed to alternate class names, to support the case where a + DBAPI has exception classes that aren't named as they are + referred to (e.g. IntegrityError = MyException). In the vast + majority of cases this dictionary is empty. + + .. versionadded:: 1.0.5 + + """ + + _has_events = False + + def create_connect_args(self, url): + """Build DB-API compatible connection arguments. + + Given a :class:`~sqlalchemy.engine.url.URL` object, returns a tuple + consisting of a `*args`/`**kwargs` suitable to send directly + to the dbapi's connect function. + + """ + + raise NotImplementedError() + + @classmethod + def type_descriptor(cls, typeobj): + """Transform a generic type to a dialect-specific type. + + Dialect classes will usually use the + :func:`.types.adapt_type` function in the types module to + accomplish this. + + The returned result is cached *per dialect class* so can + contain no dialect-instance state. + + """ + + raise NotImplementedError() + + def initialize(self, connection): + """Called during strategized creation of the dialect with a + connection. + + Allows dialects to configure options based on server version info or + other properties. + + The connection passed here is a SQLAlchemy Connection object, + with full capabilities. 
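+
+        A sketch of a typical override (``MyDialect`` and the feature
+        flag are hypothetical)::
+
+            def initialize(self, connection):
+                super(MyDialect, self).initialize(connection)
+                self.supports_foo = self.server_version_info >= (9, 2)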
+ + The initialize() method of the base dialect should be called via + super(). + + """ + + pass + + def reflecttable( + self, connection, table, include_columns, exclude_columns): + """Load table description from the database. + + Given a :class:`.Connection` and a + :class:`~sqlalchemy.schema.Table` object, reflect its columns and + properties from the database. + + The implementation of this method is provided by + :meth:`.DefaultDialect.reflecttable`, which makes use of + :class:`.Inspector` to retrieve column information. + + Dialects should **not** seek to implement this method, and should + instead implement individual schema inspection operations such as + :meth:`.Dialect.get_columns`, :meth:`.Dialect.get_pk_constraint`, + etc. + + """ + + raise NotImplementedError() + + def get_columns(self, connection, table_name, schema=None, **kw): + """Return information about columns in `table_name`. + + Given a :class:`.Connection`, a string + `table_name`, and an optional string `schema`, return column + information as a list of dictionaries with these keys: + + name + the column's name + + type + [sqlalchemy.types#TypeEngine] + + nullable + boolean + + default + the column's default value + + autoincrement + boolean + + sequence + a dictionary of the form + {'name': str, 'start': int, 'increment': int, 'minvalue': int, + 'maxvalue': int, 'nominvalue': bool, 'nomaxvalue': bool, + 'cycle': bool} + + Additional column attributes may be present. + """ + + raise NotImplementedError() + + def get_primary_keys(self, connection, table_name, schema=None, **kw): + """Return information about primary keys in `table_name`. + + + Deprecated. This method is only called by the default + implementation of :meth:`.Dialect.get_pk_constraint`. Dialects should + instead implement the :meth:`.Dialect.get_pk_constraint` method + directly. + + """ + + raise NotImplementedError() + + def get_pk_constraint(self, connection, table_name, schema=None, **kw): + """Return information about the primary key constraint on + `table_name`. + + Given a :class:`.Connection`, a string + `table_name`, and an optional string `schema`, return primary + key information as a dictionary with these keys: + + constrained_columns + a list of column names that make up the primary key + + name + optional name of the primary key constraint. + + """ + raise NotImplementedError() + + def get_foreign_keys(self, connection, table_name, schema=None, **kw): + """Return information about foreign_keys in `table_name`. + + Given a :class:`.Connection`, a string + `table_name`, and an optional string `schema`, return foreign + key information as a list of dicts with these keys: + + name + the constraint's name + + constrained_columns + a list of column names that make up the foreign key + + referred_schema + the name of the referred schema + + referred_table + the name of the referred table + + referred_columns + a list of column names in the referred table that correspond to + constrained_columns + """ + + raise NotImplementedError() + + def get_table_names(self, connection, schema=None, **kw): + """Return a list of table names for `schema`.""" + + raise NotImplementedError() + + def get_temp_table_names(self, connection, schema=None, **kw): + """Return a list of temporary table names on the given connection, + if supported by the underlying backend. + + """ + + raise NotImplementedError() + + def get_view_names(self, connection, schema=None, **kw): + """Return a list of all view names available in the database. 
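+
+        Illustrative return value (the names are made up)::
+
+            ['user_view', 'order_summary_view']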
+ + schema: + Optional, retrieve names from a non-default schema. + """ + + raise NotImplementedError() + + def get_temp_view_names(self, connection, schema=None, **kw): + """Return a list of temporary view names on the given connection, + if supported by the underlying backend. + + """ + + raise NotImplementedError() + + def get_view_definition(self, connection, view_name, schema=None, **kw): + """Return view definition. + + Given a :class:`.Connection`, a string + `view_name`, and an optional string `schema`, return the view + definition. + """ + + raise NotImplementedError() + + def get_indexes(self, connection, table_name, schema=None, **kw): + """Return information about indexes in `table_name`. + + Given a :class:`.Connection`, a string + `table_name` and an optional string `schema`, return index + information as a list of dictionaries with these keys: + + name + the index's name + + column_names + list of column names in order + + unique + boolean + """ + + raise NotImplementedError() + + def get_unique_constraints( + self, connection, table_name, schema=None, **kw): + r"""Return information about unique constraints in `table_name`. + + Given a string `table_name` and an optional string `schema`, return + unique constraint information as a list of dicts with these keys: + + name + the unique constraint's name + + column_names + list of column names in order + + \**kw + other options passed to the dialect's get_unique_constraints() + method. + + .. versionadded:: 0.9.0 + + """ + + raise NotImplementedError() + + def get_check_constraints( + self, connection, table_name, schema=None, **kw): + r"""Return information about check constraints in `table_name`. + + Given a string `table_name` and an optional string `schema`, return + check constraint information as a list of dicts with these keys: + + name + the check constraint's name + + sqltext + the check constraint's SQL expression + + \**kw + other options passed to the dialect's get_check_constraints() + method. + + .. versionadded:: 1.1.0 + + """ + + raise NotImplementedError() + + def normalize_name(self, name): + """convert the given name to lowercase if it is detected as + case insensitive. + + this method is only used if the dialect defines + requires_name_normalize=True. + + """ + raise NotImplementedError() + + def denormalize_name(self, name): + """convert the given name to a case insensitive identifier + for the backend if it is an all-lowercase name. + + this method is only used if the dialect defines + requires_name_normalize=True. + + """ + raise NotImplementedError() + + def has_table(self, connection, table_name, schema=None): + """Check the existence of a particular table in the database. + + Given a :class:`.Connection` object and a string + `table_name`, return True if the given table (possibly within + the specified `schema`) exists in the database, False + otherwise. + """ + + raise NotImplementedError() + + def has_sequence(self, connection, sequence_name, schema=None): + """Check the existence of a particular sequence in the database. + + Given a :class:`.Connection` object and a string + `sequence_name`, return True if the given sequence exists in + the database, False otherwise. + """ + + raise NotImplementedError() + + def _get_server_version_info(self, connection): + """Retrieve the server version info from the given connection. + + This is used by the default implementation to populate the + "server_version_info" attribute and is called exactly + once upon first connect. 
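+
+        A sketch of one possible implementation (the query and the
+        dotted version format are assumptions)::
+
+            def _get_server_version_info(self, connection):
+                raw = connection.scalar("select version()")
+                return tuple(int(x) for x in raw.split("."))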
+ + """ + + raise NotImplementedError() + + def _get_default_schema_name(self, connection): + """Return the string name of the currently selected schema from + the given connection. + + This is used by the default implementation to populate the + "default_schema_name" attribute and is called exactly + once upon first connect. + + """ + + raise NotImplementedError() + + def do_begin(self, dbapi_connection): + """Provide an implementation of ``connection.begin()``, given a + DB-API connection. + + The DBAPI has no dedicated "begin" method and it is expected + that transactions are implicit. This hook is provided for those + DBAPIs that might need additional help in this area. + + Note that :meth:`.Dialect.do_begin` is not called unless a + :class:`.Transaction` object is in use. The + :meth:`.Dialect.do_autocommit` + hook is provided for DBAPIs that need some extra commands emitted + after a commit in order to enter the next transaction, when the + SQLAlchemy :class:`.Connection` is used in its default "autocommit" + mode. + + :param dbapi_connection: a DBAPI connection, typically + proxied within a :class:`.ConnectionFairy`. + + """ + + raise NotImplementedError() + + def do_rollback(self, dbapi_connection): + """Provide an implementation of ``connection.rollback()``, given + a DB-API connection. + + :param dbapi_connection: a DBAPI connection, typically + proxied within a :class:`.ConnectionFairy`. + + """ + + raise NotImplementedError() + + def do_commit(self, dbapi_connection): + """Provide an implementation of ``connection.commit()``, given a + DB-API connection. + + :param dbapi_connection: a DBAPI connection, typically + proxied within a :class:`.ConnectionFairy`. + + """ + + raise NotImplementedError() + + def do_close(self, dbapi_connection): + """Provide an implementation of ``connection.close()``, given a DBAPI + connection. + + This hook is called by the :class:`.Pool` when a connection has been + detached from the pool, or is being returned beyond the normal + capacity of the pool. + + .. versionadded:: 0.8 + + """ + + raise NotImplementedError() + + def create_xid(self): + """Create a two-phase transaction ID. + + This id will be passed to do_begin_twophase(), + do_rollback_twophase(), do_commit_twophase(). Its format is + unspecified. + """ + + raise NotImplementedError() + + def do_savepoint(self, connection, name): + """Create a savepoint with the given name. + + :param connection: a :class:`.Connection`. + :param name: savepoint name. + + """ + + raise NotImplementedError() + + def do_rollback_to_savepoint(self, connection, name): + """Rollback a connection to the named savepoint. + + :param connection: a :class:`.Connection`. + :param name: savepoint name. + + """ + + raise NotImplementedError() + + def do_release_savepoint(self, connection, name): + """Release the named savepoint on a connection. + + :param connection: a :class:`.Connection`. + :param name: savepoint name. + """ + + raise NotImplementedError() + + def do_begin_twophase(self, connection, xid): + """Begin a two phase transaction on the given connection. + + :param connection: a :class:`.Connection`. + :param xid: xid + + """ + + raise NotImplementedError() + + def do_prepare_twophase(self, connection, xid): + """Prepare a two phase transaction on the given connection. + + :param connection: a :class:`.Connection`. 
+ :param xid: xid + + """ + + raise NotImplementedError() + + def do_rollback_twophase(self, connection, xid, is_prepared=True, + recover=False): + """Rollback a two phase transaction on the given connection. + + :param connection: a :class:`.Connection`. + :param xid: xid + :param is_prepared: whether or not + :meth:`.TwoPhaseTransaction.prepare` was called. + :param recover: if the recover flag was passed. + + """ + + raise NotImplementedError() + + def do_commit_twophase(self, connection, xid, is_prepared=True, + recover=False): + """Commit a two phase transaction on the given connection. + + + :param connection: a :class:`.Connection`. + :param xid: xid + :param is_prepared: whether or not + :meth:`.TwoPhaseTransaction.prepare` was called. + :param recover: if the recover flag was passed. + + """ + + raise NotImplementedError() + + def do_recover_twophase(self, connection): + """Recover list of uncommitted prepared two phase transaction + identifiers on the given connection. + + :param connection: a :class:`.Connection`. + + """ + + raise NotImplementedError() + + def do_executemany(self, cursor, statement, parameters, context=None): + """Provide an implementation of ``cursor.executemany(statement, + parameters)``.""" + + raise NotImplementedError() + + def do_execute(self, cursor, statement, parameters, context=None): + """Provide an implementation of ``cursor.execute(statement, + parameters)``.""" + + raise NotImplementedError() + + def do_execute_no_params(self, cursor, statement, parameters, + context=None): + """Provide an implementation of ``cursor.execute(statement)``. + + The parameter collection should not be sent. + + """ + + raise NotImplementedError() + + def is_disconnect(self, e, connection, cursor): + """Return True if the given DB-API error indicates an invalid + connection""" + + raise NotImplementedError() + + def connect(self): + """return a callable which sets up a newly created DBAPI connection. + + The callable accepts a single argument "conn" which is the + DBAPI connection itself. It has no return value. + + This is used to set dialect-wide per-connection options such as + isolation modes, unicode modes, etc. + + If a callable is returned, it will be assembled into a pool listener + that receives the direct DBAPI connection, with all wrappers removed. + + If None is returned, no listener will be generated. + + """ + return None + + def reset_isolation_level(self, dbapi_conn): + """Given a DBAPI connection, revert its isolation to the default. + + Note that this is a dialect-level method which is used as part + of the implementation of the :class:`.Connection` and + :class:`.Engine` + isolation level facilities; these APIs should be preferred for + most typical use cases. + + .. seealso:: + + :meth:`.Connection.get_isolation_level` - view current level + + :attr:`.Connection.default_isolation_level` - view default level + + :paramref:`.Connection.execution_options.isolation_level` - + set per :class:`.Connection` isolation level + + :paramref:`.create_engine.isolation_level` - + set per :class:`.Engine` isolation level + + """ + + raise NotImplementedError() + + def set_isolation_level(self, dbapi_conn, level): + """Given a DBAPI connection, set its isolation level. + + Note that this is a dialect-level method which is used as part + of the implementation of the :class:`.Connection` and + :class:`.Engine` + isolation level facilities; these APIs should be preferred for + most typical use cases. + + .. 
seealso:: + + :meth:`.Connection.get_isolation_level` - view current level + + :attr:`.Connection.default_isolation_level` - view default level + + :paramref:`.Connection.execution_options.isolation_level` - + set per :class:`.Connection` isolation level + + :paramref:`.create_engine.isolation_level` - + set per :class:`.Engine` isolation level + + """ + + raise NotImplementedError() + + def get_isolation_level(self, dbapi_conn): + """Given a DBAPI connection, return its isolation level. + + When working with a :class:`.Connection` object, the corresponding + DBAPI connection may be procured using the + :attr:`.Connection.connection` accessor. + + Note that this is a dialect-level method which is used as part + of the implementation of the :class:`.Connection` and + :class:`.Engine` isolation level facilities; + these APIs should be preferred for most typical use cases. + + + .. seealso:: + + :meth:`.Connection.get_isolation_level` - view current level + + :attr:`.Connection.default_isolation_level` - view default level + + :paramref:`.Connection.execution_options.isolation_level` - + set per :class:`.Connection` isolation level + + :paramref:`.create_engine.isolation_level` - + set per :class:`.Engine` isolation level + + + """ + + raise NotImplementedError() + + @classmethod + def get_dialect_cls(cls, url): + """Given a URL, return the :class:`.Dialect` that will be used. + + This is a hook that allows an external plugin to provide functionality + around an existing dialect, by allowing the plugin to be loaded + from the url based on an entrypoint, and then the plugin returns + the actual dialect to be used. + + By default this just returns the cls. + + .. versionadded:: 1.0.3 + + """ + return cls + + @classmethod + def engine_created(cls, engine): + """A convenience hook called before returning the final :class:`.Engine`. + + If the dialect returned a different class from the + :meth:`.get_dialect_cls` + method, then the hook is called on both classes, first on + the dialect class returned by the :meth:`.get_dialect_cls` method and + then on the class on which the method was called. + + The hook should be used by dialects and/or wrappers to apply special + events to the engine or its components. In particular, it allows + a dialect-wrapping class to apply dialect-level events. + + .. versionadded:: 1.0.3 + + """ + pass + + +class CreateEnginePlugin(object): + """A set of hooks intended to augment the construction of an + :class:`.Engine` object based on entrypoint names in a URL. + + The purpose of :class:`.CreateEnginePlugin` is to allow third-party + systems to apply engine, pool and dialect level event listeners without + the need for the target application to be modified; instead, the plugin + names can be added to the database URL. Target applications for + :class:`.CreateEnginePlugin` include: + + * connection and SQL performance tools, e.g. 
which use events to track + number of checkouts and/or time spent with statements + + * connectivity plugins such as proxies + + Plugins are registered using entry points in a similar way as that + of dialects:: + + entry_points={ + 'sqlalchemy.plugins': [ + 'myplugin = myapp.plugins:MyPlugin' + ] + } + + A plugin that uses the above names would be invoked from a database + URL as in:: + + from sqlalchemy import create_engine + + engine = create_engine( + "mysql+pymysql://scott:tiger@localhost/test?plugin=myplugin") + + The ``plugin`` argument supports multiple instances, so that a URL + may specify multiple plugins; they are loaded in the order stated + in the URL:: + + engine = create_engine( + "mysql+pymysql://scott:tiger@localhost/" + "test?plugin=plugin_one&plugin=plugin_two&plugin=plugin_three") + + A plugin can receive additional arguments from the URL string as + well as from the keyword arguments passed to :func:`.create_engine`. + The :class:`.URL` object and the keyword dictionary are passed to the + constructor so that these arguments can be extracted from the url's + :attr:`.URL.query` collection as well as from the dictionary:: + + class MyPlugin(CreateEnginePlugin): + def __init__(self, url, kwargs): + self.my_argument_one = url.query.pop('my_argument_one') + self.my_argument_two = url.query.pop('my_argument_two') + self.my_argument_three = kwargs.pop('my_argument_three', None) + + Arguments like those illustrated above would be consumed from the + following:: + + from sqlalchemy import create_engine + + engine = create_engine( + "mysql+pymysql://scott:tiger@localhost/" + "test?plugin=myplugin&my_argument_one=foo&my_argument_two=bar", + my_argument_three='bat') + + The URL and dictionary are used for subsequent setup of the engine + as they are, so the plugin can modify their arguments in-place. + Arguments that are only understood by the plugin should be popped + or otherwise removed so that they aren't interpreted as erroneous + arguments afterwards. + + When the engine creation process completes and produces the + :class:`.Engine` object, it is again passed to the plugin via the + :meth:`.CreateEnginePlugin.engine_created` hook. In this hook, additional + changes can be made to the engine, most typically involving setup of + events (e.g. those defined in :ref:`core_event_toplevel`). + + .. versionadded:: 1.1 + + """ + def __init__(self, url, kwargs): + """Construct a new :class:`.CreateEnginePlugin`. + + The plugin object is instantiated individually for each call + to :func:`.create_engine`. A single :class:`.Engine` will be + passed to the :meth:`.CreateEnginePlugin.engine_created` method + corresponding to this URL. + + :param url: the :class:`.URL` object. The plugin should inspect + what it needs here as well as remove its custom arguments from the + :attr:`.URL.query` collection. The URL can be modified in-place + in any other way as well. + :param kwargs: The keyword arguments passed to :func:`.create_engine`. + The plugin can read and modify this dictionary in-place, to affect + the ultimate arguments used to create the engine. It should + remove its custom arguments from the dictionary as well. + + """ + self.url = url + + def handle_dialect_kwargs(self, dialect_cls, dialect_args): + """parse and modify dialect kwargs""" + + def handle_pool_kwargs(self, pool_cls, pool_args): + """parse and modify pool kwargs""" + + def engine_created(self, engine): + """Receive the :class:`.Engine` object when it is fully constructed. 
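+
+        A minimal sketch, assuming the plugin wants to log pool
+        checkouts (the listener shown is illustrative)::
+
+            def engine_created(self, engine):
+                from sqlalchemy import event
+
+                @event.listens_for(engine, "checkout")
+                def receive_checkout(dbapi_con, con_record, con_proxy):
+                    print("connection checked out")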
+ + The plugin may make additional changes to the engine, such as + registering engine or connection pool events. + + """ + + +class ExecutionContext(object): + """A messenger object for a Dialect that corresponds to a single + execution. + + ExecutionContext should have these data members: + + connection + Connection object which can be freely used by default value + generators to execute SQL. This Connection should reference the + same underlying connection/transactional resources of + root_connection. + + root_connection + Connection object which is the source of this ExecutionContext. This + Connection may have close_with_result=True set, in which case it can + only be used once. + + dialect + dialect which created this ExecutionContext. + + cursor + DB-API cursor procured from the connection, + + compiled + if passed to constructor, sqlalchemy.engine.base.Compiled object + being executed, + + statement + string version of the statement to be executed. Is either + passed to the constructor, or must be created from the + sql.Compiled object by the time pre_exec() has completed. + + parameters + bind parameters passed to the execute() method. For compiled + statements, this is a dictionary or list of dictionaries. For + textual statements, it should be in a format suitable for the + dialect's paramstyle (i.e. dict or list of dicts for non + positional, list or list of lists/tuples for positional). + + isinsert + True if the statement is an INSERT. + + isupdate + True if the statement is an UPDATE. + + should_autocommit + True if the statement is a "committable" statement. + + prefetch_cols + a list of Column objects for which a client-side default + was fired off. Applies to inserts and updates. + + postfetch_cols + a list of Column objects for which a server-side default or + inline SQL expression value was fired off. Applies to inserts + and updates. + """ + + exception = None + """A DBAPI-level exception that was caught when this ExecutionContext + attempted to execute a statement. + + This attribute is meaningful only within the + :meth:`.ConnectionEvents.dbapi_error` event. + + .. versionadded:: 0.9.7 + + .. seealso:: + + :attr:`.ExecutionContext.is_disconnect` + + :meth:`.ConnectionEvents.dbapi_error` + + """ + + is_disconnect = None + """Boolean flag set to True or False when a DBAPI-level exception + is caught when this ExecutionContext attempted to execute a statement. + + This attribute is meaningful only within the + :meth:`.ConnectionEvents.dbapi_error` event. + + .. versionadded:: 0.9.7 + + .. seealso:: + + :attr:`.ExecutionContext.exception` + + :meth:`.ConnectionEvents.dbapi_error` + + """ + + def create_cursor(self): + """Return a new cursor generated from this ExecutionContext's + connection. + + Some dialects may wish to change the behavior of + connection.cursor(), such as postgresql which may return a PG + "server side" cursor. + """ + + raise NotImplementedError() + + def pre_exec(self): + """Called before an execution of a compiled statement. + + If a compiled statement was passed to this ExecutionContext, + the `statement` and `parameters` datamembers must be + initialized after this statement is complete. + """ + + raise NotImplementedError() + + def post_exec(self): + """Called after the execution of a compiled statement. + + If a compiled statement was passed to this ExecutionContext, + the `last_insert_ids`, `last_inserted_params`, etc. + datamembers should be available after this method completes. 
+ """ + + raise NotImplementedError() + + def result(self): + """Return a result object corresponding to this ExecutionContext. + + Returns a ResultProxy. + """ + + raise NotImplementedError() + + def handle_dbapi_exception(self, e): + """Receive a DBAPI exception which occurred upon execute, result + fetch, etc.""" + + raise NotImplementedError() + + def should_autocommit_text(self, statement): + """Parse the given textual statement and return True if it refers to + a "committable" statement""" + + raise NotImplementedError() + + def lastrow_has_defaults(self): + """Return True if the last INSERT or UPDATE row contained + inlined or database-side defaults. + """ + + raise NotImplementedError() + + def get_rowcount(self): + """Return the DBAPI ``cursor.rowcount`` value, or in some + cases an interpreted value. + + See :attr:`.ResultProxy.rowcount` for details on this. + + """ + + raise NotImplementedError() + + +class Connectable(object): + """Interface for an object which supports execution of SQL constructs. + + The two implementations of :class:`.Connectable` are + :class:`.Connection` and :class:`.Engine`. + + Connectable must also implement the 'dialect' member which references a + :class:`.Dialect` instance. + + """ + + def connect(self, **kwargs): + """Return a :class:`.Connection` object. + + Depending on context, this may be ``self`` if this object + is already an instance of :class:`.Connection`, or a newly + procured :class:`.Connection` if this object is an instance + of :class:`.Engine`. + + """ + + def contextual_connect(self): + """Return a :class:`.Connection` object which may be part of an ongoing + context. + + Depending on context, this may be ``self`` if this object + is already an instance of :class:`.Connection`, or a newly + procured :class:`.Connection` if this object is an instance + of :class:`.Engine`. + + """ + + raise NotImplementedError() + + @util.deprecated("0.7", + "Use the create() method on the given schema " + "object directly, i.e. :meth:`.Table.create`, " + ":meth:`.Index.create`, :meth:`.MetaData.create_all`") + def create(self, entity, **kwargs): + """Emit CREATE statements for the given schema entity. + """ + + raise NotImplementedError() + + @util.deprecated("0.7", + "Use the drop() method on the given schema " + "object directly, i.e. :meth:`.Table.drop`, " + ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`") + def drop(self, entity, **kwargs): + """Emit DROP statements for the given schema entity. + """ + + raise NotImplementedError() + + def execute(self, object, *multiparams, **params): + """Executes the given construct and returns a :class:`.ResultProxy`.""" + raise NotImplementedError() + + def scalar(self, object, *multiparams, **params): + """Executes and returns the first column of the first row. + + The underlying cursor is closed after execution. + """ + raise NotImplementedError() + + def _run_visitor(self, visitorcallable, element, + **kwargs): + raise NotImplementedError() + + def _execute_clauseelement(self, elem, multiparams=None, params=None): + raise NotImplementedError() + + +class ExceptionContext(object): + """Encapsulate information about an error condition in progress. + + This object exists solely to be passed to the + :meth:`.ConnectionEvents.handle_error` event, supporting an interface that + can be extended without backwards-incompatibility. + + .. versionadded:: 0.9.7 + + """ + + connection = None + """The :class:`.Connection` in use during the exception. 
+ + This member is present, except in the case of a failure when + first connecting. + + .. seealso:: + + :attr:`.ExceptionContext.engine` + + + """ + + engine = None + """The :class:`.Engine` in use during the exception. + + This member should always be present, even in the case of a failure + when first connecting. + + .. versionadded:: 1.0.0 + + """ + + cursor = None + """The DBAPI cursor object. + + May be None. + + """ + + statement = None + """String SQL statement that was emitted directly to the DBAPI. + + May be None. + + """ + + parameters = None + """Parameter collection that was emitted directly to the DBAPI. + + May be None. + + """ + + original_exception = None + """The exception object which was caught. + + This member is always present. + + """ + + sqlalchemy_exception = None + """The :class:`sqlalchemy.exc.StatementError` which wraps the original, + and will be raised if exception handling is not circumvented by the event. + + May be None, as not all exception types are wrapped by SQLAlchemy. + For DBAPI-level exceptions that subclass the dbapi's Error class, this + field will always be present. + + """ + + chained_exception = None + """The exception that was returned by the previous handler in the + exception chain, if any. + + If present, this exception will be the one ultimately raised by + SQLAlchemy unless a subsequent handler replaces it. + + May be None. + + """ + + execution_context = None + """The :class:`.ExecutionContext` corresponding to the execution + operation in progress. + + This is present for statement execution operations, but not for + operations such as transaction begin/end. It also is not present when + the exception was raised before the :class:`.ExecutionContext` + could be constructed. + + Note that the :attr:`.ExceptionContext.statement` and + :attr:`.ExceptionContext.parameters` members may represent a + different value than that of the :class:`.ExecutionContext`, + potentially in the case where a + :meth:`.ConnectionEvents.before_cursor_execute` event or similar + modified the statement/parameters to be sent. + + May be None. + + """ + + is_disconnect = None + """Represent whether the exception as occurred represents a "disconnect" + condition. + + This flag will always be True or False within the scope of the + :meth:`.ConnectionEvents.handle_error` handler. + + SQLAlchemy will defer to this flag in order to determine whether or not + the connection should be invalidated subsequently. That is, by + assigning to this flag, a "disconnect" event which then results in + a connection and pool invalidation can be invoked or prevented by + changing this flag. + + """ + + invalidate_pool_on_disconnect = True + """Represent whether all connections in the pool should be invalidated + when a "disconnect" condition is in effect. + + Setting this flag to False within the scope of the + :meth:`.ConnectionEvents.handle_error` event will have the effect such + that the full collection of connections in the pool will not be + invalidated during a disconnect; only the current connection that is the + subject of the error will actually be invalidated. + + The purpose of this flag is for custom disconnect-handling schemes where + the invalidation of other connections in the pool is to be performed + based on other conditions, or even on a per-connection basis. + + .. 
versionadded:: 1.0.3 + + """ diff --git a/app/lib/sqlalchemy/engine/reflection.py b/app/lib/sqlalchemy/engine/reflection.py new file mode 100644 index 0000000..dfa81f4 --- /dev/null +++ b/app/lib/sqlalchemy/engine/reflection.py @@ -0,0 +1,843 @@ +# engine/reflection.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Provides an abstraction for obtaining database schema information. + +Usage Notes: + +Here are some general conventions when accessing the low level inspector +methods such as get_table_names, get_columns, etc. + +1. Inspector methods return lists of dicts in most cases for the following + reasons: + + * They're both standard types that can be serialized. + * Using a dict instead of a tuple allows easy expansion of attributes. + * Using a list for the outer structure maintains order and is easy to work + with (e.g. list comprehension [d['name'] for d in cols]). + +2. Records that contain a name, such as the column name in a column record + use the key 'name'. So for most return values, each record will have a + 'name' attribute.. +""" + +from .. import exc, sql +from ..sql import schema as sa_schema +from .. import util +from ..sql.type_api import TypeEngine +from ..util import deprecated +from ..util import topological +from .. import inspection +from .base import Connectable + + +@util.decorator +def cache(fn, self, con, *args, **kw): + info_cache = kw.get('info_cache', None) + if info_cache is None: + return fn(self, con, *args, **kw) + key = ( + fn.__name__, + tuple(a for a in args if isinstance(a, util.string_types)), + tuple((k, v) for k, v in kw.items() if + isinstance(v, + util.string_types + util.int_types + (float, ) + ) + ) + ) + ret = info_cache.get(key) + if ret is None: + ret = fn(self, con, *args, **kw) + info_cache[key] = ret + return ret + + +class Inspector(object): + """Performs database schema inspection. + + The Inspector acts as a proxy to the reflection methods of the + :class:`~sqlalchemy.engine.interfaces.Dialect`, providing a + consistent interface as well as caching support for previously + fetched metadata. + + A :class:`.Inspector` object is usually created via the + :func:`.inspect` function:: + + from sqlalchemy import inspect, create_engine + engine = create_engine('...') + insp = inspect(engine) + + The inspection method above is equivalent to using the + :meth:`.Inspector.from_engine` method, i.e.:: + + engine = create_engine('...') + insp = Inspector.from_engine(engine) + + Where above, the :class:`~sqlalchemy.engine.interfaces.Dialect` may opt + to return an :class:`.Inspector` subclass that provides additional + methods specific to the dialect's target database. + + """ + + def __init__(self, bind): + """Initialize a new :class:`.Inspector`. + + :param bind: a :class:`~sqlalchemy.engine.Connectable`, + which is typically an instance of + :class:`~sqlalchemy.engine.Engine` or + :class:`~sqlalchemy.engine.Connection`. + + For a dialect-specific instance of :class:`.Inspector`, see + :meth:`.Inspector.from_engine` + + """ + # this might not be a connection, it could be an engine. 
+
+        self.bind = bind
+
+        # set the engine
+        if hasattr(bind, 'engine'):
+            self.engine = bind.engine
+        else:
+            self.engine = bind
+
+        if self.engine is bind:
+            # if engine, ensure initialized
+            bind.connect().close()
+
+        self.dialect = self.engine.dialect
+        self.info_cache = {}
+
+    @classmethod
+    def from_engine(cls, bind):
+        """Construct a new dialect-specific Inspector object from the given
+        engine or connection.
+
+        :param bind: a :class:`~sqlalchemy.engine.Connectable`,
+          which is typically an instance of
+          :class:`~sqlalchemy.engine.Engine` or
+          :class:`~sqlalchemy.engine.Connection`.
+
+        This method differs from a direct constructor call of
+        :class:`.Inspector` in that the
+        :class:`~sqlalchemy.engine.interfaces.Dialect` is given a chance to
+        provide a dialect-specific :class:`.Inspector` instance, which may
+        provide additional methods.
+
+        See the example at :class:`.Inspector`.
+
+        """
+        if hasattr(bind.dialect, 'inspector'):
+            return bind.dialect.inspector(bind)
+        return Inspector(bind)
+
+    @inspection._inspects(Connectable)
+    def _insp(bind):
+        return Inspector.from_engine(bind)
+
+    @property
+    def default_schema_name(self):
+        """Return the default schema name presented by the dialect
+        for the current engine's database user.
+
+        E.g. this is typically ``public`` for PostgreSQL and ``dbo``
+        for SQL Server.
+
+        """
+        return self.dialect.default_schema_name
+
+    def get_schema_names(self):
+        """Return all schema names.
+        """
+
+        if hasattr(self.dialect, 'get_schema_names'):
+            return self.dialect.get_schema_names(self.bind,
+                                                 info_cache=self.info_cache)
+        return []
+
+    def get_table_names(self, schema=None, order_by=None):
+        """Return all table names referred to within a particular schema.
+
+        The names are expected to be real tables only, not views.
+        Views are instead returned using the :meth:`.Inspector.get_view_names`
+        method.
+
+        :param schema: Schema name. If ``schema`` is left at ``None``, the
+         database's default schema is
+         used, else the named schema is searched.  If the database does not
+         support named schemas, behavior is undefined if ``schema`` is not
+         passed as ``None``.  For special quoting, use :class:`.quoted_name`.
+
+        :param order_by: Optional, may be the string "foreign_key" to sort
+         the result on foreign key dependencies.  Does not automatically
+         resolve cycles, and will raise :class:`.CircularDependencyError`
+         if cycles exist.
+
+         .. deprecated:: 1.0.0 - see
+            :meth:`.Inspector.get_sorted_table_and_fkc_names` for a version
+            of this which resolves foreign key cycles between tables
+            automatically.
+
+         .. versionchanged:: 0.8 the "foreign_key" sorting sorts tables
+            in order of dependee to dependent; that is, in creation
+            order, rather than in drop order.  This is to maintain
+            consistency with similar features such as
+            :attr:`.MetaData.sorted_tables` and :func:`.util.sort_tables`.
+
+        .. seealso::
+
+            :meth:`.Inspector.get_sorted_table_and_fkc_names`
+
+            :attr:`.MetaData.sorted_tables`
+
+        """
+
+        if hasattr(self.dialect, 'get_table_names'):
+            tnames = self.dialect.get_table_names(
+                self.bind, schema, info_cache=self.info_cache)
+        else:
+            tnames = self.engine.table_names(schema)
+        if order_by == 'foreign_key':
+            tuples = []
+            for tname in tnames:
+                for fkey in self.get_foreign_keys(tname, schema):
+                    if tname != fkey['referred_table']:
+                        tuples.append((fkey['referred_table'], tname))
+            tnames = list(topological.sort(tuples, tnames))
+        return tnames
+
+    def get_sorted_table_and_fkc_names(self, schema=None):
+        """Return dependency-sorted table and foreign key constraint names
+        referred to within a particular schema.
+
+        This will yield 2-tuples of
+        ``(tablename, [(tname, fkname), (tname, fkname), ...])``
+        consisting of table names in CREATE order grouped with the foreign key
+        constraint names that are not detected as belonging to a cycle.
+        The final element
+        will be ``(None, [(tname, fkname), (tname, fkname), ..])``
+        which will consist of remaining
+        foreign key constraint names that would require a separate CREATE
+        step after-the-fact, based on dependencies between tables.
+
+        .. versionadded:: 1.0.0
+
+        .. seealso::
+
+            :meth:`.Inspector.get_table_names`
+
+            :func:`.sort_tables_and_constraints` - similar method which works
+            with an already-given :class:`.MetaData`.
+
+        """
+        if hasattr(self.dialect, 'get_table_names'):
+            tnames = self.dialect.get_table_names(
+                self.bind, schema, info_cache=self.info_cache)
+        else:
+            tnames = self.engine.table_names(schema)
+
+        tuples = set()
+        remaining_fkcs = set()
+
+        fknames_for_table = {}
+        for tname in tnames:
+            fkeys = self.get_foreign_keys(tname, schema)
+            fknames_for_table[tname] = set(
+                [fk['name'] for fk in fkeys]
+            )
+            for fkey in fkeys:
+                if tname != fkey['referred_table']:
+                    tuples.add((fkey['referred_table'], tname))
+        try:
+            candidate_sort = list(topological.sort(tuples, tnames))
+        except exc.CircularDependencyError as err:
+            for edge in err.edges:
+                tuples.remove(edge)
+                remaining_fkcs.update(
+                    (edge[1], fkc)
+                    for fkc in fknames_for_table[edge[1]]
+                )
+
+            candidate_sort = list(topological.sort(tuples, tnames))
+        return [
+            (tname, fknames_for_table[tname].difference(remaining_fkcs))
+            for tname in candidate_sort
+        ] + [(None, list(remaining_fkcs))]
+
+    def get_temp_table_names(self):
+        """return a list of temporary table names for the current bind.
+
+        This method is unsupported by most dialects; currently
+        only SQLite implements it.
+
+        .. versionadded:: 1.0.0
+
+        """
+        return self.dialect.get_temp_table_names(
+            self.bind, info_cache=self.info_cache)
+
+    def get_temp_view_names(self):
+        """return a list of temporary view names for the current bind.
+
+        This method is unsupported by most dialects; currently
+        only SQLite implements it.
+
+        .. versionadded:: 1.0.0
+
+        """
+        return self.dialect.get_temp_view_names(
+            self.bind, info_cache=self.info_cache)
+
+    def get_table_options(self, table_name, schema=None, **kw):
+        """Return a dictionary of options specified when the table of the
+        given name was created.
+
+        This currently includes some options that apply to MySQL tables.
+
+        :param table_name: string name of the table.  For special quoting,
+         use :class:`.quoted_name`.
+
+        :param schema: string schema name; if omitted, uses the default schema
+         of the database connection.  For special quoting,
+         use :class:`.quoted_name`.
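+
+        As a usage sketch tying the above methods together (the
+        connection URL is illustrative only)::
+
+            from sqlalchemy import create_engine, inspect
+
+            engine = create_engine('postgresql://scott:tiger@localhost/test')
+            insp = inspect(engine)
+            for tname in insp.get_table_names():
+                print(tname, insp.get_table_options(tname))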
+ + """ + if hasattr(self.dialect, 'get_table_options'): + return self.dialect.get_table_options( + self.bind, table_name, schema, + info_cache=self.info_cache, **kw) + return {} + + def get_view_names(self, schema=None): + """Return all view names in `schema`. + + :param schema: Optional, retrieve names from a non-default schema. + For special quoting, use :class:`.quoted_name`. + + """ + + return self.dialect.get_view_names(self.bind, schema, + info_cache=self.info_cache) + + def get_view_definition(self, view_name, schema=None): + """Return definition for `view_name`. + + :param schema: Optional, retrieve names from a non-default schema. + For special quoting, use :class:`.quoted_name`. + + """ + + return self.dialect.get_view_definition( + self.bind, view_name, schema, info_cache=self.info_cache) + + def get_columns(self, table_name, schema=None, **kw): + """Return information about columns in `table_name`. + + Given a string `table_name` and an optional string `schema`, return + column information as a list of dicts with these keys: + + * ``name`` - the column's name + + * ``type`` - the type of this column; an instance of + :class:`~sqlalchemy.types.TypeEngine` + + * ``nullable`` - boolean flag if the column is NULL or NOT NULL + + * ``default`` - the column's server default value - this is returned + as a string SQL expression. + + * ``attrs`` - dict containing optional column attributes + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + :return: list of dictionaries, each representing the definition of + a database column. + + """ + + col_defs = self.dialect.get_columns(self.bind, table_name, schema, + info_cache=self.info_cache, + **kw) + for col_def in col_defs: + # make this easy and only return instances for coltype + coltype = col_def['type'] + if not isinstance(coltype, TypeEngine): + col_def['type'] = coltype() + return col_defs + + @deprecated('0.7', 'Call to deprecated method get_primary_keys.' + ' Use get_pk_constraint instead.') + def get_primary_keys(self, table_name, schema=None, **kw): + """Return information about primary keys in `table_name`. + + Given a string `table_name`, and an optional string `schema`, return + primary key information as a list of column names. + """ + + return self.dialect.get_pk_constraint(self.bind, table_name, schema, + info_cache=self.info_cache, + **kw)['constrained_columns'] + + def get_pk_constraint(self, table_name, schema=None, **kw): + """Return information about primary key constraint on `table_name`. + + Given a string `table_name`, and an optional string `schema`, return + primary key information as a dictionary with these keys: + + constrained_columns + a list of column names that make up the primary key + + name + optional name of the primary key constraint. + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + """ + return self.dialect.get_pk_constraint(self.bind, table_name, schema, + info_cache=self.info_cache, + **kw) + + def get_foreign_keys(self, table_name, schema=None, **kw): + """Return information about foreign_keys in `table_name`. 
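+
+        (Usage sketch, assuming the ``insp`` and ``'user'`` table of the
+        earlier examples)::
+
+            for fk in insp.get_foreign_keys('user'):
+                print(fk['name'], fk['constrained_columns'],
+                      fk['referred_table'], fk['referred_columns'])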
+ + Given a string `table_name`, and an optional string `schema`, return + foreign key information as a list of dicts with these keys: + + constrained_columns + a list of column names that make up the foreign key + + referred_schema + the name of the referred schema + + referred_table + the name of the referred table + + referred_columns + a list of column names in the referred table that correspond to + constrained_columns + + name + optional name of the foreign key constraint. + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + """ + + return self.dialect.get_foreign_keys(self.bind, table_name, schema, + info_cache=self.info_cache, + **kw) + + def get_indexes(self, table_name, schema=None, **kw): + """Return information about indexes in `table_name`. + + Given a string `table_name` and an optional string `schema`, return + index information as a list of dicts with these keys: + + name + the index's name + + column_names + list of column names in order + + unique + boolean + + dialect_options + dict of dialect-specific index options. May not be present + for all dialects. + + .. versionadded:: 1.0.0 + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + """ + + return self.dialect.get_indexes(self.bind, table_name, + schema, + info_cache=self.info_cache, **kw) + + def get_unique_constraints(self, table_name, schema=None, **kw): + """Return information about unique constraints in `table_name`. + + Given a string `table_name` and an optional string `schema`, return + unique constraint information as a list of dicts with these keys: + + name + the unique constraint's name + + column_names + list of column names in order + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + .. versionadded:: 0.8.4 + + """ + + return self.dialect.get_unique_constraints( + self.bind, table_name, schema, info_cache=self.info_cache, **kw) + + def get_check_constraints(self, table_name, schema=None, **kw): + """Return information about check constraints in `table_name`. + + Given a string `table_name` and an optional string `schema`, return + check constraint information as a list of dicts with these keys: + + name + the check constraint's name + + sqltext + the check constraint's SQL expression + + :param table_name: string name of the table. For special quoting, + use :class:`.quoted_name`. + + :param schema: string schema name; if omitted, uses the default schema + of the database connection. For special quoting, + use :class:`.quoted_name`. + + .. versionadded:: 1.1.0 + + """ + + return self.dialect.get_check_constraints( + self.bind, table_name, schema, info_cache=self.info_cache, **kw) + + def reflecttable(self, table, include_columns, exclude_columns=(), + _extend_on=None): + """Given a Table object, load its internal constructs based on + introspection. + + This is the underlying method used by most dialects to produce + table reflection. 
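+
+        Most applications reach it indirectly instead, either through
+        ``Table('user', meta, autoload=True, autoload_with=engine)`` or
+        via :meth:`.MetaData.reflect` (a sketch, assuming an ``engine``)::
+
+            from sqlalchemy import MetaData
+
+            meta = MetaData()
+            meta.reflect(bind=engine)   # reflects every visible table
+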
Direct usage is like:: + + from sqlalchemy import create_engine, MetaData, Table + from sqlalchemy.engine import reflection + + engine = create_engine('...') + meta = MetaData() + user_table = Table('user', meta) + insp = Inspector.from_engine(engine) + insp.reflecttable(user_table, None) + + :param table: a :class:`~sqlalchemy.schema.Table` instance. + :param include_columns: a list of string column names to include + in the reflection process. If ``None``, all columns are reflected. + + """ + + if _extend_on is not None: + if table in _extend_on: + return + else: + _extend_on.add(table) + + dialect = self.bind.dialect + + schema = self.bind.schema_for_object(table) + + table_name = table.name + + # get table-level arguments that are specifically + # intended for reflection, e.g. oracle_resolve_synonyms. + # these are unconditionally passed to related Table + # objects + reflection_options = dict( + (k, table.dialect_kwargs.get(k)) + for k in dialect.reflection_options + if k in table.dialect_kwargs + ) + + # reflect table options, like mysql_engine + tbl_opts = self.get_table_options( + table_name, schema, **table.dialect_kwargs) + if tbl_opts: + # add additional kwargs to the Table if the dialect + # returned them + table._validate_dialect_kwargs(tbl_opts) + + if util.py2k: + if isinstance(schema, str): + schema = schema.decode(dialect.encoding) + if isinstance(table_name, str): + table_name = table_name.decode(dialect.encoding) + + found_table = False + cols_by_orig_name = {} + + for col_d in self.get_columns( + table_name, schema, **table.dialect_kwargs): + found_table = True + + self._reflect_column( + table, col_d, include_columns, + exclude_columns, cols_by_orig_name) + + if not found_table: + raise exc.NoSuchTableError(table.name) + + self._reflect_pk( + table_name, schema, table, cols_by_orig_name, exclude_columns) + + self._reflect_fk( + table_name, schema, table, cols_by_orig_name, + exclude_columns, _extend_on, reflection_options) + + self._reflect_indexes( + table_name, schema, table, cols_by_orig_name, + include_columns, exclude_columns, reflection_options) + + self._reflect_unique_constraints( + table_name, schema, table, cols_by_orig_name, + include_columns, exclude_columns, reflection_options) + + self._reflect_check_constraints( + table_name, schema, table, cols_by_orig_name, + include_columns, exclude_columns, reflection_options) + + def _reflect_column( + self, table, col_d, include_columns, + exclude_columns, cols_by_orig_name): + + orig_name = col_d['name'] + + table.dispatch.column_reflect(self, table, col_d) + + # fetch name again as column_reflect is allowed to + # change it + name = col_d['name'] + if (include_columns and name not in include_columns) \ + or (exclude_columns and name in exclude_columns): + return + + coltype = col_d['type'] + + col_kw = dict( + (k, col_d[k]) + for k in ['nullable', 'autoincrement', 'quote', 'info', 'key'] + if k in col_d + ) + + colargs = [] + if col_d.get('default') is not None: + default = col_d['default'] + if isinstance(default, sql.elements.TextClause): + default = sa_schema.DefaultClause(default, _reflected=True) + elif not isinstance(default, sa_schema.FetchedValue): + default = sa_schema.DefaultClause( + sql.text(col_d['default']), _reflected=True) + + colargs.append(default) + + if 'sequence' in col_d: + self._reflect_col_sequence(col_d, colargs) + + cols_by_orig_name[orig_name] = col = \ + sa_schema.Column(name, coltype, *colargs, **col_kw) + + if col.key in table.primary_key: + col.primary_key = True + 
table.append_column(col) + + def _reflect_col_sequence(self, col_d, colargs): + if 'sequence' in col_d: + # TODO: mssql and sybase are using this. + seq = col_d['sequence'] + sequence = sa_schema.Sequence(seq['name'], 1, 1) + if 'start' in seq: + sequence.start = seq['start'] + if 'increment' in seq: + sequence.increment = seq['increment'] + colargs.append(sequence) + + def _reflect_pk( + self, table_name, schema, table, + cols_by_orig_name, exclude_columns): + pk_cons = self.get_pk_constraint( + table_name, schema, **table.dialect_kwargs) + if pk_cons: + pk_cols = [ + cols_by_orig_name[pk] + for pk in pk_cons['constrained_columns'] + if pk in cols_by_orig_name and pk not in exclude_columns + ] + + # update pk constraint name + table.primary_key.name = pk_cons.get('name') + + # tell the PKConstraint to re-initialize + # its column collection + table.primary_key._reload(pk_cols) + + def _reflect_fk( + self, table_name, schema, table, cols_by_orig_name, + exclude_columns, _extend_on, reflection_options): + fkeys = self.get_foreign_keys( + table_name, schema, **table.dialect_kwargs) + for fkey_d in fkeys: + conname = fkey_d['name'] + # look for columns by orig name in cols_by_orig_name, + # but support columns that are in-Python only as fallback + constrained_columns = [ + cols_by_orig_name[c].key + if c in cols_by_orig_name else c + for c in fkey_d['constrained_columns'] + ] + if exclude_columns and set(constrained_columns).intersection( + exclude_columns): + continue + referred_schema = fkey_d['referred_schema'] + referred_table = fkey_d['referred_table'] + referred_columns = fkey_d['referred_columns'] + refspec = [] + if referred_schema is not None: + sa_schema.Table(referred_table, table.metadata, + autoload=True, schema=referred_schema, + autoload_with=self.bind, + _extend_on=_extend_on, + **reflection_options + ) + for column in referred_columns: + refspec.append(".".join( + [referred_schema, referred_table, column])) + else: + sa_schema.Table(referred_table, table.metadata, autoload=True, + autoload_with=self.bind, + schema=sa_schema.BLANK_SCHEMA, + _extend_on=_extend_on, + **reflection_options + ) + for column in referred_columns: + refspec.append(".".join([referred_table, column])) + if 'options' in fkey_d: + options = fkey_d['options'] + else: + options = {} + table.append_constraint( + sa_schema.ForeignKeyConstraint(constrained_columns, refspec, + conname, link_to_name=True, + **options)) + + def _reflect_indexes( + self, table_name, schema, table, cols_by_orig_name, + include_columns, exclude_columns, reflection_options): + # Indexes + indexes = self.get_indexes(table_name, schema) + for index_d in indexes: + name = index_d['name'] + columns = index_d['column_names'] + unique = index_d['unique'] + flavor = index_d.get('type', 'index') + dialect_options = index_d.get('dialect_options', {}) + + duplicates = index_d.get('duplicates_constraint') + if include_columns and \ + not set(columns).issubset(include_columns): + util.warn( + "Omitting %s key for (%s), key covers omitted columns." 
% + (flavor, ', '.join(columns))) + continue + if duplicates: + continue + # look for columns by orig name in cols_by_orig_name, + # but support columns that are in-Python only as fallback + idx_cols = [] + for c in columns: + try: + idx_col = cols_by_orig_name[c] \ + if c in cols_by_orig_name else table.c[c] + except KeyError: + util.warn( + "%s key '%s' was not located in " + "columns for table '%s'" % ( + flavor, c, table_name + )) + else: + idx_cols.append(idx_col) + + sa_schema.Index( + name, *idx_cols, + **dict(list(dialect_options.items()) + [('unique', unique)]) + ) + + def _reflect_unique_constraints( + self, table_name, schema, table, cols_by_orig_name, + include_columns, exclude_columns, reflection_options): + + # Unique Constraints + try: + constraints = self.get_unique_constraints(table_name, schema) + except NotImplementedError: + # optional dialect feature + return + + for const_d in constraints: + conname = const_d['name'] + columns = const_d['column_names'] + duplicates = const_d.get('duplicates_index') + if include_columns and \ + not set(columns).issubset(include_columns): + util.warn( + "Omitting unique constraint key for (%s), " + "key covers omitted columns." % + ', '.join(columns)) + continue + if duplicates: + continue + # look for columns by orig name in cols_by_orig_name, + # but support columns that are in-Python only as fallback + constrained_cols = [] + for c in columns: + try: + constrained_col = cols_by_orig_name[c] \ + if c in cols_by_orig_name else table.c[c] + except KeyError: + util.warn( + "unique constraint key '%s' was not located in " + "columns for table '%s'" % (c, table_name)) + else: + constrained_cols.append(constrained_col) + table.append_constraint( + sa_schema.UniqueConstraint(*constrained_cols, name=conname)) + + def _reflect_check_constraints( + self, table_name, schema, table, cols_by_orig_name, + include_columns, exclude_columns, reflection_options): + try: + constraints = self.get_check_constraints(table_name, schema) + except NotImplementedError: + # optional dialect feature + return + + for const_d in constraints: + table.append_constraint( + sa_schema.CheckConstraint(**const_d)) diff --git a/app/lib/sqlalchemy/engine/result.py b/app/lib/sqlalchemy/engine/result.py new file mode 100644 index 0000000..907dc7b --- /dev/null +++ b/app/lib/sqlalchemy/engine/result.py @@ -0,0 +1,1435 @@ +# engine/result.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Define result set constructs including :class:`.ResultProxy` +and :class:`.RowProxy.""" + + +from .. import exc, util +from ..sql import expression, sqltypes, util as sql_util +import collections +import operator + +# This reconstructor is necessary so that pickles with the C extension or +# without use the same Binary format. +try: + # We need a different reconstructor on the C extension so that we can + # add extra checks that fields have correctly been initialized by + # __setstate__. + from sqlalchemy.cresultproxy import safe_rowproxy_reconstructor + + # The extra function embedding is needed so that the + # reconstructor function has the same signature whether or not + # the extension is present. 
+ def rowproxy_reconstructor(cls, state): + return safe_rowproxy_reconstructor(cls, state) +except ImportError: + def rowproxy_reconstructor(cls, state): + obj = cls.__new__(cls) + obj.__setstate__(state) + return obj + +try: + from sqlalchemy.cresultproxy import BaseRowProxy + _baserowproxy_usecext = True +except ImportError: + _baserowproxy_usecext = False + + class BaseRowProxy(object): + __slots__ = ('_parent', '_row', '_processors', '_keymap') + + def __init__(self, parent, row, processors, keymap): + """RowProxy objects are constructed by ResultProxy objects.""" + + self._parent = parent + self._row = row + self._processors = processors + self._keymap = keymap + + def __reduce__(self): + return (rowproxy_reconstructor, + (self.__class__, self.__getstate__())) + + def values(self): + """Return the values represented by this RowProxy as a list.""" + return list(self) + + def __iter__(self): + for processor, value in zip(self._processors, self._row): + if processor is None: + yield value + else: + yield processor(value) + + def __len__(self): + return len(self._row) + + def __getitem__(self, key): + try: + processor, obj, index = self._keymap[key] + except KeyError: + processor, obj, index = self._parent._key_fallback(key) + except TypeError: + if isinstance(key, slice): + l = [] + for processor, value in zip(self._processors[key], + self._row[key]): + if processor is None: + l.append(value) + else: + l.append(processor(value)) + return tuple(l) + else: + raise + if index is None: + raise exc.InvalidRequestError( + "Ambiguous column name '%s' in " + "result set column descriptions" % obj) + if processor is not None: + return processor(self._row[index]) + else: + return self._row[index] + + def __getattr__(self, name): + try: + return self[name] + except KeyError as e: + raise AttributeError(e.args[0]) + + +class RowProxy(BaseRowProxy): + """Proxy values from a single cursor row. + + Mostly follows "ordered dictionary" behavior, mapping result + values to the string-based column name, the integer position of + the result in the row, as well as Column instances which can be + mapped to the original Columns that produced this result set (for + results that correspond to constructed SQL expressions). 
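+
+    For instance (illustrative only; a ``users`` table and a ``conn``
+    :class:`.Connection` are assumed)::
+
+        row = conn.execute(users.select()).fetchone()
+        row[0]               # by integer position
+        row['name']          # by string column name
+        row[users.c.name]    # by Column object
+        list(row)            # rows also behave as ordered sequences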
+ """ + __slots__ = () + + def __contains__(self, key): + return self._parent._has_key(key) + + def __getstate__(self): + return { + '_parent': self._parent, + '_row': tuple(self) + } + + def __setstate__(self, state): + self._parent = parent = state['_parent'] + self._row = state['_row'] + self._processors = parent._processors + self._keymap = parent._keymap + + __hash__ = None + + def _op(self, other, op): + return op(tuple(self), tuple(other)) \ + if isinstance(other, RowProxy) \ + else op(tuple(self), other) + + def __lt__(self, other): + return self._op(other, operator.lt) + + def __le__(self, other): + return self._op(other, operator.le) + + def __ge__(self, other): + return self._op(other, operator.ge) + + def __gt__(self, other): + return self._op(other, operator.gt) + + def __eq__(self, other): + return self._op(other, operator.eq) + + def __ne__(self, other): + return self._op(other, operator.ne) + + def __repr__(self): + return repr(sql_util._repr_row(self)) + + def has_key(self, key): + """Return True if this RowProxy contains the given key.""" + + return self._parent._has_key(key) + + def items(self): + """Return a list of tuples, each tuple containing a key/value pair.""" + # TODO: no coverage here + return [(key, self[key]) for key in self.keys()] + + def keys(self): + """Return the list of keys as strings represented by this RowProxy.""" + + return self._parent.keys + + def iterkeys(self): + return iter(self._parent.keys) + + def itervalues(self): + return iter(self) + +try: + # Register RowProxy with Sequence, + # so sequence protocol is implemented + from collections import Sequence + Sequence.register(RowProxy) +except ImportError: + pass + + +class ResultMetaData(object): + """Handle cursor.description, applying additional info from an execution + context.""" + + __slots__ = ( + '_keymap', 'case_sensitive', 'matched_on_name', + '_processors', 'keys', '_orig_processors') + + def __init__(self, parent, cursor_description): + context = parent.context + dialect = context.dialect + self.case_sensitive = dialect.case_sensitive + self.matched_on_name = False + self._orig_processors = None + + if context.result_column_struct: + result_columns, cols_are_ordered, textual_ordered = \ + context.result_column_struct + num_ctx_cols = len(result_columns) + else: + result_columns = cols_are_ordered = \ + num_ctx_cols = textual_ordered = False + + # merge cursor.description with the column info + # present in the compiled structure, if any + raw = self._merge_cursor_description( + context, cursor_description, result_columns, + num_ctx_cols, cols_are_ordered, textual_ordered) + + self._keymap = {} + if not _baserowproxy_usecext: + # keymap indexes by integer index: this is only used + # in the pure Python BaseRowProxy.__getitem__ + # implementation to avoid an expensive + # isinstance(key, util.int_types) in the most common + # case path + + len_raw = len(raw) + + self._keymap.update([ + (elem[0], (elem[3], elem[4], elem[0])) + for elem in raw + ] + [ + (elem[0] - len_raw, (elem[3], elem[4], elem[0])) + for elem in raw + ]) + + # processors in key order for certain per-row + # views like __iter__ and slices + self._processors = [elem[3] for elem in raw] + + # keymap by primary string... 
+ by_key = dict([ + (elem[2], (elem[3], elem[4], elem[0])) + for elem in raw + ]) + + # for compiled SQL constructs, copy additional lookup keys into + # the key lookup map, such as Column objects, labels, + # column keys and other names + if num_ctx_cols: + + # if by-primary-string dictionary smaller (or bigger?!) than + # number of columns, assume we have dupes, rewrite + # dupe records with "None" for index which results in + # ambiguous column exception when accessed. + if len(by_key) != num_ctx_cols: + seen = set() + for rec in raw: + key = rec[1] + if key in seen: + # this is an "ambiguous" element, replacing + # the full record in the map + key = key.lower() if not self.case_sensitive else key + by_key[key] = (None, key, None) + seen.add(key) + + # copy secondary elements from compiled columns + # into self._keymap, write in the potentially "ambiguous" + # element + self._keymap.update([ + (obj_elem, by_key[elem[2]]) + for elem in raw if elem[4] + for obj_elem in elem[4] + ]) + + # if we did a pure positional match, then reset the + # original "expression element" back to the "unambiguous" + # entry. This is a new behavior in 1.1 which impacts + # TextAsFrom but also straight compiled SQL constructs. + if not self.matched_on_name: + self._keymap.update([ + (elem[4][0], (elem[3], elem[4], elem[0])) + for elem in raw if elem[4] + ]) + else: + # no dupes - copy secondary elements from compiled + # columns into self._keymap + self._keymap.update([ + (obj_elem, (elem[3], elem[4], elem[0])) + for elem in raw if elem[4] + for obj_elem in elem[4] + ]) + + # update keymap with primary string names taking + # precedence + self._keymap.update(by_key) + + # update keymap with "translated" names (sqlite-only thing) + if not num_ctx_cols and context._translate_colname: + self._keymap.update([ + (elem[5], self._keymap[elem[2]]) + for elem in raw if elem[5] + ]) + + def _merge_cursor_description( + self, context, cursor_description, result_columns, + num_ctx_cols, cols_are_ordered, textual_ordered): + """Merge a cursor.description with compiled result column information. + + There are at least four separate strategies used here, selected + depending on the type of SQL construct used to start with. + + The most common case is that of the compiled SQL expression construct, + which generated the column names present in the raw SQL string and + which has the identical number of columns as were reported by + cursor.description. In this case, we assume a 1-1 positional mapping + between the entries in cursor.description and the compiled object. + This is also the most performant case as we disregard extracting / + decoding the column names present in cursor.description since we + already have the desired name we generated in the compiled SQL + construct. + + The next common case is that of the completely raw string SQL, + such as passed to connection.execute(). In this case we have no + compiled construct to work with, so we extract and decode the + names from cursor.description and index those as the primary + result row target keys. + + The remaining fairly common case is that of the textual SQL + that includes at least partial column information; this is when + we use a :class:`.TextAsFrom` construct. This contruct may have + unordered or ordered column information. 
In the ordered case, we + merge the cursor.description and the compiled construct's information + positionally, and warn if there are additional description names + present, however we still decode the names in cursor.description + as we don't have a guarantee that the names in the columns match + on these. In the unordered case, we match names in cursor.description + to that of the compiled construct based on name matching. + In both of these cases, the cursor.description names and the column + expression objects and names are indexed as result row target keys. + + The final case is much less common, where we have a compiled + non-textual SQL expression construct, but the number of columns + in cursor.description doesn't match what's in the compiled + construct. We make the guess here that there might be textual + column expressions in the compiled construct that themselves include + a comma in them causing them to split. We do the same name-matching + as with textual non-ordered columns. + + The name-matched system of merging is the same as that used by + SQLAlchemy for all cases up through te 0.9 series. Positional + matching for compiled SQL expressions was introduced in 1.0 as a + major performance feature, and positional matching for textual + :class:`.TextAsFrom` objects in 1.1. As name matching is no longer + a common case, it was acceptable to factor it into smaller generator- + oriented methods that are easier to understand, but incur slightly + more performance overhead. + + """ + + case_sensitive = context.dialect.case_sensitive + + if num_ctx_cols and \ + cols_are_ordered and \ + not textual_ordered and \ + num_ctx_cols == len(cursor_description): + self.keys = [elem[0] for elem in result_columns] + # pure positional 1-1 case; doesn't need to read + # the names from cursor.description + return [ + ( + idx, + key, + name.lower() if not case_sensitive else name, + context.get_result_processor( + type_, key, cursor_description[idx][1] + ), + obj, + None + ) for idx, (key, name, obj, type_) + in enumerate(result_columns) + ] + else: + # name-based or text-positional cases, where we need + # to read cursor.description names + if textual_ordered: + # textual positional case + raw_iterator = self._merge_textual_cols_by_position( + context, cursor_description, result_columns) + elif num_ctx_cols: + # compiled SQL with a mismatch of description cols + # vs. compiled cols, or textual w/ unordered columns + raw_iterator = self._merge_cols_by_name( + context, cursor_description, result_columns) + else: + # no compiled SQL, just a raw string + raw_iterator = self._merge_cols_by_none( + context, cursor_description) + + return [ + ( + idx, colname, colname, + context.get_result_processor( + mapped_type, colname, coltype), + obj, untranslated) + + for idx, colname, mapped_type, coltype, obj, untranslated + in raw_iterator + ] + + def _colnames_from_description(self, context, cursor_description): + """Extract column names and data types from a cursor.description. + + Applies unicode decoding, column translation, "normalization", + and case sensitivity rules to the names based on the dialect. 
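+
+        As an illustration of the "textual positional" path described
+        above, ordered column information is supplied to a textual
+        statement via :meth:`.TextClause.columns` (a sketch; ``users``
+        and ``conn`` are assumed)::
+
+            from sqlalchemy import text
+
+            stmt = text("SELECT id, name FROM users").columns(
+                users.c.id, users.c.name)   # ordered, matched by position
+            row = conn.execute(stmt).fetchone()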
+ + """ + + dialect = context.dialect + case_sensitive = dialect.case_sensitive + translate_colname = context._translate_colname + description_decoder = dialect._description_decoder \ + if dialect.description_encoding else None + normalize_name = dialect.normalize_name \ + if dialect.requires_name_normalize else None + untranslated = None + + self.keys = [] + + for idx, rec in enumerate(cursor_description): + colname = rec[0] + coltype = rec[1] + + if description_decoder: + colname = description_decoder(colname) + + if translate_colname: + colname, untranslated = translate_colname(colname) + + if normalize_name: + colname = normalize_name(colname) + + self.keys.append(colname) + if not case_sensitive: + colname = colname.lower() + + yield idx, colname, untranslated, coltype + + def _merge_textual_cols_by_position( + self, context, cursor_description, result_columns): + dialect = context.dialect + typemap = dialect.dbapi_type_map + num_ctx_cols = len(result_columns) if result_columns else None + + if num_ctx_cols > len(cursor_description): + util.warn( + "Number of columns in textual SQL (%d) is " + "smaller than number of columns requested (%d)" % ( + num_ctx_cols, len(cursor_description) + )) + + seen = set() + for idx, colname, untranslated, coltype in \ + self._colnames_from_description(context, cursor_description): + if idx < num_ctx_cols: + ctx_rec = result_columns[idx] + obj = ctx_rec[2] + mapped_type = ctx_rec[3] + if obj[0] in seen: + raise exc.InvalidRequestError( + "Duplicate column expression requested " + "in textual SQL: %r" % obj[0]) + seen.add(obj[0]) + else: + mapped_type = typemap.get(coltype, sqltypes.NULLTYPE) + obj = None + + yield idx, colname, mapped_type, coltype, obj, untranslated + + def _merge_cols_by_name(self, context, cursor_description, result_columns): + dialect = context.dialect + typemap = dialect.dbapi_type_map + case_sensitive = dialect.case_sensitive + result_map = self._create_result_map(result_columns, case_sensitive) + + self.matched_on_name = True + for idx, colname, untranslated, coltype in \ + self._colnames_from_description(context, cursor_description): + try: + ctx_rec = result_map[colname] + except KeyError: + mapped_type = typemap.get(coltype, sqltypes.NULLTYPE) + obj = None + else: + obj = ctx_rec[1] + mapped_type = ctx_rec[2] + yield idx, colname, mapped_type, coltype, obj, untranslated + + def _merge_cols_by_none(self, context, cursor_description): + dialect = context.dialect + typemap = dialect.dbapi_type_map + for idx, colname, untranslated, coltype in \ + self._colnames_from_description(context, cursor_description): + mapped_type = typemap.get(coltype, sqltypes.NULLTYPE) + yield idx, colname, mapped_type, coltype, None, untranslated + + @classmethod + def _create_result_map(cls, result_columns, case_sensitive=True): + d = {} + for elem in result_columns: + key, rec = elem[0], elem[1:] + if not case_sensitive: + key = key.lower() + if key in d: + # conflicting keyname, just double up the list + # of objects. this will cause an "ambiguous name" + # error if an attempt is made by the result set to + # access. 
+ e_name, e_obj, e_type = d[key] + d[key] = e_name, e_obj + rec[1], e_type + else: + d[key] = rec + return d + + def _key_fallback(self, key, raiseerr=True): + map = self._keymap + result = None + if isinstance(key, util.string_types): + result = map.get(key if self.case_sensitive else key.lower()) + # fallback for targeting a ColumnElement to a textual expression + # this is a rare use case which only occurs when matching text() + # or colummn('name') constructs to ColumnElements, or after a + # pickle/unpickle roundtrip + elif isinstance(key, expression.ColumnElement): + if key._label and ( + key._label + if self.case_sensitive + else key._label.lower()) in map: + result = map[key._label + if self.case_sensitive + else key._label.lower()] + elif hasattr(key, 'name') and ( + key.name + if self.case_sensitive + else key.name.lower()) in map: + # match is only on name. + result = map[key.name + if self.case_sensitive + else key.name.lower()] + # search extra hard to make sure this + # isn't a column/label name overlap. + # this check isn't currently available if the row + # was unpickled. + if result is not None and \ + result[1] is not None: + for obj in result[1]: + if key._compare_name_for_result(obj): + break + else: + result = None + if result is None: + if raiseerr: + raise exc.NoSuchColumnError( + "Could not locate column in row for column '%s'" % + expression._string_or_unprintable(key)) + else: + return None + else: + map[key] = result + return result + + def _has_key(self, key): + if key in self._keymap: + return True + else: + return self._key_fallback(key, False) is not None + + def _getter(self, key, raiseerr=True): + if key in self._keymap: + processor, obj, index = self._keymap[key] + else: + ret = self._key_fallback(key, raiseerr) + if ret is None: + return None + processor, obj, index = ret + + if index is None: + raise exc.InvalidRequestError( + "Ambiguous column name '%s' in " + "result set column descriptions" % obj) + + return operator.itemgetter(index) + + def __getstate__(self): + return { + '_pickled_keymap': dict( + (key, index) + for key, (processor, obj, index) in self._keymap.items() + if isinstance(key, util.string_types + util.int_types) + ), + 'keys': self.keys, + "case_sensitive": self.case_sensitive, + "matched_on_name": self.matched_on_name + } + + def __setstate__(self, state): + # the row has been processed at pickling time so we don't need any + # processor anymore + self._processors = [None for _ in range(len(state['keys']))] + self._keymap = keymap = {} + for key, index in state['_pickled_keymap'].items(): + # not preserving "obj" here, unfortunately our + # proxy comparison fails with the unpickle + keymap[key] = (None, None, index) + self.keys = state['keys'] + self.case_sensitive = state['case_sensitive'] + self.matched_on_name = state['matched_on_name'] + + +class ResultProxy(object): + """Wraps a DB-API cursor object to provide easier access to row columns. + + Individual columns may be accessed by their integer position, + case-insensitive column name, or by ``schema.Column`` + object. e.g.:: + + row = fetchone() + + col1 = row[0] # access via integer position + + col2 = row['col2'] # access via name + + col3 = row[mytable.c.mycol] # access via Column object. + + ``ResultProxy`` also handles post-processing of result column + data using ``TypeEngine`` objects, which are referenced from + the originating SQL statement that produced this result set. 
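+
+    A typical round trip looks like the following (a sketch, assuming an
+    ``engine`` configured elsewhere)::
+
+        with engine.connect() as conn:
+            result = conn.execute("select * from users")
+            for row in result:      # exhausting the rows "soft closes"
+                print(row['name'])  # the underlying cursor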
+ + """ + + _process_row = RowProxy + out_parameters = None + _autoclose_connection = False + _metadata = None + _soft_closed = False + closed = False + + def __init__(self, context): + self.context = context + self.dialect = context.dialect + self.cursor = self._saved_cursor = context.cursor + self.connection = context.root_connection + self._echo = self.connection._echo and \ + context.engine._should_log_debug() + self._init_metadata() + + def _getter(self, key, raiseerr=True): + try: + getter = self._metadata._getter + except AttributeError: + return self._non_result(None) + else: + return getter(key, raiseerr) + + def _has_key(self, key): + try: + has_key = self._metadata._has_key + except AttributeError: + return self._non_result(None) + else: + return has_key(key) + + def _init_metadata(self): + cursor_description = self._cursor_description() + if cursor_description is not None: + if self.context.compiled and \ + 'compiled_cache' in self.context.execution_options: + if self.context.compiled._cached_metadata: + self._metadata = self.context.compiled._cached_metadata + else: + self._metadata = self.context.compiled._cached_metadata = \ + ResultMetaData(self, cursor_description) + else: + self._metadata = ResultMetaData(self, cursor_description) + if self._echo: + self.context.engine.logger.debug( + "Col %r", tuple(x[0] for x in cursor_description)) + + def keys(self): + """Return the current set of string keys for rows.""" + if self._metadata: + return self._metadata.keys + else: + return [] + + @util.memoized_property + def rowcount(self): + """Return the 'rowcount' for this result. + + The 'rowcount' reports the number of rows *matched* + by the WHERE criterion of an UPDATE or DELETE statement. + + .. note:: + + Notes regarding :attr:`.ResultProxy.rowcount`: + + + * This attribute returns the number of rows *matched*, + which is not necessarily the same as the number of rows + that were actually *modified* - an UPDATE statement, for example, + may have no net change on a given row if the SET values + given are the same as those present in the row already. + Such a row would be matched but not modified. + On backends that feature both styles, such as MySQL, + rowcount is configured by default to return the match + count in all cases. + + * :attr:`.ResultProxy.rowcount` is *only* useful in conjunction + with an UPDATE or DELETE statement. Contrary to what the Python + DBAPI says, it does *not* return the + number of rows available from the results of a SELECT statement + as DBAPIs cannot support this functionality when rows are + unbuffered. + + * :attr:`.ResultProxy.rowcount` may not be fully implemented by + all dialects. In particular, most DBAPIs do not support an + aggregate rowcount result from an executemany call. + The :meth:`.ResultProxy.supports_sane_rowcount` and + :meth:`.ResultProxy.supports_sane_multi_rowcount` methods + will report from the dialect if each usage is known to be + supported. + + * Statements that use RETURNING may not return a correct + rowcount. + + """ + try: + return self.context.rowcount + except BaseException as e: + self.connection._handle_dbapi_exception( + e, None, None, self.cursor, self.context) + + @property + def lastrowid(self): + """return the 'lastrowid' accessor on the DBAPI cursor. + + This is a DBAPI specific method and is only functional + for those backends which support it, for statements + where it is appropriate. It's behavior is not + consistent across backends. 
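+
+        For example (a sketch; shown against a DBAPI such as SQLite's
+        that supports the accessor)::
+
+            result = conn.execute(users.insert().values(name='ed'))
+            new_id = result.lastrowid    # DBAPI-dependent value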
+
+        Usage of this method is normally unnecessary when
+        using insert() expression constructs; the
+        :attr:`~ResultProxy.inserted_primary_key` attribute provides a
+        tuple of primary key values for a newly inserted row,
+        regardless of database backend.
+
+        """
+        try:
+            return self._saved_cursor.lastrowid
+        except BaseException as e:
+            self.connection._handle_dbapi_exception(
+                e, None, None,
+                self._saved_cursor, self.context)
+
+    @property
+    def returns_rows(self):
+        """True if this :class:`.ResultProxy` returns rows.
+
+        I.e. if it is legal to call the methods
+        :meth:`~.ResultProxy.fetchone`,
+        :meth:`~.ResultProxy.fetchmany`,
+        :meth:`~.ResultProxy.fetchall`.
+
+        """
+        return self._metadata is not None
+
+    @property
+    def is_insert(self):
+        """True if this :class:`.ResultProxy` is the result
+        of executing an expression language compiled
+        :func:`.expression.insert` construct.
+
+        When True, this implies that the
+        :attr:`inserted_primary_key` attribute is accessible,
+        assuming the statement did not include
+        a user defined "returning" construct.
+
+        """
+        return self.context.isinsert
+
+    def _cursor_description(self):
+        """May be overridden by subclasses."""
+
+        return self._saved_cursor.description
+
+    def _soft_close(self):
+        """Soft close this :class:`.ResultProxy`.
+
+        This releases all DBAPI cursor resources, but leaves the
+        ResultProxy "open" from a semantic perspective, meaning the
+        fetchXXX() methods will continue to return empty results.
+
+        This method is called automatically when:
+
+        * all result rows are exhausted using the fetchXXX() methods.
+        * cursor.description is None.
+
+        This method is **not public**, but is documented in order to clarify
+        the "autoclose" process used.
+
+        .. versionadded:: 1.0.0
+
+        .. seealso::
+
+            :meth:`.ResultProxy.close`
+
+
+        """
+        if self._soft_closed:
+            return
+        self._soft_closed = True
+        cursor = self.cursor
+        self.connection._safe_close_cursor(cursor)
+        if self._autoclose_connection:
+            self.connection.close()
+        self.cursor = None
+
+    def close(self):
+        """Close this ResultProxy.
+
+        This closes out the underlying DBAPI cursor corresponding
+        to the statement execution, if one is still present.  Note that the
+        DBAPI cursor is automatically released when the :class:`.ResultProxy`
+        exhausts all available rows.  :meth:`.ResultProxy.close` is generally
+        an optional method except in the case when discarding a
+        :class:`.ResultProxy` that still has additional rows pending for fetch.
+
+        In the case of a result that is the product of
+        :ref:`connectionless execution <dbengine_implicit>`,
+        the underlying :class:`.Connection` object is also closed, which
+        :term:`releases` DBAPI connection resources.
+
+        After this method is called, it is no longer valid to call upon
+        the fetch methods, which will raise a :class:`.ResourceClosedError`
+        on subsequent use.
+
+        .. versionchanged:: 1.0.0 - the :meth:`.ResultProxy.close` method
+           has been separated out from the process that releases the
+           underlying DBAPI cursor resource.  The "auto close" feature of the
+           :class:`.Connection` now performs a so-called "soft close", which
+           releases the underlying DBAPI cursor, but allows the
+           :class:`.ResultProxy` to still behave as an open-but-exhausted
+           result set; the actual :meth:`.ResultProxy.close` method is never
+           called.  It is still safe to discard a :class:`.ResultProxy`
+           that has been fully exhausted without calling this method.
+
+        .. 
seealso:: + + :ref:`connections_toplevel` + + :meth:`.ResultProxy._soft_close` + + """ + + if not self.closed: + self._soft_close() + self.closed = True + + def __iter__(self): + while True: + row = self.fetchone() + if row is None: + return + else: + yield row + + @util.memoized_property + def inserted_primary_key(self): + """Return the primary key for the row just inserted. + + The return value is a list of scalar values + corresponding to the list of primary key columns + in the target table. + + This only applies to single row :func:`.insert` + constructs which did not explicitly specify + :meth:`.Insert.returning`. + + Note that primary key columns which specify a + server_default clause, + or otherwise do not qualify as "autoincrement" + columns (see the notes at :class:`.Column`), and were + generated using the database-side default, will + appear in this list as ``None`` unless the backend + supports "returning" and the insert statement executed + with the "implicit returning" enabled. + + Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed + statement is not a compiled expression construct + or is not an insert() construct. + + """ + + if not self.context.compiled: + raise exc.InvalidRequestError( + "Statement is not a compiled " + "expression construct.") + elif not self.context.isinsert: + raise exc.InvalidRequestError( + "Statement is not an insert() " + "expression construct.") + elif self.context._is_explicit_returning: + raise exc.InvalidRequestError( + "Can't call inserted_primary_key " + "when returning() " + "is used.") + + return self.context.inserted_primary_key + + def last_updated_params(self): + """Return the collection of updated parameters from this + execution. + + Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed + statement is not a compiled expression construct + or is not an update() construct. + + """ + if not self.context.compiled: + raise exc.InvalidRequestError( + "Statement is not a compiled " + "expression construct.") + elif not self.context.isupdate: + raise exc.InvalidRequestError( + "Statement is not an update() " + "expression construct.") + elif self.context.executemany: + return self.context.compiled_parameters + else: + return self.context.compiled_parameters[0] + + def last_inserted_params(self): + """Return the collection of inserted parameters from this + execution. + + Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed + statement is not a compiled expression construct + or is not an insert() construct. + + """ + if not self.context.compiled: + raise exc.InvalidRequestError( + "Statement is not a compiled " + "expression construct.") + elif not self.context.isinsert: + raise exc.InvalidRequestError( + "Statement is not an insert() " + "expression construct.") + elif self.context.executemany: + return self.context.compiled_parameters + else: + return self.context.compiled_parameters[0] + + @property + def returned_defaults(self): + """Return the values of default columns that were fetched using + the :meth:`.ValuesBase.return_defaults` feature. + + The value is an instance of :class:`.RowProxy`, or ``None`` + if :meth:`.ValuesBase.return_defaults` was not used or if the + backend does not support RETURNING. + + .. versionadded:: 0.9.0 + + .. seealso:: + + :meth:`.ValuesBase.return_defaults` + + """ + return self.context.returned_defaults + + def lastrow_has_defaults(self): + """Return ``lastrow_has_defaults()`` from the underlying + :class:`.ExecutionContext`. 
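+
+        (Usage sketch, continuing the ``users`` examples above)::
+
+            result = conn.execute(users.insert().values(name='ed'))
+            result.inserted_primary_key    # e.g. [1]
+            result.lastrow_has_defaults()  # True if a default fired off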
+ + See :class:`.ExecutionContext` for details. + + """ + + return self.context.lastrow_has_defaults() + + def postfetch_cols(self): + """Return ``postfetch_cols()`` from the underlying + :class:`.ExecutionContext`. + + See :class:`.ExecutionContext` for details. + + Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed + statement is not a compiled expression construct + or is not an insert() or update() construct. + + """ + + if not self.context.compiled: + raise exc.InvalidRequestError( + "Statement is not a compiled " + "expression construct.") + elif not self.context.isinsert and not self.context.isupdate: + raise exc.InvalidRequestError( + "Statement is not an insert() or update() " + "expression construct.") + return self.context.postfetch_cols + + def prefetch_cols(self): + """Return ``prefetch_cols()`` from the underlying + :class:`.ExecutionContext`. + + See :class:`.ExecutionContext` for details. + + Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed + statement is not a compiled expression construct + or is not an insert() or update() construct. + + """ + + if not self.context.compiled: + raise exc.InvalidRequestError( + "Statement is not a compiled " + "expression construct.") + elif not self.context.isinsert and not self.context.isupdate: + raise exc.InvalidRequestError( + "Statement is not an insert() or update() " + "expression construct.") + return self.context.prefetch_cols + + def supports_sane_rowcount(self): + """Return ``supports_sane_rowcount`` from the dialect. + + See :attr:`.ResultProxy.rowcount` for background. + + """ + + return self.dialect.supports_sane_rowcount + + def supports_sane_multi_rowcount(self): + """Return ``supports_sane_multi_rowcount`` from the dialect. + + See :attr:`.ResultProxy.rowcount` for background. + + """ + + return self.dialect.supports_sane_multi_rowcount + + def _fetchone_impl(self): + try: + return self.cursor.fetchone() + except AttributeError: + return self._non_result(None) + + def _fetchmany_impl(self, size=None): + try: + if size is None: + return self.cursor.fetchmany() + else: + return self.cursor.fetchmany(size) + except AttributeError: + return self._non_result([]) + + def _fetchall_impl(self): + try: + return self.cursor.fetchall() + except AttributeError: + return self._non_result([]) + + def _non_result(self, default): + if self._metadata is None: + raise exc.ResourceClosedError( + "This result object does not return rows. " + "It has been closed automatically.", + ) + elif self.closed: + raise exc.ResourceClosedError("This result object is closed.") + else: + return default + + def process_rows(self, rows): + process_row = self._process_row + metadata = self._metadata + keymap = metadata._keymap + processors = metadata._processors + if self._echo: + log = self.context.engine.logger.debug + l = [] + for row in rows: + log("Row %r", sql_util._repr_row(row)) + l.append(process_row(metadata, row, processors, keymap)) + return l + else: + return [process_row(metadata, row, processors, keymap) + for row in rows] + + def fetchall(self): + """Fetch all rows, just like DB-API ``cursor.fetchall()``. + + After all rows have been exhausted, the underlying DBAPI + cursor resource is released, and the object may be safely + discarded. + + Subsequent calls to :meth:`.ResultProxy.fetchall` will return + an empty list. After the :meth:`.ResultProxy.close` method is + called, the method will raise :class:`.ResourceClosedError`. + + .. 
versionchanged:: 1.0.0 - Added "soft close" behavior which + allows the result to be used in an "exhausted" state prior to + calling the :meth:`.ResultProxy.close` method. + + """ + + try: + l = self.process_rows(self._fetchall_impl()) + self._soft_close() + return l + except BaseException as e: + self.connection._handle_dbapi_exception( + e, None, None, + self.cursor, self.context) + + def fetchmany(self, size=None): + """Fetch many rows, just like DB-API + ``cursor.fetchmany(size=cursor.arraysize)``. + + After all rows have been exhausted, the underlying DBAPI + cursor resource is released, and the object may be safely + discarded. + + Calls to :meth:`.ResultProxy.fetchmany` after all rows have been + exhausted will return + an empty list. After the :meth:`.ResultProxy.close` method is + called, the method will raise :class:`.ResourceClosedError`. + + .. versionchanged:: 1.0.0 - Added "soft close" behavior which + allows the result to be used in an "exhausted" state prior to + calling the :meth:`.ResultProxy.close` method. + + """ + + try: + l = self.process_rows(self._fetchmany_impl(size)) + if len(l) == 0: + self._soft_close() + return l + except BaseException as e: + self.connection._handle_dbapi_exception( + e, None, None, + self.cursor, self.context) + + def fetchone(self): + """Fetch one row, just like DB-API ``cursor.fetchone()``. + + After all rows have been exhausted, the underlying DBAPI + cursor resource is released, and the object may be safely + discarded. + + Calls to :meth:`.ResultProxy.fetchone` after all rows have + been exhausted will return ``None``. + After the :meth:`.ResultProxy.close` method is + called, the method will raise :class:`.ResourceClosedError`. + + .. versionchanged:: 1.0.0 - Added "soft close" behavior which + allows the result to be used in an "exhausted" state prior to + calling the :meth:`.ResultProxy.close` method. + + """ + try: + row = self._fetchone_impl() + if row is not None: + return self.process_rows([row])[0] + else: + self._soft_close() + return None + except BaseException as e: + self.connection._handle_dbapi_exception( + e, None, None, + self.cursor, self.context) + + def first(self): + """Fetch the first row and then close the result set unconditionally. + + Returns None if no row is present. + + After calling this method, the object is fully closed, + e.g. the :meth:`.ResultProxy.close` method will have been called. + + """ + if self._metadata is None: + return self._non_result(None) + + try: + row = self._fetchone_impl() + except BaseException as e: + self.connection._handle_dbapi_exception( + e, None, None, + self.cursor, self.context) + + try: + if row is not None: + return self.process_rows([row])[0] + else: + return None + finally: + self.close() + + def scalar(self): + """Fetch the first column of the first row, and close the result set. + + Returns None if no row is present. + + After calling this method, the object is fully closed, + e.g. the :meth:`.ResultProxy.close` method will have been called. + + """ + row = self.first() + if row is not None: + return row[0] + else: + return None + + +class BufferedRowResultProxy(ResultProxy): + """A ResultProxy with row buffering behavior. + + ``ResultProxy`` that buffers the contents of a selection of rows + before ``fetchone()`` is called. 
This is to allow the results of + ``cursor.description`` to be available immediately, when + interfacing with a DB-API that requires rows to be consumed before + this information is available (currently psycopg2, when used with + server-side cursors). + + The pre-fetching behavior fetches only one row initially, and then + grows its buffer size by a fixed amount with each successive need + for additional rows up to a size of 1000. + + The size argument is configurable using the ``max_row_buffer`` + execution option:: + + with psycopg2_engine.connect() as conn: + + result = conn.execution_options( + stream_results=True, max_row_buffer=50 + ).execute("select * from table") + + .. versionadded:: 1.0.6 Added the ``max_row_buffer`` option. + + .. seealso:: + + :ref:`psycopg2_execution_options` + """ + + def _init_metadata(self): + self._max_row_buffer = self.context.execution_options.get( + 'max_row_buffer', None) + self.__buffer_rows() + super(BufferedRowResultProxy, self)._init_metadata() + + # this is a "growth chart" for the buffering of rows. + # each successive __buffer_rows call will use the next + # value in the list for the buffer size until the max + # is reached + size_growth = { + 1: 5, + 5: 10, + 10: 20, + 20: 50, + 50: 100, + 100: 250, + 250: 500, + 500: 1000 + } + + def __buffer_rows(self): + if self.cursor is None: + return + size = getattr(self, '_bufsize', 1) + self.__rowbuffer = collections.deque(self.cursor.fetchmany(size)) + self._bufsize = self.size_growth.get(size, size) + if self._max_row_buffer is not None: + self._bufsize = min(self._max_row_buffer, self._bufsize) + + def _soft_close(self, **kw): + self.__rowbuffer.clear() + super(BufferedRowResultProxy, self)._soft_close(**kw) + + def _fetchone_impl(self): + if self.cursor is None: + return self._non_result(None) + if not self.__rowbuffer: + self.__buffer_rows() + if not self.__rowbuffer: + return None + return self.__rowbuffer.popleft() + + def _fetchmany_impl(self, size=None): + if size is None: + return self._fetchall_impl() + result = [] + for x in range(0, size): + row = self._fetchone_impl() + if row is None: + break + result.append(row) + return result + + def _fetchall_impl(self): + if self.cursor is None: + return self._non_result([]) + self.__rowbuffer.extend(self.cursor.fetchall()) + ret = self.__rowbuffer + self.__rowbuffer = collections.deque() + return ret + + +class FullyBufferedResultProxy(ResultProxy): + """A result proxy that buffers rows fully upon creation. + + Used for operations where a result is to be delivered + after the database conversation can not be continued, + such as MSSQL INSERT...OUTPUT after an autocommit. 
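+
+    A minimal sketch of the idea (a toy illustration, not the
+    implementation that follows): the entire result is drained into a
+    deque up front, and the fetch methods then serve rows from memory::
+
+        import collections
+
+        class ToyFullyBuffered(object):
+            def __init__(self, cursor):
+                # consume every row immediately, before the database
+                # conversation moves on
+                self._rows = collections.deque(cursor.fetchall())
+
+            def fetchone(self):
+                return self._rows.popleft() if self._rows else None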
+ + """ + + def _init_metadata(self): + super(FullyBufferedResultProxy, self)._init_metadata() + self.__rowbuffer = self._buffer_rows() + + def _buffer_rows(self): + return collections.deque(self.cursor.fetchall()) + + def _soft_close(self, **kw): + self.__rowbuffer.clear() + super(FullyBufferedResultProxy, self)._soft_close(**kw) + + def _fetchone_impl(self): + if self.__rowbuffer: + return self.__rowbuffer.popleft() + else: + return self._non_result(None) + + def _fetchmany_impl(self, size=None): + if size is None: + return self._fetchall_impl() + result = [] + for x in range(0, size): + row = self._fetchone_impl() + if row is None: + break + result.append(row) + return result + + def _fetchall_impl(self): + if not self.cursor: + return self._non_result([]) + ret = self.__rowbuffer + self.__rowbuffer = collections.deque() + return ret + + +class BufferedColumnRow(RowProxy): + def __init__(self, parent, row, processors, keymap): + # preprocess row + row = list(row) + # this is a tad faster than using enumerate + index = 0 + for processor in parent._orig_processors: + if processor is not None: + row[index] = processor(row[index]) + index += 1 + row = tuple(row) + super(BufferedColumnRow, self).__init__(parent, row, + processors, keymap) + + +class BufferedColumnResultProxy(ResultProxy): + """A ResultProxy with column buffering behavior. + + ``ResultProxy`` that loads all columns into memory each time + fetchone() is called. If fetchmany() or fetchall() are called, + the full grid of results is fetched. This is to operate with + databases where result rows contain "live" results that fall out + of scope unless explicitly fetched. Currently this includes + cx_Oracle LOB objects. + + """ + + _process_row = BufferedColumnRow + + def _init_metadata(self): + super(BufferedColumnResultProxy, self)._init_metadata() + + metadata = self._metadata + + # don't double-replace the processors, in the case + # of a cached ResultMetaData + if metadata._orig_processors is None: + # orig_processors will be used to preprocess each row when + # they are constructed. + metadata._orig_processors = metadata._processors + # replace the all type processors by None processors. + metadata._processors = [None for _ in range(len(metadata.keys))] + keymap = {} + for k, (func, obj, index) in metadata._keymap.items(): + keymap[k] = (None, obj, index) + metadata._keymap = keymap + + def fetchall(self): + # can't call cursor.fetchall(), since rows must be + # fully processed before requesting more from the DBAPI. + l = [] + while True: + row = self.fetchone() + if row is None: + break + l.append(row) + return l + + def fetchmany(self, size=None): + # can't call cursor.fetchmany(), since rows must be + # fully processed before requesting more from the DBAPI. + if size is None: + return self.fetchall() + l = [] + for i in range(size): + row = self.fetchone() + if row is None: + break + l.append(row) + return l diff --git a/app/lib/sqlalchemy/engine/strategies.py b/app/lib/sqlalchemy/engine/strategies.py new file mode 100644 index 0000000..81bb2c5 --- /dev/null +++ b/app/lib/sqlalchemy/engine/strategies.py @@ -0,0 +1,283 @@ +# engine/strategies.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Strategies for creating new instances of Engine types. 
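+
+For example (illustrative; the URL is a placeholder), a strategy is
+selected via the ``strategy`` keyword::
+
+    from sqlalchemy import create_engine
+
+    engine = create_engine('sqlite://', strategy='threadlocal')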
+ +These are semi-private implementation classes which provide the +underlying behavior for the "strategy" keyword argument available on +:func:`~sqlalchemy.engine.create_engine`. Current available options are +``plain``, ``threadlocal``, and ``mock``. + +New strategies can be added via new ``EngineStrategy`` classes. +""" + +from operator import attrgetter + +from sqlalchemy.engine import base, threadlocal, url +from sqlalchemy import util, event +from sqlalchemy import pool as poollib +from sqlalchemy.sql import schema + +strategies = {} + + +class EngineStrategy(object): + """An adaptor that processes input arguments and produces an Engine. + + Provides a ``create`` method that receives input arguments and + produces an instance of base.Engine or a subclass. + + """ + + def __init__(self): + strategies[self.name] = self + + def create(self, *args, **kwargs): + """Given arguments, returns a new Engine instance.""" + + raise NotImplementedError() + + +class DefaultEngineStrategy(EngineStrategy): + """Base class for built-in strategies.""" + + def create(self, name_or_url, **kwargs): + # create url.URL object + u = url.make_url(name_or_url) + + plugins = u._instantiate_plugins(kwargs) + + u.query.pop('plugin', None) + + entrypoint = u._get_entrypoint() + dialect_cls = entrypoint.get_dialect_cls(u) + + if kwargs.pop('_coerce_config', False): + def pop_kwarg(key, default=None): + value = kwargs.pop(key, default) + if key in dialect_cls.engine_config_types: + value = dialect_cls.engine_config_types[key](value) + return value + else: + pop_kwarg = kwargs.pop + + dialect_args = {} + # consume dialect arguments from kwargs + for k in util.get_cls_kwargs(dialect_cls): + if k in kwargs: + dialect_args[k] = pop_kwarg(k) + + dbapi = kwargs.pop('module', None) + if dbapi is None: + dbapi_args = {} + for k in util.get_func_kwargs(dialect_cls.dbapi): + if k in kwargs: + dbapi_args[k] = pop_kwarg(k) + dbapi = dialect_cls.dbapi(**dbapi_args) + + dialect_args['dbapi'] = dbapi + + for plugin in plugins: + plugin.handle_dialect_kwargs(dialect_cls, dialect_args) + + # create dialect + dialect = dialect_cls(**dialect_args) + + # assemble connection arguments + (cargs, cparams) = dialect.create_connect_args(u) + cparams.update(pop_kwarg('connect_args', {})) + cargs = list(cargs) # allow mutability + + # look for existing pool or create + pool = pop_kwarg('pool', None) + if pool is None: + def connect(connection_record=None): + if dialect._has_events: + for fn in dialect.dispatch.do_connect: + connection = fn( + dialect, connection_record, cargs, cparams) + if connection is not None: + return connection + return dialect.connect(*cargs, **cparams) + + creator = pop_kwarg('creator', connect) + + poolclass = pop_kwarg('poolclass', None) + if poolclass is None: + poolclass = dialect_cls.get_pool_class(u) + pool_args = { + 'dialect': dialect + } + + # consume pool arguments from kwargs, translating a few of + # the arguments + translate = {'logging_name': 'pool_logging_name', + 'echo': 'echo_pool', + 'timeout': 'pool_timeout', + 'recycle': 'pool_recycle', + 'events': 'pool_events', + 'use_threadlocal': 'pool_threadlocal', + 'reset_on_return': 'pool_reset_on_return'} + for k in util.get_cls_kwargs(poolclass): + tk = translate.get(k, k) + if tk in kwargs: + pool_args[k] = pop_kwarg(tk) + + for plugin in plugins: + plugin.handle_pool_kwargs(poolclass, pool_args) + + pool = poolclass(creator, **pool_args) + else: + if isinstance(pool, poollib._DBProxy): + pool = pool.get_pool(*cargs, **cparams) + else: + pool = pool + + 
pool._dialect = dialect + + # create engine. + engineclass = self.engine_cls + engine_args = {} + for k in util.get_cls_kwargs(engineclass): + if k in kwargs: + engine_args[k] = pop_kwarg(k) + + _initialize = kwargs.pop('_initialize', True) + + # all kwargs should be consumed + if kwargs: + raise TypeError( + "Invalid argument(s) %s sent to create_engine(), " + "using configuration %s/%s/%s. Please check that the " + "keyword arguments are appropriate for this combination " + "of components." % (','.join("'%s'" % k for k in kwargs), + dialect.__class__.__name__, + pool.__class__.__name__, + engineclass.__name__)) + + engine = engineclass(pool, dialect, u, **engine_args) + + if _initialize: + do_on_connect = dialect.on_connect() + if do_on_connect: + def on_connect(dbapi_connection, connection_record): + conn = getattr( + dbapi_connection, '_sqla_unwrap', dbapi_connection) + if conn is None: + return + do_on_connect(conn) + + event.listen(pool, 'first_connect', on_connect) + event.listen(pool, 'connect', on_connect) + + def first_connect(dbapi_connection, connection_record): + c = base.Connection(engine, connection=dbapi_connection, + _has_events=False) + c._execution_options = util.immutabledict() + dialect.initialize(c) + event.listen(pool, 'first_connect', first_connect, once=True) + + dialect_cls.engine_created(engine) + if entrypoint is not dialect_cls: + entrypoint.engine_created(engine) + + for plugin in plugins: + plugin.engine_created(engine) + + return engine + + +class PlainEngineStrategy(DefaultEngineStrategy): + """Strategy for configuring a regular Engine.""" + + name = 'plain' + engine_cls = base.Engine + +PlainEngineStrategy() + + +class ThreadLocalEngineStrategy(DefaultEngineStrategy): + """Strategy for configuring an Engine with threadlocal behavior.""" + + name = 'threadlocal' + engine_cls = threadlocal.TLEngine + +ThreadLocalEngineStrategy() + + +class MockEngineStrategy(EngineStrategy): + """Strategy for configuring an Engine-like object with mocked execution. + + Produces a single mock Connectable object which dispatches + statement execution to a passed-in function. 
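+
+    E.g. (an illustrative sketch of the usual "mock" pattern; the
+    ``dump`` function name and URL are placeholders)::
+
+        from sqlalchemy import create_engine
+
+        def dump(sql, *multiparams, **params):
+            # each statement construct is routed here instead of being
+            # executed against a database
+            print(sql.compile(dialect=engine.dialect))
+
+        engine = create_engine(
+            'postgresql://', strategy='mock', executor=dump)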
+ + """ + + name = 'mock' + + def create(self, name_or_url, executor, **kwargs): + # create url.URL object + u = url.make_url(name_or_url) + + dialect_cls = u.get_dialect() + + dialect_args = {} + # consume dialect arguments from kwargs + for k in util.get_cls_kwargs(dialect_cls): + if k in kwargs: + dialect_args[k] = kwargs.pop(k) + + # create dialect + dialect = dialect_cls(**dialect_args) + + return MockEngineStrategy.MockConnection(dialect, executor) + + class MockConnection(base.Connectable): + def __init__(self, dialect, execute): + self._dialect = dialect + self.execute = execute + + engine = property(lambda s: s) + dialect = property(attrgetter('_dialect')) + name = property(lambda s: s._dialect.name) + + schema_for_object = schema._schema_getter(None) + + def contextual_connect(self, **kwargs): + return self + + def execution_options(self, **kw): + return self + + def compiler(self, statement, parameters, **kwargs): + return self._dialect.compiler( + statement, parameters, engine=self, **kwargs) + + def create(self, entity, **kwargs): + kwargs['checkfirst'] = False + from sqlalchemy.engine import ddl + + ddl.SchemaGenerator( + self.dialect, self, **kwargs).traverse_single(entity) + + def drop(self, entity, **kwargs): + kwargs['checkfirst'] = False + from sqlalchemy.engine import ddl + ddl.SchemaDropper( + self.dialect, self, **kwargs).traverse_single(entity) + + def _run_visitor(self, visitorcallable, element, + connection=None, + **kwargs): + kwargs['checkfirst'] = False + visitorcallable(self.dialect, self, + **kwargs).traverse_single(element) + + def execute(self, object, *multiparams, **params): + raise NotImplementedError() + +MockEngineStrategy() diff --git a/app/lib/sqlalchemy/engine/threadlocal.py b/app/lib/sqlalchemy/engine/threadlocal.py new file mode 100644 index 0000000..ee31764 --- /dev/null +++ b/app/lib/sqlalchemy/engine/threadlocal.py @@ -0,0 +1,138 @@ +# engine/threadlocal.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Provides a thread-local transactional wrapper around the root Engine class. + +The ``threadlocal`` module is invoked when using the +``strategy="threadlocal"`` flag with :func:`~sqlalchemy.engine.create_engine`. +This module is semi-private and is invoked automatically when the threadlocal +engine strategy is used. +""" + +from .. import util +from . import base +import weakref + + +class TLConnection(base.Connection): + + def __init__(self, *arg, **kw): + super(TLConnection, self).__init__(*arg, **kw) + self.__opencount = 0 + + def _increment_connect(self): + self.__opencount += 1 + return self + + def close(self): + if self.__opencount == 1: + base.Connection.close(self) + self.__opencount -= 1 + + def _force_close(self): + self.__opencount = 0 + base.Connection.close(self) + + +class TLEngine(base.Engine): + """An Engine that includes support for thread-local managed + transactions. + + """ + _tl_connection_cls = TLConnection + + def __init__(self, *args, **kwargs): + super(TLEngine, self).__init__(*args, **kwargs) + self._connections = util.threading.local() + + def contextual_connect(self, **kw): + if not hasattr(self._connections, 'conn'): + connection = None + else: + connection = self._connections.conn() + + if connection is None or connection.closed: + # guards against pool-level reapers, if desired. 
+ # or not connection.connection.is_valid: + connection = self._tl_connection_cls( + self, + self._wrap_pool_connect( + self.pool.connect, connection), + **kw) + self._connections.conn = weakref.ref(connection) + + return connection._increment_connect() + + def begin_twophase(self, xid=None): + if not hasattr(self._connections, 'trans'): + self._connections.trans = [] + self._connections.trans.append( + self.contextual_connect().begin_twophase(xid=xid)) + return self + + def begin_nested(self): + if not hasattr(self._connections, 'trans'): + self._connections.trans = [] + self._connections.trans.append( + self.contextual_connect().begin_nested()) + return self + + def begin(self): + if not hasattr(self._connections, 'trans'): + self._connections.trans = [] + self._connections.trans.append(self.contextual_connect().begin()) + return self + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + if type is None: + self.commit() + else: + self.rollback() + + def prepare(self): + if not hasattr(self._connections, 'trans') or \ + not self._connections.trans: + return + self._connections.trans[-1].prepare() + + def commit(self): + if not hasattr(self._connections, 'trans') or \ + not self._connections.trans: + return + trans = self._connections.trans.pop(-1) + trans.commit() + + def rollback(self): + if not hasattr(self._connections, 'trans') or \ + not self._connections.trans: + return + trans = self._connections.trans.pop(-1) + trans.rollback() + + def dispose(self): + self._connections = util.threading.local() + super(TLEngine, self).dispose() + + @property + def closed(self): + return not hasattr(self._connections, 'conn') or \ + self._connections.conn() is None or \ + self._connections.conn().closed + + def close(self): + if not self.closed: + self.contextual_connect().close() + connection = self._connections.conn() + connection._force_close() + del self._connections.conn + self._connections.trans = [] + + def __repr__(self): + return 'TLEngine(%r)' % self.url diff --git a/app/lib/sqlalchemy/engine/url.py b/app/lib/sqlalchemy/engine/url.py new file mode 100644 index 0000000..1c16584 --- /dev/null +++ b/app/lib/sqlalchemy/engine/url.py @@ -0,0 +1,261 @@ +# engine/url.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Provides the :class:`~sqlalchemy.engine.url.URL` class which encapsulates +information about a database connection specification. + +The URL object is created automatically when +:func:`~sqlalchemy.engine.create_engine` is called with a string +argument; alternatively, the URL is a public-facing construct which can +be used directly and is also accepted directly by ``create_engine()``. +""" + +import re +from .. import exc, util +from . import Dialect +from ..dialects import registry, plugins + + +class URL(object): + """ + Represent the components of a URL used to connect to a database. + + This object is suitable to be passed directly to a + :func:`~sqlalchemy.create_engine` call. The fields of the URL are parsed + from a string by the :func:`.make_url` function. the string + format of the URL is an RFC-1738-style string. + + All initialization parameters are available as public attributes. + + :param drivername: the name of the database backend. + This name will correspond to a module in sqlalchemy/databases + or a third party plug-in. + + :param username: The user name. 
+ + :param password: database password. + + :param host: The name of the host. + + :param port: The port number. + + :param database: The database name. + + :param query: A dictionary of options to be passed to the + dialect and/or the DBAPI upon connect. + + """ + + def __init__(self, drivername, username=None, password=None, + host=None, port=None, database=None, query=None): + self.drivername = drivername + self.username = username + self.password = password + self.host = host + if port is not None: + self.port = int(port) + else: + self.port = None + self.database = database + self.query = query or {} + + def __to_string__(self, hide_password=True): + s = self.drivername + "://" + if self.username is not None: + s += _rfc_1738_quote(self.username) + if self.password is not None: + s += ':' + ('***' if hide_password + else _rfc_1738_quote(self.password)) + s += "@" + if self.host is not None: + if ':' in self.host: + s += "[%s]" % self.host + else: + s += self.host + if self.port is not None: + s += ':' + str(self.port) + if self.database is not None: + s += '/' + self.database + if self.query: + keys = list(self.query) + keys.sort() + s += '?' + "&".join("%s=%s" % (k, self.query[k]) for k in keys) + return s + + def __str__(self): + return self.__to_string__(hide_password=False) + + def __repr__(self): + return self.__to_string__() + + def __hash__(self): + return hash(str(self)) + + def __eq__(self, other): + return \ + isinstance(other, URL) and \ + self.drivername == other.drivername and \ + self.username == other.username and \ + self.password == other.password and \ + self.host == other.host and \ + self.database == other.database and \ + self.query == other.query + + def get_backend_name(self): + if '+' not in self.drivername: + return self.drivername + else: + return self.drivername.split('+')[0] + + def get_driver_name(self): + if '+' not in self.drivername: + return self.get_dialect().driver + else: + return self.drivername.split('+')[1] + + def _instantiate_plugins(self, kwargs): + plugin_names = util.to_list(self.query.get('plugin', ())) + + return [ + plugins.load(plugin_name)(self, kwargs) + for plugin_name in plugin_names + ] + + def _get_entrypoint(self): + """Return the "entry point" dialect class. + + This is normally the dialect itself except in the case when the + returned class implements the get_dialect_cls() method. + + """ + if '+' not in self.drivername: + name = self.drivername + else: + name = self.drivername.replace('+', '.') + cls = registry.load(name) + # check for legacy dialects that + # would return a module with 'dialect' as the + # actual class + if hasattr(cls, 'dialect') and \ + isinstance(cls.dialect, type) and \ + issubclass(cls.dialect, Dialect): + return cls.dialect + else: + return cls + + def get_dialect(self): + """Return the SQLAlchemy database dialect class corresponding + to this URL's driver name. + """ + entrypoint = self._get_entrypoint() + dialect_cls = entrypoint.get_dialect_cls(self) + return dialect_cls + + def translate_connect_args(self, names=[], **kw): + r"""Translate url attributes into a dictionary of connection arguments. + + Returns attributes of this url (`host`, `database`, `username`, + `password`, `port`) as a plain dictionary. The attribute names are + used as the keys by default. Unset or false attributes are omitted + from the final dictionary. + + :param \**kw: Optional, alternate key names for url attributes. + + :param names: Deprecated. 
Same purpose as the keyword-based alternate + names, but correlates the name to the original positionally. + """ + + translated = {} + attribute_names = ['host', 'database', 'username', 'password', 'port'] + for sname in attribute_names: + if names: + name = names.pop(0) + elif sname in kw: + name = kw[sname] + else: + name = sname + if name is not None and getattr(self, sname, False): + translated[name] = getattr(self, sname) + return translated + + +def make_url(name_or_url): + """Given a string or unicode instance, produce a new URL instance. + + The given string is parsed according to the RFC 1738 spec. If an + existing URL object is passed, just returns the object. + """ + + if isinstance(name_or_url, util.string_types): + return _parse_rfc1738_args(name_or_url) + else: + return name_or_url + + +def _parse_rfc1738_args(name): + pattern = re.compile(r''' + (?P[\w\+]+):// + (?: + (?P[^:/]*) + (?::(?P.*))? + @)? + (?: + (?: + \[(?P[^/]+)\] | + (?P[^/:]+) + )? + (?::(?P[^/]*))? + )? + (?:/(?P.*))? + ''', re.X) + + m = pattern.match(name) + if m is not None: + components = m.groupdict() + if components['database'] is not None: + tokens = components['database'].split('?', 2) + components['database'] = tokens[0] + query = ( + len(tokens) > 1 and dict(util.parse_qsl(tokens[1]))) or None + if util.py2k and query is not None: + query = dict((k.encode('ascii'), query[k]) for k in query) + else: + query = None + components['query'] = query + + if components['username'] is not None: + components['username'] = _rfc_1738_unquote(components['username']) + + if components['password'] is not None: + components['password'] = _rfc_1738_unquote(components['password']) + + ipv4host = components.pop('ipv4host') + ipv6host = components.pop('ipv6host') + components['host'] = ipv4host or ipv6host + name = components.pop('name') + return URL(name, **components) + else: + raise exc.ArgumentError( + "Could not parse rfc1738 URL from string '%s'" % name) + + +def _rfc_1738_quote(text): + return re.sub(r'[:@/]', lambda m: "%%%X" % ord(m.group(0)), text) + + +def _rfc_1738_unquote(text): + return util.unquote(text) + + +def _parse_keyvalue_args(name): + m = re.match(r'(\w+)://(.*)', name) + if m is not None: + (name, args) = m.group(1, 2) + opts = dict(util.parse_qsl(args)) + return URL(name, *opts) + else: + return None diff --git a/app/lib/sqlalchemy/engine/util.py b/app/lib/sqlalchemy/engine/util.py new file mode 100644 index 0000000..831b63e --- /dev/null +++ b/app/lib/sqlalchemy/engine/util.py @@ -0,0 +1,74 @@ +# engine/util.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from .. import util + + +def connection_memoize(key): + """Decorator, memoize a function in a connection.info stash. + + Only applicable to functions which take no arguments other than a + connection. The memo will be stored in ``connection.info[key]``. + """ + + @util.decorator + def decorated(fn, self, connection): + connection = connection.connect() + try: + return connection.info[key] + except KeyError: + connection.info[key] = val = fn(self, connection) + return val + + return decorated + + +def py_fallback(): + def _distill_params(multiparams, params): + """Given arguments from the calling form *multiparams, **params, + return a list of bind parameter structures, usually a list of + dictionaries. 
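+
+        As an illustration (hypothetical values), the expected
+        distillations are::
+
+            _distill_params((), {})                       # []
+            _distill_params((), {'x': 5})                 # [{'x': 5}]
+            _distill_params(({'x': 5},), {})              # [{'x': 5}]
+            _distill_params(([{'x': 5}, {'x': 6}],), {})  # [{'x': 5},
+                                                          #  {'x': 6}]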
+ + In the case of 'raw' execution which accepts positional parameters, + it may be a list of tuples or lists. + + """ + + if not multiparams: + if params: + return [params] + else: + return [] + elif len(multiparams) == 1: + zero = multiparams[0] + if isinstance(zero, (list, tuple)): + if not zero or hasattr(zero[0], '__iter__') and \ + not hasattr(zero[0], 'strip'): + # execute(stmt, [{}, {}, {}, ...]) + # execute(stmt, [(), (), (), ...]) + return zero + else: + # execute(stmt, ("value", "value")) + return [zero] + elif hasattr(zero, 'keys'): + # execute(stmt, {"key":"value"}) + return [zero] + else: + # execute(stmt, "value") + return [[zero]] + else: + if hasattr(multiparams[0], '__iter__') and \ + not hasattr(multiparams[0], 'strip'): + return multiparams + else: + return [multiparams] + + return locals() +try: + from sqlalchemy.cutils import _distill_params +except ImportError: + globals().update(py_fallback()) diff --git a/app/lib/sqlalchemy/event/__init__.py b/app/lib/sqlalchemy/event/__init__.py new file mode 100644 index 0000000..5cd01a9 --- /dev/null +++ b/app/lib/sqlalchemy/event/__init__.py @@ -0,0 +1,11 @@ +# event/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from .api import CANCEL, NO_RETVAL, listen, listens_for, remove, contains +from .base import Events, dispatcher +from .attr import RefCollection +from .legacy import _legacy_signature diff --git a/app/lib/sqlalchemy/event/api.py b/app/lib/sqlalchemy/event/api.py new file mode 100644 index 0000000..b24c8fe --- /dev/null +++ b/app/lib/sqlalchemy/event/api.py @@ -0,0 +1,188 @@ +# event/api.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Public API functions for the event system. + +""" +from __future__ import absolute_import + +from .. import util, exc +from .base import _registrars +from .registry import _EventKey + +CANCEL = util.symbol('CANCEL') +NO_RETVAL = util.symbol('NO_RETVAL') + + +def _event_key(target, identifier, fn): + for evt_cls in _registrars[identifier]: + tgt = evt_cls._accept_with(target) + if tgt is not None: + return _EventKey(target, identifier, fn, tgt) + else: + raise exc.InvalidRequestError("No such event '%s' for target '%s'" % + (identifier, target)) + + +def listen(target, identifier, fn, *args, **kw): + """Register a listener function for the given target. + + e.g.:: + + from sqlalchemy import event + from sqlalchemy.schema import UniqueConstraint + + def unique_constraint_name(const, table): + const.name = "uq_%s_%s" % ( + table.name, + list(const.columns)[0].name + ) + event.listen( + UniqueConstraint, + "after_parent_attach", + unique_constraint_name) + + + A given function can also be invoked for only the first invocation + of the event using the ``once`` argument:: + + def on_config(): + do_config() + + event.listen(Mapper, "before_configure", on_config, once=True) + + .. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen` + and :func:`.event.listens_for`. + + .. note:: + + The :func:`.listen` function cannot be called at the same time + that the target event is being run. This has implications + for thread safety, and also means an event cannot be added + from inside the listener function for itself. 
The list of + events to be run are present inside of a mutable collection + that can't be changed during iteration. + + Event registration and removal is not intended to be a "high + velocity" operation; it is a configurational operation. For + systems that need to quickly associate and deassociate with + events at high scale, use a mutable structure that is handled + from inside of a single listener. + + .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now + used as the container for the list of events, which explicitly + disallows collection mutation while the collection is being + iterated. + + .. seealso:: + + :func:`.listens_for` + + :func:`.remove` + + """ + + _event_key(target, identifier, fn).listen(*args, **kw) + + +def listens_for(target, identifier, *args, **kw): + """Decorate a function as a listener for the given target + identifier. + + e.g.:: + + from sqlalchemy import event + from sqlalchemy.schema import UniqueConstraint + + @event.listens_for(UniqueConstraint, "after_parent_attach") + def unique_constraint_name(const, table): + const.name = "uq_%s_%s" % ( + table.name, + list(const.columns)[0].name + ) + + A given function can also be invoked for only the first invocation + of the event using the ``once`` argument:: + + @event.listens_for(Mapper, "before_configure", once=True) + def on_config(): + do_config() + + + .. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen` + and :func:`.event.listens_for`. + + .. seealso:: + + :func:`.listen` - general description of event listening + + """ + def decorate(fn): + listen(target, identifier, fn, *args, **kw) + return fn + return decorate + + +def remove(target, identifier, fn): + """Remove an event listener. + + The arguments here should match exactly those which were sent to + :func:`.listen`; all the event registration which proceeded as a result + of this call will be reverted by calling :func:`.remove` with the same + arguments. + + e.g.:: + + # if a function was registered like this... + @event.listens_for(SomeMappedClass, "before_insert", propagate=True) + def my_listener_function(*arg): + pass + + # ... it's removed like this + event.remove(SomeMappedClass, "before_insert", my_listener_function) + + Above, the listener function associated with ``SomeMappedClass`` was also + propagated to subclasses of ``SomeMappedClass``; the :func:`.remove` + function will revert all of these operations. + + .. versionadded:: 0.9.0 + + .. note:: + + The :func:`.remove` function cannot be called at the same time + that the target event is being run. This has implications + for thread safety, and also means an event cannot be removed + from inside the listener function for itself. The list of + events to be run are present inside of a mutable collection + that can't be changed during iteration. + + Event registration and removal is not intended to be a "high + velocity" operation; it is a configurational operation. For + systems that need to quickly associate and deassociate with + events at high scale, use a mutable structure that is handled + from inside of a single listener. + + .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now + used as the container for the list of events, which explicitly + disallows collection mutation while the collection is being + iterated. + + .. seealso:: + + :func:`.listen` + + """ + _event_key(target, identifier, fn).remove() + + +def contains(target, identifier, fn): + """Return True if the given target/ident/fn is set up to listen. + + .. 
versionadded:: 0.9.0 + + """ + + return _event_key(target, identifier, fn).contains() diff --git a/app/lib/sqlalchemy/event/attr.py b/app/lib/sqlalchemy/event/attr.py new file mode 100644 index 0000000..84ef097 --- /dev/null +++ b/app/lib/sqlalchemy/event/attr.py @@ -0,0 +1,373 @@ +# event/attr.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Attribute implementation for _Dispatch classes. + +The various listener targets for a particular event class are represented +as attributes, which refer to collections of listeners to be fired off. +These collections can exist at the class level as well as at the instance +level. An event is fired off using code like this:: + + some_object.dispatch.first_connect(arg1, arg2) + +Above, ``some_object.dispatch`` would be an instance of ``_Dispatch`` and +``first_connect`` is typically an instance of ``_ListenerCollection`` +if event listeners are present, or ``_EmptyListener`` if none are present. + +The attribute mechanics here spend effort trying to ensure listener functions +are available with a minimum of function call overhead, that unnecessary +objects aren't created (i.e. many empty per-instance listener collections), +as well as that everything is garbage collectable when owning references are +lost. Other features such as "propagation" of listener functions across +many ``_Dispatch`` instances, "joining" of multiple ``_Dispatch`` instances, +as well as support for subclass propagation (e.g. events assigned to +``Pool`` vs. ``QueuePool``) are all implemented here. + +""" + +from __future__ import absolute_import, with_statement + +from .. import util +from ..util import threading +from . import registry +from . import legacy +from itertools import chain +import weakref +import collections + + +class RefCollection(util.MemoizedSlots): + __slots__ = 'ref', + + def _memoized_attr_ref(self): + return weakref.ref(self, registry._collection_gced) + + +class _ClsLevelDispatch(RefCollection): + """Class-level events on :class:`._Dispatch` classes.""" + + __slots__ = ('name', 'arg_names', 'has_kw', + 'legacy_signatures', '_clslevel', '__weakref__') + + def __init__(self, parent_dispatch_cls, fn): + self.name = fn.__name__ + argspec = util.inspect_getargspec(fn) + self.arg_names = argspec.args[1:] + self.has_kw = bool(argspec.keywords) + self.legacy_signatures = list(reversed( + sorted( + getattr(fn, '_legacy_signatures', []), + key=lambda s: s[0] + ) + )) + fn.__doc__ = legacy._augment_fn_docs(self, parent_dispatch_cls, fn) + + self._clslevel = weakref.WeakKeyDictionary() + + def _adjust_fn_spec(self, fn, named): + if named: + fn = self._wrap_fn_for_kw(fn) + if self.legacy_signatures: + try: + argspec = util.get_callable_argspec(fn, no_self=True) + except TypeError: + pass + else: + fn = legacy._wrap_fn_for_legacy(self, fn, argspec) + return fn + + def _wrap_fn_for_kw(self, fn): + def wrap_kw(*args, **kw): + argdict = dict(zip(self.arg_names, args)) + argdict.update(kw) + return fn(**argdict) + return wrap_kw + + def insert(self, event_key, propagate): + target = event_key.dispatch_target + assert isinstance(target, type), \ + "Class-level Event targets must be classes." 
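+        # walk the target class plus all of its current subclasses, so
+        # that a listener established at the class level takes effect
+        # across the existing hierarchy as well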
+ stack = [target] + while stack: + cls = stack.pop(0) + stack.extend(cls.__subclasses__()) + if cls is not target and cls not in self._clslevel: + self.update_subclass(cls) + else: + if cls not in self._clslevel: + self._clslevel[cls] = collections.deque() + self._clslevel[cls].appendleft(event_key._listen_fn) + registry._stored_in_collection(event_key, self) + + def append(self, event_key, propagate): + target = event_key.dispatch_target + assert isinstance(target, type), \ + "Class-level Event targets must be classes." + + stack = [target] + while stack: + cls = stack.pop(0) + stack.extend(cls.__subclasses__()) + if cls is not target and cls not in self._clslevel: + self.update_subclass(cls) + else: + if cls not in self._clslevel: + self._clslevel[cls] = collections.deque() + self._clslevel[cls].append(event_key._listen_fn) + registry._stored_in_collection(event_key, self) + + def update_subclass(self, target): + if target not in self._clslevel: + self._clslevel[target] = collections.deque() + clslevel = self._clslevel[target] + for cls in target.__mro__[1:]: + if cls in self._clslevel: + clslevel.extend([ + fn for fn + in self._clslevel[cls] + if fn not in clslevel + ]) + + def remove(self, event_key): + target = event_key.dispatch_target + stack = [target] + while stack: + cls = stack.pop(0) + stack.extend(cls.__subclasses__()) + if cls in self._clslevel: + self._clslevel[cls].remove(event_key._listen_fn) + registry._removed_from_collection(event_key, self) + + def clear(self): + """Clear all class level listeners""" + + to_clear = set() + for dispatcher in self._clslevel.values(): + to_clear.update(dispatcher) + dispatcher.clear() + registry._clear(self, to_clear) + + def for_modify(self, obj): + """Return an event collection which can be modified. + + For _ClsLevelDispatch at the class level of + a dispatcher, this returns self. + + """ + return self + + +class _InstanceLevelDispatch(RefCollection): + __slots__ = () + + def _adjust_fn_spec(self, fn, named): + return self.parent._adjust_fn_spec(fn, named) + + +class _EmptyListener(_InstanceLevelDispatch): + """Serves as a proxy interface to the events + served by a _ClsLevelDispatch, when there are no + instance-level events present. + + Is replaced by _ListenerCollection when instance-level + events are added. + + """ + + propagate = frozenset() + listeners = () + + __slots__ = 'parent', 'parent_listeners', 'name' + + def __init__(self, parent, target_cls): + if target_cls not in parent._clslevel: + parent.update_subclass(target_cls) + self.parent = parent # _ClsLevelDispatch + self.parent_listeners = parent._clslevel[target_cls] + self.name = parent.name + + def for_modify(self, obj): + """Return an event collection which can be modified. + + For _EmptyListener at the instance level of + a dispatcher, this generates a new + _ListenerCollection, applies it to the instance, + and returns it. 
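+
+        For example (an illustrative sketch using the public event API;
+        the URL is a placeholder), the first instance-level listener
+        triggers exactly this promotion::
+
+            from sqlalchemy import create_engine, event
+
+            engine = create_engine('sqlite://')
+
+            def before_execute(conn, clauseelement, multiparams, params):
+                pass
+
+            # engine.dispatch.before_execute starts as an _EmptyListener;
+            # adding a listener replaces it with a _ListenerCollection
+            event.listen(engine, 'before_execute', before_execute)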
+ + """ + result = _ListenerCollection(self.parent, obj._instance_cls) + if getattr(obj, self.name) is self: + setattr(obj, self.name, result) + else: + assert isinstance(getattr(obj, self.name), _JoinedListener) + return result + + def _needs_modify(self, *args, **kw): + raise NotImplementedError("need to call for_modify()") + + exec_once = insert = append = remove = clear = _needs_modify + + def __call__(self, *args, **kw): + """Execute this event.""" + + for fn in self.parent_listeners: + fn(*args, **kw) + + def __len__(self): + return len(self.parent_listeners) + + def __iter__(self): + return iter(self.parent_listeners) + + def __bool__(self): + return bool(self.parent_listeners) + + __nonzero__ = __bool__ + + +class _CompoundListener(_InstanceLevelDispatch): + __slots__ = '_exec_once_mutex', '_exec_once' + + def _memoized_attr__exec_once_mutex(self): + return threading.Lock() + + def exec_once(self, *args, **kw): + """Execute this event, but only if it has not been + executed already for this collection.""" + + if not self._exec_once: + with self._exec_once_mutex: + if not self._exec_once: + try: + self(*args, **kw) + finally: + self._exec_once = True + + def __call__(self, *args, **kw): + """Execute this event.""" + + for fn in self.parent_listeners: + fn(*args, **kw) + for fn in self.listeners: + fn(*args, **kw) + + def __len__(self): + return len(self.parent_listeners) + len(self.listeners) + + def __iter__(self): + return chain(self.parent_listeners, self.listeners) + + def __bool__(self): + return bool(self.listeners or self.parent_listeners) + + __nonzero__ = __bool__ + + +class _ListenerCollection(_CompoundListener): + """Instance-level attributes on instances of :class:`._Dispatch`. + + Represents a collection of listeners. + + As of 0.7.9, _ListenerCollection is only first + created via the _EmptyListener.for_modify() method. + + """ + + __slots__ = ( + 'parent_listeners', 'parent', 'name', 'listeners', + 'propagate', '__weakref__') + + def __init__(self, parent, target_cls): + if target_cls not in parent._clslevel: + parent.update_subclass(target_cls) + self._exec_once = False + self.parent_listeners = parent._clslevel[target_cls] + self.parent = parent + self.name = parent.name + self.listeners = collections.deque() + self.propagate = set() + + def for_modify(self, obj): + """Return an event collection which can be modified. + + For _ListenerCollection at the instance level of + a dispatcher, this returns self. 
+ + """ + return self + + def _update(self, other, only_propagate=True): + """Populate from the listeners in another :class:`_Dispatch` + object.""" + + existing_listeners = self.listeners + existing_listener_set = set(existing_listeners) + self.propagate.update(other.propagate) + other_listeners = [l for l + in other.listeners + if l not in existing_listener_set + and not only_propagate or l in self.propagate + ] + + existing_listeners.extend(other_listeners) + + to_associate = other.propagate.union(other_listeners) + registry._stored_in_collection_multi(self, other, to_associate) + + def insert(self, event_key, propagate): + if event_key.prepend_to_list(self, self.listeners): + if propagate: + self.propagate.add(event_key._listen_fn) + + def append(self, event_key, propagate): + if event_key.append_to_list(self, self.listeners): + if propagate: + self.propagate.add(event_key._listen_fn) + + def remove(self, event_key): + self.listeners.remove(event_key._listen_fn) + self.propagate.discard(event_key._listen_fn) + registry._removed_from_collection(event_key, self) + + def clear(self): + registry._clear(self, self.listeners) + self.propagate.clear() + self.listeners.clear() + + +class _JoinedListener(_CompoundListener): + __slots__ = 'parent', 'name', 'local', 'parent_listeners' + + def __init__(self, parent, name, local): + self._exec_once = False + self.parent = parent + self.name = name + self.local = local + self.parent_listeners = self.local + + @property + def listeners(self): + return getattr(self.parent, self.name) + + def _adjust_fn_spec(self, fn, named): + return self.local._adjust_fn_spec(fn, named) + + def for_modify(self, obj): + self.local = self.parent_listeners = self.local.for_modify(obj) + return self + + def insert(self, event_key, propagate): + self.local.insert(event_key, propagate) + + def append(self, event_key, propagate): + self.local.append(event_key, propagate) + + def remove(self, event_key): + self.local.remove(event_key) + + def clear(self): + raise NotImplementedError() diff --git a/app/lib/sqlalchemy/event/base.py b/app/lib/sqlalchemy/event/base.py new file mode 100644 index 0000000..82ef6a1 --- /dev/null +++ b/app/lib/sqlalchemy/event/base.py @@ -0,0 +1,289 @@ +# event/base.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Base implementation classes. + +The public-facing ``Events`` serves as the base class for an event interface; +its public attributes represent different kinds of events. These attributes +are mirrored onto a ``_Dispatch`` class, which serves as a container for +collections of listener functions. These collections are represented both +at the class level of a particular ``_Dispatch`` class as well as within +instances of ``_Dispatch``. + +""" +from __future__ import absolute_import + +import weakref + +from .. import util +from .attr import _JoinedListener, \ + _EmptyListener, _ClsLevelDispatch + +_registrars = util.defaultdict(list) + + +def _is_event_name(name): + return not name.startswith('_') and name != 'dispatch' + + +class _UnpickleDispatch(object): + """Serializable callable that re-generates an instance of + :class:`_Dispatch` given a particular :class:`.Events` subclass. 
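+
+    I.e. ``_Dispatch.__reduce__`` returns
+    ``(_UnpickleDispatch(), (self._instance_cls, ))``, so unpickling
+    rebuilds the dispatch from the owning class rather than serializing
+    listener state.  A rough sketch (illustrative; assumes ``obj`` is an
+    instance of a class with a ``dispatch`` member)::
+
+        import pickle
+
+        restored = pickle.loads(pickle.dumps(obj.dispatch))
+        # ``restored`` is a fresh _Dispatch for type(obj), without the
+        # original instance-level listeners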
+ + """ + + def __call__(self, _instance_cls): + for cls in _instance_cls.__mro__: + if 'dispatch' in cls.__dict__: + return cls.__dict__['dispatch'].\ + dispatch_cls._for_class(_instance_cls) + else: + raise AttributeError("No class with a 'dispatch' member present.") + + +class _Dispatch(object): + """Mirror the event listening definitions of an Events class with + listener collections. + + Classes which define a "dispatch" member will return a + non-instantiated :class:`._Dispatch` subclass when the member + is accessed at the class level. When the "dispatch" member is + accessed at the instance level of its owner, an instance + of the :class:`._Dispatch` class is returned. + + A :class:`._Dispatch` class is generated for each :class:`.Events` + class defined, by the :func:`._create_dispatcher_class` function. + The original :class:`.Events` classes remain untouched. + This decouples the construction of :class:`.Events` subclasses from + the implementation used by the event internals, and allows + inspecting tools like Sphinx to work in an unsurprising + way against the public API. + + """ + + # in one ORM edge case, an attribute is added to _Dispatch, + # so __dict__ is used in just that case and potentially others. + __slots__ = '_parent', '_instance_cls', '__dict__', '_empty_listeners' + + _empty_listener_reg = weakref.WeakKeyDictionary() + + def __init__(self, parent, instance_cls=None): + self._parent = parent + self._instance_cls = instance_cls + if instance_cls: + try: + self._empty_listeners = self._empty_listener_reg[instance_cls] + except KeyError: + self._empty_listeners = \ + self._empty_listener_reg[instance_cls] = dict( + (ls.name, _EmptyListener(ls, instance_cls)) + for ls in parent._event_descriptors + ) + else: + self._empty_listeners = {} + + def __getattr__(self, name): + # assign EmptyListeners as attributes on demand + # to reduce startup time for new dispatch objects + try: + ls = self._empty_listeners[name] + except KeyError: + raise AttributeError(name) + else: + setattr(self, ls.name, ls) + return ls + + @property + def _event_descriptors(self): + for k in self._event_names: + yield getattr(self, k) + + def _for_class(self, instance_cls): + return self.__class__(self, instance_cls) + + def _for_instance(self, instance): + instance_cls = instance.__class__ + return self._for_class(instance_cls) + + @property + def _listen(self): + return self._events._listen + + def _join(self, other): + """Create a 'join' of this :class:`._Dispatch` and another. + + This new dispatcher will dispatch events to both + :class:`._Dispatch` objects. 
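+
+        This underlies, for example, the way a :class:`.Connection` can
+        fire listeners registered against both itself and its owning
+        :class:`.Engine` (illustrative; the URL is a placeholder)::
+
+            from sqlalchemy import create_engine, event
+
+            engine = create_engine('sqlite://')
+            conn = engine.connect()
+
+            def on_execute(conn, clauseelement, multiparams, params):
+                pass
+
+            # listeners may target either object; a joined dispatch
+            # consults both collections when the connection executes
+            event.listen(engine, 'before_execute', on_execute)
+            event.listen(conn, 'before_execute', on_execute)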
+
+        """
+        if '_joined_dispatch_cls' not in self.__class__.__dict__:
+            cls = type(
+                "Joined%s" % self.__class__.__name__,
+                (_JoinedDispatcher, ), {'__slots__': self._event_names}
+            )
+
+            self.__class__._joined_dispatch_cls = cls
+        return self._joined_dispatch_cls(self, other)
+
+    def __reduce__(self):
+        return _UnpickleDispatch(), (self._instance_cls, )
+
+    def _update(self, other, only_propagate=True):
+        """Populate from the listeners in another :class:`_Dispatch`
+        object."""
+        for ls in other._event_descriptors:
+            if isinstance(ls, _EmptyListener):
+                continue
+            getattr(self, ls.name).\
+                for_modify(self)._update(ls, only_propagate=only_propagate)
+
+    def _clear(self):
+        for ls in self._event_descriptors:
+            ls.for_modify(self).clear()
+
+
+class _EventMeta(type):
+    """Intercept new Event subclasses and create
+    associated _Dispatch classes."""
+
+    def __init__(cls, classname, bases, dict_):
+        _create_dispatcher_class(cls, classname, bases, dict_)
+        return type.__init__(cls, classname, bases, dict_)
+
+
+def _create_dispatcher_class(cls, classname, bases, dict_):
+    """Create a :class:`._Dispatch` class corresponding to an
+    :class:`.Events` class."""
+
+    # there's all kinds of ways to do this,
+    # i.e. make a Dispatch class that shares the '_listen' method
+    # of the Event class, this is the straight monkeypatch.
+    if hasattr(cls, 'dispatch'):
+        dispatch_base = cls.dispatch.__class__
+    else:
+        dispatch_base = _Dispatch
+
+    event_names = [k for k in dict_ if _is_event_name(k)]
+    dispatch_cls = type("%sDispatch" % classname,
+                        (dispatch_base, ), {'__slots__': event_names})
+
+    dispatch_cls._event_names = event_names
+
+    dispatch_inst = cls._set_dispatch(cls, dispatch_cls)
+    for k in dispatch_cls._event_names:
+        setattr(dispatch_inst, k, _ClsLevelDispatch(cls, dict_[k]))
+        _registrars[k].append(cls)
+
+    for super_ in dispatch_cls.__bases__:
+        if issubclass(super_, _Dispatch) and super_ is not _Dispatch:
+            for ls in super_._events.dispatch._event_descriptors:
+                setattr(dispatch_inst, ls.name, ls)
+                dispatch_cls._event_names.append(ls.name)
+
+    if getattr(cls, '_dispatch_target', None):
+        cls._dispatch_target.dispatch = dispatcher(cls)
+
+
+def _remove_dispatcher(cls):
+    for k in cls.dispatch._event_names:
+        _registrars[k].remove(cls)
+        if not _registrars[k]:
+            del _registrars[k]
+
+
+class Events(util.with_metaclass(_EventMeta, object)):
+    """Define event listening functions for a particular target type."""
+
+    @staticmethod
+    def _set_dispatch(cls, dispatch_cls):
+        # this allows an Events subclass to define additional utility
+        # methods made available to the target via
+        # "self.dispatch._events."
+        # @staticmethod to allow easy "super" calls while in a metaclass
+        # constructor.
+        cls.dispatch = dispatch_cls(None)
+        dispatch_cls._events = cls
+        return cls.dispatch
+
+    @classmethod
+    def _accept_with(cls, target):
+        # Mapper, ClassManager, Session override this to
+        # also accept classes, scoped_sessions, sessionmakers, etc.
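+        # the checks below accept a target whose ``dispatch`` is an
+        # instance of our dispatch class, the dispatch class itself
+        # (class-level access), or a _JoinedDispatcher whose parent is
+        # compatible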
+ if hasattr(target, 'dispatch') and ( + + isinstance(target.dispatch, cls.dispatch.__class__) or + + + ( + isinstance(target.dispatch, type) and + isinstance(target.dispatch, cls.dispatch.__class__) + ) or + + ( + isinstance(target.dispatch, _JoinedDispatcher) and + isinstance(target.dispatch.parent, cls.dispatch.__class__) + ) + + + ): + return target + else: + return None + + @classmethod + def _listen(cls, event_key, propagate=False, insert=False, named=False): + event_key.base_listen(propagate=propagate, insert=insert, named=named) + + @classmethod + def _remove(cls, event_key): + event_key.remove() + + @classmethod + def _clear(cls): + cls.dispatch._clear() + + +class _JoinedDispatcher(object): + """Represent a connection between two _Dispatch objects.""" + + __slots__ = 'local', 'parent', '_instance_cls' + + def __init__(self, local, parent): + self.local = local + self.parent = parent + self._instance_cls = self.local._instance_cls + + def __getattr__(self, name): + # assign _JoinedListeners as attributes on demand + # to reduce startup time for new dispatch objects + ls = getattr(self.local, name) + jl = _JoinedListener(self.parent, ls.name, ls) + setattr(self, ls.name, jl) + return jl + + @property + def _listen(self): + return self.parent._listen + + +class dispatcher(object): + """Descriptor used by target classes to + deliver the _Dispatch class at the class level + and produce new _Dispatch instances for target + instances. + + """ + + def __init__(self, events): + self.dispatch_cls = events.dispatch + self.events = events + + def __get__(self, obj, cls): + if obj is None: + return self.dispatch_cls + obj.__dict__['dispatch'] = disp = self.dispatch_cls._for_instance(obj) + return disp diff --git a/app/lib/sqlalchemy/event/legacy.py b/app/lib/sqlalchemy/event/legacy.py new file mode 100644 index 0000000..1063606 --- /dev/null +++ b/app/lib/sqlalchemy/event/legacy.py @@ -0,0 +1,169 @@ +# event/legacy.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Routines to handle adaption of legacy call signatures, +generation of deprecation notes and docstrings. + +""" + +from .. 
import util + + +def _legacy_signature(since, argnames, converter=None): + def leg(fn): + if not hasattr(fn, '_legacy_signatures'): + fn._legacy_signatures = [] + fn._legacy_signatures.append((since, argnames, converter)) + return fn + return leg + + +def _wrap_fn_for_legacy(dispatch_collection, fn, argspec): + for since, argnames, conv in dispatch_collection.legacy_signatures: + if argnames[-1] == "**kw": + has_kw = True + argnames = argnames[0:-1] + else: + has_kw = False + + if len(argnames) == len(argspec.args) \ + and has_kw is bool(argspec.keywords): + + if conv: + assert not has_kw + + def wrap_leg(*args): + return fn(*conv(*args)) + else: + def wrap_leg(*args, **kw): + argdict = dict(zip(dispatch_collection.arg_names, args)) + args = [argdict[name] for name in argnames] + if has_kw: + return fn(*args, **kw) + else: + return fn(*args) + return wrap_leg + else: + return fn + + +def _indent(text, indent): + return "\n".join( + indent + line + for line in text.split("\n") + ) + + +def _standard_listen_example(dispatch_collection, sample_target, fn): + example_kw_arg = _indent( + "\n".join( + "%(arg)s = kw['%(arg)s']" % {"arg": arg} + for arg in dispatch_collection.arg_names[0:2] + ), + " ") + if dispatch_collection.legacy_signatures: + current_since = max(since for since, args, conv + in dispatch_collection.legacy_signatures) + else: + current_since = None + text = ( + "from sqlalchemy import event\n\n" + "# standard decorator style%(current_since)s\n" + "@event.listens_for(%(sample_target)s, '%(event_name)s')\n" + "def receive_%(event_name)s(" + "%(named_event_arguments)s%(has_kw_arguments)s):\n" + " \"listen for the '%(event_name)s' event\"\n" + "\n # ... (event handling logic) ...\n" + ) + + if len(dispatch_collection.arg_names) > 3: + text += ( + + "\n# named argument style (new in 0.9)\n" + "@event.listens_for(" + "%(sample_target)s, '%(event_name)s', named=True)\n" + "def receive_%(event_name)s(**kw):\n" + " \"listen for the '%(event_name)s' event\"\n" + "%(example_kw_arg)s\n" + "\n # ... (event handling logic) ...\n" + ) + + text %= { + "current_since": " (arguments as of %s)" % + current_since if current_since else "", + "event_name": fn.__name__, + "has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "", + "named_event_arguments": ", ".join(dispatch_collection.arg_names), + "example_kw_arg": example_kw_arg, + "sample_target": sample_target + } + return text + + +def _legacy_listen_examples(dispatch_collection, sample_target, fn): + text = "" + for since, args, conv in dispatch_collection.legacy_signatures: + text += ( + "\n# legacy calling style (pre-%(since)s)\n" + "@event.listens_for(%(sample_target)s, '%(event_name)s')\n" + "def receive_%(event_name)s(" + "%(named_event_arguments)s%(has_kw_arguments)s):\n" + " \"listen for the '%(event_name)s' event\"\n" + "\n # ... (event handling logic) ...\n" % { + "since": since, + "event_name": fn.__name__, + "has_kw_arguments": " **kw" + if dispatch_collection.has_kw else "", + "named_event_arguments": ", ".join(args), + "sample_target": sample_target + } + ) + return text + + +def _version_signature_changes(dispatch_collection): + since, args, conv = dispatch_collection.legacy_signatures[0] + return ( + "\n.. versionchanged:: %(since)s\n" + " The ``%(event_name)s`` event now accepts the \n" + " arguments ``%(named_event_arguments)s%(has_kw_arguments)s``.\n" + " Listener functions which accept the previous argument \n" + " signature(s) listed above will be automatically \n" + " adapted to the new signature." 
% { + "since": since, + "event_name": dispatch_collection.name, + "named_event_arguments": ", ".join(dispatch_collection.arg_names), + "has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "" + } + ) + + +def _augment_fn_docs(dispatch_collection, parent_dispatch_cls, fn): + header = ".. container:: event_signatures\n\n"\ + " Example argument forms::\n"\ + "\n" + + sample_target = getattr(parent_dispatch_cls, "_target_class_doc", "obj") + text = ( + header + + _indent( + _standard_listen_example( + dispatch_collection, sample_target, fn), + " " * 8) + ) + if dispatch_collection.legacy_signatures: + text += _indent( + _legacy_listen_examples( + dispatch_collection, sample_target, fn), + " " * 8) + + text += _version_signature_changes(dispatch_collection) + + return util.inject_docstring_text(fn.__doc__, + text, + 1 + ) diff --git a/app/lib/sqlalchemy/event/registry.py b/app/lib/sqlalchemy/event/registry.py new file mode 100644 index 0000000..acccadf --- /dev/null +++ b/app/lib/sqlalchemy/event/registry.py @@ -0,0 +1,262 @@ +# event/registry.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Provides managed registration services on behalf of :func:`.listen` +arguments. + +By "managed registration", we mean that event listening functions and +other objects can be added to various collections in such a way that their +membership in all those collections can be revoked at once, based on +an equivalent :class:`._EventKey`. + +""" + +from __future__ import absolute_import + +import weakref +import collections +import types +from .. import exc, util + + +_key_to_collection = collections.defaultdict(dict) +""" +Given an original listen() argument, can locate all +listener collections and the listener fn contained + +(target, identifier, fn) -> { + ref(listenercollection) -> ref(listener_fn) + ref(listenercollection) -> ref(listener_fn) + ref(listenercollection) -> ref(listener_fn) + } +""" + +_collection_to_key = collections.defaultdict(dict) +""" +Given a _ListenerCollection or _ClsLevelListener, can locate +all the original listen() arguments and the listener fn contained + +ref(listenercollection) -> { + ref(listener_fn) -> (target, identifier, fn), + ref(listener_fn) -> (target, identifier, fn), + ref(listener_fn) -> (target, identifier, fn), + } +""" + + +def _collection_gced(ref): + # defaultdict, so can't get a KeyError + if not _collection_to_key or ref not in _collection_to_key: + return + listener_to_key = _collection_to_key.pop(ref) + for key in listener_to_key.values(): + if key in _key_to_collection: + # defaultdict, so can't get a KeyError + dispatch_reg = _key_to_collection[key] + dispatch_reg.pop(ref) + if not dispatch_reg: + _key_to_collection.pop(key) + + +def _stored_in_collection(event_key, owner): + key = event_key._key + + dispatch_reg = _key_to_collection[key] + + owner_ref = owner.ref + listen_ref = weakref.ref(event_key._listen_fn) + + if owner_ref in dispatch_reg: + return False + + dispatch_reg[owner_ref] = listen_ref + + listener_to_key = _collection_to_key[owner_ref] + listener_to_key[listen_ref] = key + + return True + + +def _removed_from_collection(event_key, owner): + key = event_key._key + + dispatch_reg = _key_to_collection[key] + + listen_ref = weakref.ref(event_key._listen_fn) + + owner_ref = owner.ref + dispatch_reg.pop(owner_ref, None) + if not dispatch_reg: + del _key_to_collection[key] + + 
if owner_ref in _collection_to_key: + listener_to_key = _collection_to_key[owner_ref] + listener_to_key.pop(listen_ref) + + +def _stored_in_collection_multi(newowner, oldowner, elements): + if not elements: + return + + oldowner = oldowner.ref + newowner = newowner.ref + + old_listener_to_key = _collection_to_key[oldowner] + new_listener_to_key = _collection_to_key[newowner] + + for listen_fn in elements: + listen_ref = weakref.ref(listen_fn) + key = old_listener_to_key[listen_ref] + dispatch_reg = _key_to_collection[key] + if newowner in dispatch_reg: + assert dispatch_reg[newowner] == listen_ref + else: + dispatch_reg[newowner] = listen_ref + + new_listener_to_key[listen_ref] = key + + +def _clear(owner, elements): + if not elements: + return + + owner = owner.ref + listener_to_key = _collection_to_key[owner] + for listen_fn in elements: + listen_ref = weakref.ref(listen_fn) + key = listener_to_key[listen_ref] + dispatch_reg = _key_to_collection[key] + dispatch_reg.pop(owner, None) + + if not dispatch_reg: + del _key_to_collection[key] + + +class _EventKey(object): + """Represent :func:`.listen` arguments. + """ + + __slots__ = ( + 'target', 'identifier', 'fn', 'fn_key', 'fn_wrap', 'dispatch_target' + ) + + def __init__(self, target, identifier, + fn, dispatch_target, _fn_wrap=None): + self.target = target + self.identifier = identifier + self.fn = fn + if isinstance(fn, types.MethodType): + self.fn_key = id(fn.__func__), id(fn.__self__) + else: + self.fn_key = id(fn) + self.fn_wrap = _fn_wrap + self.dispatch_target = dispatch_target + + @property + def _key(self): + return (id(self.target), self.identifier, self.fn_key) + + def with_wrapper(self, fn_wrap): + if fn_wrap is self._listen_fn: + return self + else: + return _EventKey( + self.target, + self.identifier, + self.fn, + self.dispatch_target, + _fn_wrap=fn_wrap + ) + + def with_dispatch_target(self, dispatch_target): + if dispatch_target is self.dispatch_target: + return self + else: + return _EventKey( + self.target, + self.identifier, + self.fn, + dispatch_target, + _fn_wrap=self.fn_wrap + ) + + def listen(self, *args, **kw): + once = kw.pop("once", False) + named = kw.pop("named", False) + + target, identifier, fn = \ + self.dispatch_target, self.identifier, self._listen_fn + + dispatch_collection = getattr(target.dispatch, identifier) + + adjusted_fn = dispatch_collection._adjust_fn_spec(fn, named) + + self = self.with_wrapper(adjusted_fn) + + if once: + self.with_wrapper( + util.only_once(self._listen_fn)).listen(*args, **kw) + else: + self.dispatch_target.dispatch._listen(self, *args, **kw) + + def remove(self): + key = self._key + + if key not in _key_to_collection: + raise exc.InvalidRequestError( + "No listeners found for event %s / %r / %s " % + (self.target, self.identifier, self.fn) + ) + dispatch_reg = _key_to_collection.pop(key) + + for collection_ref, listener_ref in dispatch_reg.items(): + collection = collection_ref() + listener_fn = listener_ref() + if collection is not None and listener_fn is not None: + collection.remove(self.with_wrapper(listener_fn)) + + def contains(self): + """Return True if this event key is registered to listen. 
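+
+        An illustrative sketch (``key`` stands in for an
+        :class:`._EventKey` produced by the listen machinery)::
+
+            if key.contains():
+                key.remove()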
+
+        """
+        return self._key in _key_to_collection
+
+    def base_listen(self, propagate=False, insert=False,
+                    named=False):
+
+        target, identifier, fn = \
+            self.dispatch_target, self.identifier, self._listen_fn
+
+        dispatch_collection = getattr(target.dispatch, identifier)
+
+        if insert:
+            dispatch_collection.\
+                for_modify(target.dispatch).insert(self, propagate)
+        else:
+            dispatch_collection.\
+                for_modify(target.dispatch).append(self, propagate)
+
+    @property
+    def _listen_fn(self):
+        return self.fn_wrap or self.fn
+
+    def append_to_list(self, owner, list_):
+        if _stored_in_collection(self, owner):
+            list_.append(self._listen_fn)
+            return True
+        else:
+            return False
+
+    def remove_from_list(self, owner, list_):
+        _removed_from_collection(self, owner)
+        list_.remove(self._listen_fn)
+
+    def prepend_to_list(self, owner, list_):
+        if _stored_in_collection(self, owner):
+            list_.appendleft(self._listen_fn)
+            return True
+        else:
+            return False
diff --git a/app/lib/sqlalchemy/events.py b/app/lib/sqlalchemy/events.py
new file mode 100644
index 0000000..7aa3001
--- /dev/null
+++ b/app/lib/sqlalchemy/events.py
@@ -0,0 +1,1173 @@
+# sqlalchemy/events.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""Core event interfaces."""
+
+from . import event, exc
+from .pool import Pool
+from .engine import Connectable, Engine, Dialect
+from .sql.base import SchemaEventTarget
+
+
+class DDLEvents(event.Events):
+    """
+    Define event listeners for schema objects,
+    that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget`
+    subclasses, including :class:`.MetaData`, :class:`.Table`,
+    :class:`.Column`.
+
+    :class:`.MetaData` and :class:`.Table` support events
+    specifically regarding when CREATE and DROP
+    DDL is emitted to the database.
+
+    Attachment events are also provided to customize
+    behavior whenever a child schema element is associated
+    with a parent, such as when a :class:`.Column` is associated
+    with its :class:`.Table`, when a :class:`.ForeignKeyConstraint`
+    is associated with a :class:`.Table`, etc.
+
+    Example using the ``after_create`` event::
+
+        from sqlalchemy import event
+        from sqlalchemy import Table, Column, MetaData, Integer
+
+        m = MetaData()
+        some_table = Table('some_table', m, Column('data', Integer))
+
+        def after_create(target, connection, **kw):
+            connection.execute("ALTER TABLE %s SET name=foo_%s" %
+                               (target.name, target.name))
+
+        event.listen(some_table, "after_create", after_create)
+
+    DDL events integrate closely with the
+    :class:`.DDL` class and the :class:`.DDLElement` hierarchy
+    of DDL clause constructs, which are themselves appropriate
+    as listener callables::
+
+        from sqlalchemy import DDL
+        event.listen(
+            some_table,
+            "after_create",
+            DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
+        )
+
+    The methods here define the name of an event as well
+    as the names of members that are passed to listener
+    functions.
+
+    See also:
+
+    :ref:`event_toplevel`
+
+    :class:`.DDLElement`
+
+    :class:`.DDL`
+
+    :ref:`schema_ddl_sequences`
+
+    """
+
+    _target_class_doc = "SomeSchemaClassOrObject"
+    _dispatch_target = SchemaEventTarget
+
+    def before_create(self, target, connection, **kw):
+        r"""Called before CREATE statements are emitted.
+
+        :param target: the :class:`.MetaData` or :class:`.Table`
+         object which is the target of the event.
+ :param connection: the :class:`.Connection` where the + CREATE statement or statements will be emitted. + :param \**kw: additional keyword arguments relevant + to the event. The contents of this dictionary + may vary across releases, and include the + list of tables being generated for a metadata-level + event, the checkfirst flag, and other + elements used by internal events. + + """ + + def after_create(self, target, connection, **kw): + r"""Called after CREATE statements are emitted. + + :param target: the :class:`.MetaData` or :class:`.Table` + object which is the target of the event. + :param connection: the :class:`.Connection` where the + CREATE statement or statements have been emitted. + :param \**kw: additional keyword arguments relevant + to the event. The contents of this dictionary + may vary across releases, and include the + list of tables being generated for a metadata-level + event, the checkfirst flag, and other + elements used by internal events. + + """ + + def before_drop(self, target, connection, **kw): + r"""Called before DROP statements are emitted. + + :param target: the :class:`.MetaData` or :class:`.Table` + object which is the target of the event. + :param connection: the :class:`.Connection` where the + DROP statement or statements will be emitted. + :param \**kw: additional keyword arguments relevant + to the event. The contents of this dictionary + may vary across releases, and include the + list of tables being generated for a metadata-level + event, the checkfirst flag, and other + elements used by internal events. + + """ + + def after_drop(self, target, connection, **kw): + r"""Called after DROP statements are emitted. + + :param target: the :class:`.MetaData` or :class:`.Table` + object which is the target of the event. + :param connection: the :class:`.Connection` where the + DROP statement or statements have been emitted. + :param \**kw: additional keyword arguments relevant + to the event. The contents of this dictionary + may vary across releases, and include the + list of tables being generated for a metadata-level + event, the checkfirst flag, and other + elements used by internal events. + + """ + + def before_parent_attach(self, target, parent): + """Called before a :class:`.SchemaItem` is associated with + a parent :class:`.SchemaItem`. + + :param target: the target object + :param parent: the parent to which the target is being attached. + + :func:`.event.listen` also accepts a modifier for this event: + + :param propagate=False: When True, the listener function will + be established for any copies made of the target object, + i.e. those copies that are generated when + :meth:`.Table.tometadata` is used. + + """ + + def after_parent_attach(self, target, parent): + """Called after a :class:`.SchemaItem` is associated with + a parent :class:`.SchemaItem`. + + :param target: the target object + :param parent: the parent to which the target is being attached. + + :func:`.event.listen` also accepts a modifier for this event: + + :param propagate=False: When True, the listener function will + be established for any copies made of the target object, + i.e. those copies that are generated when + :meth:`.Table.tometadata` is used. + + """ + + def column_reflect(self, inspector, table, column_info): + """Called for each unit of 'column info' retrieved when + a :class:`.Table` is being reflected. + + The dictionary of column information as returned by the + dialect is passed, and can be modified. 
The dictionary
+        is that returned in each element of the list returned
+        by :meth:`.reflection.Inspector.get_columns`:
+
+        * ``name`` - the column's name
+
+        * ``type`` - the type of this column, which should be an instance
+          of :class:`~sqlalchemy.types.TypeEngine`
+
+        * ``nullable`` - boolean flag if the column is NULL or NOT NULL
+
+        * ``default`` - the column's server default value.  This is
+          normally specified as a plain string SQL expression, however the
+          event can pass a :class:`.FetchedValue`, :class:`.DefaultClause`,
+          or :func:`.sql.expression.text` object as well.
+
+          .. versionchanged:: 1.1.6
+
+             The :meth:`.DDLEvents.column_reflect` event allows a non
+             string :class:`.FetchedValue`,
+             :func:`.sql.expression.text`, or derived object to be
+             specified as the value of ``default`` in the column
+             dictionary.
+
+        * ``attrs`` - dict containing optional column attributes
+
+        The event is called before any action is taken against
+        this dictionary, and the contents can be modified.
+        The :class:`.Column` specific arguments ``info``, ``key``,
+        and ``quote`` can also be added to the dictionary and
+        will be passed to the constructor of :class:`.Column`.
+
+        Note that this event is only meaningful if either
+        associated with the :class:`.Table` class across the
+        board, e.g.::
+
+            from sqlalchemy.schema import Table
+            from sqlalchemy import event
+
+            def listen_for_reflect(inspector, table, column_info):
+                "receive a column_reflect event"
+                # ...
+
+            event.listen(
+                Table,
+                'column_reflect',
+                listen_for_reflect)
+
+        ...or with a specific :class:`.Table` instance using
+        the ``listeners`` argument::
+
+            def listen_for_reflect(inspector, table, column_info):
+                "receive a column_reflect event"
+                # ...
+
+            t = Table(
+                'sometable',
+                autoload=True,
+                listeners=[
+                    ('column_reflect', listen_for_reflect)
+                ])
+
+        This is because the reflection process initiated by
+        ``autoload=True`` completes within the scope of the constructor
+        for :class:`.Table`.
+
+        """
+
+
+class PoolEvents(event.Events):
+    """Available events for :class:`.Pool`.
+
+    The methods here define the name of an event as well
+    as the names of members that are passed to listener
+    functions.
+
+    e.g.::
+
+        from sqlalchemy import event
+
+        def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
+            "handle an on checkout event"
+
+        event.listen(Pool, 'checkout', my_on_checkout)
+
+    In addition to accepting the :class:`.Pool` class and
+    :class:`.Pool` instances, :class:`.PoolEvents` also accepts
+    :class:`.Engine` objects and the :class:`.Engine` class as
+    targets, which will be resolved to the ``.pool`` attribute of the
+    given engine or the :class:`.Pool` class::
+
+        engine = create_engine("postgresql://scott:tiger@localhost/test")
+
+        # will associate with engine.pool
+        event.listen(engine, 'checkout', my_on_checkout)
+
+    """
+
+    _target_class_doc = "SomeEngineOrPool"
+    _dispatch_target = Pool
+
+    @classmethod
+    def _accept_with(cls, target):
+        if isinstance(target, type):
+            if issubclass(target, Engine):
+                return Pool
+            elif issubclass(target, Pool):
+                return target
+        elif isinstance(target, Engine):
+            return target.pool
+        else:
+            return target
+
+    def connect(self, dbapi_connection, connection_record):
+        """Called at the moment a particular DBAPI connection is first
+        created for a given :class:`.Pool`.
+
+        This event allows one to capture the point directly after which
+        the DBAPI module-level ``.connect()`` method has been used in order
+        to produce a new DBAPI connection.
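+
+        A sketch of a typical use, enabling foreign key enforcement on
+        each new SQLite connection (the pragma shown is specific to
+        SQLite)::
+
+            from sqlalchemy import event
+            from sqlalchemy.pool import Pool
+
+            @event.listens_for(Pool, "connect")
+            def set_sqlite_pragma(dbapi_connection, connection_record):
+                cursor = dbapi_connection.cursor()
+                cursor.execute("PRAGMA foreign_keys=ON")
+                cursor.close()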
+
+        :param dbapi_connection: a DBAPI connection.
+
+        :param connection_record: the :class:`._ConnectionRecord` managing the
+         DBAPI connection.
+
+        """
+
+    def first_connect(self, dbapi_connection, connection_record):
+        """Called exactly once for the first time a DBAPI connection is
+        checked out from a particular :class:`.Pool`.
+
+        The rationale for :meth:`.PoolEvents.first_connect` is to determine
+        information about a particular series of database connections based
+        on the settings used for all connections.  Since a particular
+        :class:`.Pool` refers to a single "creator" function (which in terms
+        of a :class:`.Engine` refers to the URL and connection options used),
+        it is typically valid to make observations about a single connection
+        that can be safely assumed to be valid about all subsequent
+        connections, such as the database version, the server and client
+        encoding settings, collation settings, and many others.
+
+        :param dbapi_connection: a DBAPI connection.
+
+        :param connection_record: the :class:`._ConnectionRecord` managing the
+         DBAPI connection.
+
+        """
+
+    def checkout(self, dbapi_connection, connection_record, connection_proxy):
+        """Called when a connection is retrieved from the Pool.
+
+        :param dbapi_connection: a DBAPI connection.
+
+        :param connection_record: the :class:`._ConnectionRecord` managing the
+         DBAPI connection.
+
+        :param connection_proxy: the :class:`._ConnectionFairy` object which
+         will proxy the public interface of the DBAPI connection for the
+         lifespan of the checkout.
+
+        If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the
+        current connection will be disposed and a fresh connection retrieved.
+        Processing of all checkout listeners will abort and restart
+        using the new connection.
+
+        .. seealso:: :meth:`.ConnectionEvents.engine_connect` - a similar
+           event which occurs upon creation of a new :class:`.Connection`.
+
+        """
+
+    def checkin(self, dbapi_connection, connection_record):
+        """Called when a connection returns to the pool.
+
+        Note that the connection may be closed, and may be None if the
+        connection has been invalidated.  ``checkin`` will not be called
+        for detached connections.  (They do not return to the pool.)
+
+        :param dbapi_connection: a DBAPI connection.
+
+        :param connection_record: the :class:`._ConnectionRecord` managing the
+         DBAPI connection.
+
+        """
+
+    def reset(self, dbapi_connection, connection_record):
+        """Called before the "reset" action occurs for a pooled connection.
+
+        This event represents
+        when the ``rollback()`` method is called on the DBAPI connection
+        before it is returned to the pool.  The behavior of "reset" can
+        be controlled, including being disabled, using the
+        ``reset_on_return`` pool argument.
+
+        The :meth:`.PoolEvents.reset` event is usually followed by the
+        :meth:`.PoolEvents.checkin` event, except in those
+        cases where the connection is discarded immediately after reset.
+
+        :param dbapi_connection: a DBAPI connection.
+
+        :param connection_record: the :class:`._ConnectionRecord` managing the
+         DBAPI connection.
+
+        .. versionadded:: 0.8
+
+        .. seealso::
+
+            :meth:`.ConnectionEvents.rollback`
+
+            :meth:`.ConnectionEvents.commit`
+
+        """
+
+    def invalidate(self, dbapi_connection, connection_record, exception):
+        """Called when a DBAPI connection is to be "invalidated".
+
+        This event is called any time the
+        :meth:`._ConnectionRecord.invalidate` method is invoked, either
+        from API usage or via "auto-invalidation",
+        without the ``soft`` flag.
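+
+        An illustrative listener (a sketch; ``log`` is assumed to be a
+        standard library logger)::
+
+            @event.listens_for(Pool, "invalidate")
+            def receive_invalidate(dbapi_con, con_record, exception):
+                log.warning("DBAPI connection invalidated: %s", exception)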
+ + The event occurs before a final attempt to call ``.close()`` on the + connection occurs. + + :param dbapi_connection: a DBAPI connection. + + :param connection_record: the :class:`._ConnectionRecord` managing the + DBAPI connection. + + :param exception: the exception object corresponding to the reason + for this invalidation, if any. May be ``None``. + + .. versionadded:: 0.9.2 Added support for connection invalidation + listening. + + .. seealso:: + + :ref:`pool_connection_invalidation` + + """ + + def soft_invalidate(self, dbapi_connection, connection_record, exception): + """Called when a DBAPI connection is to be "soft invalidated". + + This event is called any time the :meth:`._ConnectionRecord.invalidate` + method is invoked with the ``soft`` flag. + + Soft invalidation refers to when the connection record that tracks + this connection will force a reconnect after the current connection + is checked in. It does not actively close the dbapi_connection + at the point at which it is called. + + .. versionadded:: 1.0.3 + + """ + + def close(self, dbapi_connection, connection_record): + """Called when a DBAPI connection is closed. + + The event is emitted before the close occurs. + + The close of a connection can fail; typically this is because + the connection is already closed. If the close operation fails, + the connection is discarded. + + The :meth:`.close` event corresponds to a connection that's still + associated with the pool. To intercept close events for detached + connections use :meth:`.close_detached`. + + .. versionadded:: 1.1 + + """ + + def detach(self, dbapi_connection, connection_record): + """Called when a DBAPI connection is "detached" from a pool. + + This event is emitted after the detach occurs. The connection + is no longer associated with the given connection record. + + .. versionadded:: 1.1 + + """ + + def close_detached(self, dbapi_connection): + """Called when a detached DBAPI connection is closed. + + The event is emitted before the close occurs. + + The close of a connection can fail; typically this is because + the connection is already closed. If the close operation fails, + the connection is discarded. + + .. versionadded:: 1.1 + + """ + + +class ConnectionEvents(event.Events): + """Available events for :class:`.Connectable`, which includes + :class:`.Connection` and :class:`.Engine`. + + The methods here define the name of an event as well as the names of + members that are passed to listener functions. + + An event listener can be associated with any :class:`.Connectable` + class or instance, such as an :class:`.Engine`, e.g.:: + + from sqlalchemy import event, create_engine + + def before_cursor_execute(conn, cursor, statement, parameters, context, + executemany): + log.info("Received statement: %s", statement) + + engine = create_engine('postgresql://scott:tiger@localhost/test') + event.listen(engine, "before_cursor_execute", before_cursor_execute) + + or with a specific :class:`.Connection`:: + + with engine.begin() as conn: + @event.listens_for(conn, 'before_cursor_execute') + def before_cursor_execute(conn, cursor, statement, parameters, + context, executemany): + log.info("Received statement: %s", statement) + + When the methods are called with a `statement` parameter, such as in + :meth:`.after_cursor_execute`, :meth:`.before_cursor_execute` and + :meth:`.dbapi_error`, the statement is the exact SQL string that was + prepared for transmission to the DBAPI ``cursor`` in the connection's + :class:`.Dialect`. 
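+
+    As a further example, the two cursor execute events can be paired to
+    measure statement execution time (an illustrative sketch; the
+    ``conn.info`` dictionary is used here for per-connection storage)::
+
+        import time
+
+        @event.listens_for(Engine, "before_cursor_execute")
+        def before_cursor_execute(conn, cursor, statement, parameters,
+                                  context, executemany):
+            conn.info['query_start_time'] = time.time()
+
+        @event.listens_for(Engine, "after_cursor_execute")
+        def after_cursor_execute(conn, cursor, statement, parameters,
+                                 context, executemany):
+            total = time.time() - conn.info['query_start_time']
+            log.info("Query complete in %f sec", total)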
+ + The :meth:`.before_execute` and :meth:`.before_cursor_execute` + events can also be established with the ``retval=True`` flag, which + allows modification of the statement and parameters to be sent + to the database. The :meth:`.before_cursor_execute` event is + particularly useful here to add ad-hoc string transformations, such + as comments, to all executions:: + + from sqlalchemy.engine import Engine + from sqlalchemy import event + + @event.listens_for(Engine, "before_cursor_execute", retval=True) + def comment_sql_calls(conn, cursor, statement, parameters, + context, executemany): + statement = statement + " -- some comment" + return statement, parameters + + .. note:: :class:`.ConnectionEvents` can be established on any + combination of :class:`.Engine`, :class:`.Connection`, as well + as instances of each of those classes. Events across all + four scopes will fire off for a given instance of + :class:`.Connection`. However, for performance reasons, the + :class:`.Connection` object determines at instantiation time + whether or not its parent :class:`.Engine` has event listeners + established. Event listeners added to the :class:`.Engine` + class or to an instance of :class:`.Engine` *after* the instantiation + of a dependent :class:`.Connection` instance will usually + *not* be available on that :class:`.Connection` instance. The newly + added listeners will instead take effect for :class:`.Connection` + instances created subsequent to those event listeners being + established on the parent :class:`.Engine` class or instance. + + :param retval=False: Applies to the :meth:`.before_execute` and + :meth:`.before_cursor_execute` events only. When True, the + user-defined event function must have a return value, which + is a tuple of parameters that replace the given statement + and parameters. See those methods for a description of + specific return arguments. + + .. versionchanged:: 0.8 :class:`.ConnectionEvents` can now be associated + with any :class:`.Connectable` including :class:`.Connection`, + in addition to the existing support for :class:`.Engine`. + + """ + + _target_class_doc = "SomeEngine" + _dispatch_target = Connectable + + @classmethod + def _listen(cls, event_key, retval=False): + target, identifier, fn = \ + event_key.dispatch_target, event_key.identifier, \ + event_key._listen_fn + + target._has_events = True + + if not retval: + if identifier == 'before_execute': + orig_fn = fn + + def wrap_before_execute(conn, clauseelement, + multiparams, params): + orig_fn(conn, clauseelement, multiparams, params) + return clauseelement, multiparams, params + fn = wrap_before_execute + elif identifier == 'before_cursor_execute': + orig_fn = fn + + def wrap_before_cursor_execute(conn, cursor, statement, + parameters, context, + executemany): + orig_fn(conn, cursor, statement, + parameters, context, executemany) + return statement, parameters + fn = wrap_before_cursor_execute + elif retval and \ + identifier not in ('before_execute', + 'before_cursor_execute', 'handle_error'): + raise exc.ArgumentError( + "Only the 'before_execute', " + "'before_cursor_execute' and 'handle_error' engine " + "event listeners accept the 'retval=True' " + "argument.") + event_key.with_wrapper(fn).base_listen() + + def before_execute(self, conn, clauseelement, multiparams, params): + """Intercept high level execute() events, receiving uncompiled + SQL constructs and other objects prior to rendering into SQL. 
+
+        This event is good for debugging SQL compilation issues as well
+        as early manipulation of the parameters being sent to the database,
+        as the parameter lists will be in a consistent format here.
+
+        This event can be optionally established with the ``retval=True``
+        flag.  The ``clauseelement``, ``multiparams``, and ``params``
+        arguments should be returned as a three-tuple in this case::
+
+            @event.listens_for(Engine, "before_execute", retval=True)
+            def before_execute(conn, clauseelement, multiparams, params):
+                # do something with clauseelement, multiparams, params
+                return clauseelement, multiparams, params
+
+        :param conn: :class:`.Connection` object
+        :param clauseelement: SQL expression construct, :class:`.Compiled`
+         instance, or string statement passed to :meth:`.Connection.execute`.
+        :param multiparams: Multiple parameter sets, a list of dictionaries.
+        :param params: Single parameter set, a single dictionary.
+
+        See also:
+
+        :meth:`.before_cursor_execute`
+
+        """
+
+    def after_execute(self, conn, clauseelement, multiparams, params, result):
+        """Intercept high level execute() events after execute.
+
+        :param conn: :class:`.Connection` object
+        :param clauseelement: SQL expression construct, :class:`.Compiled`
+         instance, or string statement passed to :meth:`.Connection.execute`.
+        :param multiparams: Multiple parameter sets, a list of dictionaries.
+        :param params: Single parameter set, a single dictionary.
+        :param result: :class:`.ResultProxy` generated by the execution.
+
+        """
+
+    def before_cursor_execute(self, conn, cursor, statement,
+                              parameters, context, executemany):
+        """Intercept low-level cursor execute() events before execution,
+        receiving the string SQL statement and DBAPI-specific parameter list
+        to be invoked against a cursor.
+
+        This event is a good choice for logging as well as late modifications
+        to the SQL string.  It's less ideal for parameter modifications except
+        for those which are specific to a target backend.
+
+        This event can be optionally established with the ``retval=True``
+        flag.  The ``statement`` and ``parameters`` arguments should be
+        returned as a two-tuple in this case::
+
+            @event.listens_for(Engine, "before_cursor_execute", retval=True)
+            def before_cursor_execute(conn, cursor, statement,
+                            parameters, context, executemany):
+                # do something with statement, parameters
+                return statement, parameters
+
+        See the example at :class:`.ConnectionEvents`.
+
+        :param conn: :class:`.Connection` object
+        :param cursor: DBAPI cursor object
+        :param statement: string SQL statement, as to be passed to the DBAPI
+        :param parameters: Dictionary, tuple, or list of parameters being
+         passed to the ``execute()`` or ``executemany()`` method of the
+         DBAPI ``cursor``.  In some cases may be ``None``.
+        :param context: :class:`.ExecutionContext` object in use.  May
+         be ``None``.
+        :param executemany: boolean, if ``True``, this is an ``executemany()``
+         call, if ``False``, this is an ``execute()`` call.
+
+        See also:
+
+        :meth:`.before_execute`
+
+        :meth:`.after_cursor_execute`
+
+        """
+
+    def after_cursor_execute(self, conn, cursor, statement,
+                             parameters, context, executemany):
+        """Intercept low-level cursor execute() events after execution.
+
+        :param conn: :class:`.Connection` object
+        :param cursor: DBAPI cursor object.  Will have results pending
+         if the statement was a SELECT, but these should not be consumed
+         as they will be needed by the :class:`.ResultProxy`.
+        :param statement: string SQL statement, as passed to the DBAPI
+        :param parameters: Dictionary, tuple, or list of parameters being
+         passed to the ``execute()`` or ``executemany()`` method of the
+         DBAPI ``cursor``.  In some cases may be ``None``.
+        :param context: :class:`.ExecutionContext` object in use.  May
+         be ``None``.
+        :param executemany: boolean, if ``True``, this is an ``executemany()``
+         call, if ``False``, this is an ``execute()`` call.
+
+        """
+
+    def dbapi_error(self, conn, cursor, statement, parameters,
+                    context, exception):
+        """Intercept a raw DBAPI error.
+
+        This event is called with the DBAPI exception instance
+        received from the DBAPI itself, *before* SQLAlchemy wraps the
+        exception with its own exception wrappers, and before any
+        other operations are performed on the DBAPI cursor; the
+        existing transaction remains in effect as well as any state
+        on the cursor.
+
+        The use case here is to inject low-level exception handling
+        into an :class:`.Engine`, typically for logging and
+        debugging purposes.
+
+        .. warning::
+
+            Code should **not** modify
+            any state or throw any exceptions here as this will
+            interfere with SQLAlchemy's cleanup and error handling
+            routines.  For exception modification, please refer to the
+            new :meth:`.ConnectionEvents.handle_error` event.
+
+        Subsequent to this hook, SQLAlchemy may attempt any
+        number of operations on the connection/cursor, including
+        closing the cursor, rolling back of the transaction in the
+        case of connectionless execution, and disposing of the entire
+        connection pool if a "disconnect" was detected.  The
+        exception is then wrapped in a SQLAlchemy DBAPI exception
+        wrapper and re-thrown.
+
+        :param conn: :class:`.Connection` object
+        :param cursor: DBAPI cursor object
+        :param statement: string SQL statement, as passed to the DBAPI
+        :param parameters: Dictionary, tuple, or list of parameters being
+         passed to the ``execute()`` or ``executemany()`` method of the
+         DBAPI ``cursor``.  In some cases may be ``None``.
+        :param context: :class:`.ExecutionContext` object in use.  May
+         be ``None``.
+        :param exception: The **unwrapped** exception emitted directly from
+         the DBAPI.  The class here is specific to the DBAPI module in use.
+
+        .. deprecated:: 0.9.7 - replaced by
+            :meth:`.ConnectionEvents.handle_error`
+
+        """
+
+    def handle_error(self, exception_context):
+        r"""Intercept all exceptions processed by the :class:`.Connection`.
+
+        This includes all exceptions emitted by the DBAPI as well as
+        within SQLAlchemy's statement invocation process, including
+        encoding errors and other statement validation errors.  Other areas
+        in which the event is invoked include transaction begin and end,
+        result row fetching, cursor creation.
+
+        Note that :meth:`.handle_error` may support new kinds of exceptions
+        and new calling scenarios at *any time*.  Code which uses this
+        event must expect new calling patterns to be present in minor
+        releases.
+
+        To support the wide variety of members that correspond to an
+        exception, as well as to allow extensibility of the event without
+        backwards incompatibility, the sole argument received is an instance
+        of :class:`.ExceptionContext`.  This object contains data members
+        representing detail about the exception.
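+
+        A minimal read-only use might look like (an illustrative sketch;
+        ``log`` is assumed to be a standard library logger)::
+
+            from sqlalchemy import event
+
+            @event.listens_for(Engine, "handle_error")
+            def receive_handle_error(context):
+                log.error("Error executing %r", context.statement)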
+ + Use cases supported by this hook include: + + * read-only, low-level exception handling for logging and + debugging purposes + * exception re-writing + * Establishing or disabling whether a connection or the owning + connection pool is invalidated or expired in response to a + specific exception. + + The hook is called while the cursor from the failed operation + (if any) is still open and accessible. Special cleanup operations + can be called on this cursor; SQLAlchemy will attempt to close + this cursor subsequent to this hook being invoked. If the connection + is in "autocommit" mode, the transaction also remains open within + the scope of this hook; the rollback of the per-statement transaction + also occurs after the hook is called. + + The user-defined event handler has two options for replacing + the SQLAlchemy-constructed exception into one that is user + defined. It can either raise this new exception directly, in + which case all further event listeners are bypassed and the + exception will be raised, after appropriate cleanup as taken + place:: + + @event.listens_for(Engine, "handle_error") + def handle_exception(context): + if isinstance(context.original_exception, + psycopg2.OperationalError) and \ + "failed" in str(context.original_exception): + raise MySpecialException("failed operation") + + .. warning:: Because the :meth:`.ConnectionEvents.handle_error` + event specifically provides for exceptions to be re-thrown as + the ultimate exception raised by the failed statement, + **stack traces will be misleading** if the user-defined event + handler itself fails and throws an unexpected exception; + the stack trace may not illustrate the actual code line that + failed! It is advised to code carefully here and use + logging and/or inline debugging if unexpected exceptions are + occurring. + + Alternatively, a "chained" style of event handling can be + used, by configuring the handler with the ``retval=True`` + modifier and returning the new exception instance from the + function. In this case, event handling will continue onto the + next handler. The "chained" exception is available using + :attr:`.ExceptionContext.chained_exception`:: + + @event.listens_for(Engine, "handle_error", retval=True) + def handle_exception(context): + if context.chained_exception is not None and \ + "special" in context.chained_exception.message: + return MySpecialException("failed", + cause=context.chained_exception) + + Handlers that return ``None`` may remain within this chain; the + last non-``None`` return value is the one that continues to be + passed to the next handler. + + When a custom exception is raised or returned, SQLAlchemy raises + this new exception as-is, it is not wrapped by any SQLAlchemy + object. If the exception is not a subclass of + :class:`sqlalchemy.exc.StatementError`, + certain features may not be available; currently this includes + the ORM's feature of adding a detail hint about "autoflush" to + exceptions raised within the autoflush process. + + :param context: an :class:`.ExceptionContext` object. See this + class for details on all available members. + + .. versionadded:: 0.9.7 Added the + :meth:`.ConnectionEvents.handle_error` hook. + + .. versionchanged:: 1.1 The :meth:`.handle_error` event will now + receive all exceptions that inherit from ``BaseException``, including + ``SystemExit`` and ``KeyboardInterrupt``. 
The setting for + :attr:`.ExceptionContext.is_disconnect` is ``True`` in this case + and the default for :attr:`.ExceptionContext.invalidate_pool_on_disconnect` + is ``False``. + + .. versionchanged:: 1.0.0 The :meth:`.handle_error` event is now + invoked when an :class:`.Engine` fails during the initial + call to :meth:`.Engine.connect`, as well as when a + :class:`.Connection` object encounters an error during a + reconnect operation. + + .. versionchanged:: 1.0.0 The :meth:`.handle_error` event is + not fired off when a dialect makes use of the + ``skip_user_error_events`` execution option. This is used + by dialects which intend to catch SQLAlchemy-specific exceptions + within specific operations, such as when the MySQL dialect detects + a table not present within the ``has_table()`` dialect method. + Prior to 1.0.0, code which implements :meth:`.handle_error` needs + to ensure that exceptions thrown in these scenarios are re-raised + without modification. + + """ + + def engine_connect(self, conn, branch): + """Intercept the creation of a new :class:`.Connection`. + + This event is called typically as the direct result of calling + the :meth:`.Engine.connect` method. + + It differs from the :meth:`.PoolEvents.connect` method, which + refers to the actual connection to a database at the DBAPI level; + a DBAPI connection may be pooled and reused for many operations. + In contrast, this event refers only to the production of a higher level + :class:`.Connection` wrapper around such a DBAPI connection. + + It also differs from the :meth:`.PoolEvents.checkout` event + in that it is specific to the :class:`.Connection` object, not the + DBAPI connection that :meth:`.PoolEvents.checkout` deals with, although + this DBAPI connection is available here via the + :attr:`.Connection.connection` attribute. But note there can in fact + be multiple :meth:`.PoolEvents.checkout` events within the lifespan + of a single :class:`.Connection` object, if that :class:`.Connection` + is invalidated and re-established. There can also be multiple + :class:`.Connection` objects generated for the same already-checked-out + DBAPI connection, in the case that a "branch" of a :class:`.Connection` + is produced. + + :param conn: :class:`.Connection` object. + :param branch: if True, this is a "branch" of an existing + :class:`.Connection`. A branch is generated within the course + of a statement execution to invoke supplemental statements, most + typically to pre-execute a SELECT of a default value for the purposes + of an INSERT statement. + + .. versionadded:: 0.9.0 + + .. seealso:: + + :ref:`pool_disconnects_pessimistic` - illustrates how to use + :meth:`.ConnectionEvents.engine_connect` + to transparently ensure pooled connections are connected to the + database. + + :meth:`.PoolEvents.checkout` the lower-level pool checkout event + for an individual DBAPI connection + + :meth:`.ConnectionEvents.set_connection_execution_options` - a copy + of a :class:`.Connection` is also made when the + :meth:`.Connection.execution_options` method is called. + + """ + + def set_connection_execution_options(self, conn, opts): + """Intercept when the :meth:`.Connection.execution_options` + method is called. + + This method is called after the new :class:`.Connection` has been + produced, with the newly updated execution options collection, but + before the :class:`.Dialect` has acted upon any of those new options. 
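+
+        An illustrative listener (a sketch; ``log`` is assumed to be a
+        standard library logger)::
+
+            @event.listens_for(Engine, "set_connection_execution_options")
+            def receive_options(conn, opts):
+                if "isolation_level" in opts:
+                    log.info("connection isolation level set to %s",
+                             opts["isolation_level"])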
+
+        Note that this method is not called when a new :class:`.Connection`
+        is produced which is inheriting execution options from its parent
+        :class:`.Engine`; to intercept this condition, use the
+        :meth:`.ConnectionEvents.engine_connect` event.
+
+        :param conn: The newly copied :class:`.Connection` object
+
+        :param opts: dictionary of options that were passed to the
+         :meth:`.Connection.execution_options` method.
+
+        .. versionadded:: 0.9.0
+
+        .. seealso::
+
+            :meth:`.ConnectionEvents.set_engine_execution_options` - event
+            which is called when :meth:`.Engine.execution_options` is called.
+
+        """
+
+    def set_engine_execution_options(self, engine, opts):
+        """Intercept when the :meth:`.Engine.execution_options`
+        method is called.
+
+        The :meth:`.Engine.execution_options` method produces a shallow
+        copy of the :class:`.Engine` which stores the new options.  That new
+        :class:`.Engine` is passed here.  A particular application of this
+        method is to add a :meth:`.ConnectionEvents.engine_connect` event
+        handler to the given :class:`.Engine` which will perform some per-
+        :class:`.Connection` task specific to these execution options.
+
+        :param engine: The newly copied :class:`.Engine` object
+
+        :param opts: dictionary of options that were passed to the
+         :meth:`.Engine.execution_options` method.
+
+        .. versionadded:: 0.9.0
+
+        .. seealso::
+
+            :meth:`.ConnectionEvents.set_connection_execution_options` - event
+            which is called when :meth:`.Connection.execution_options` is
+            called.
+
+        """
+
+    def engine_disposed(self, engine):
+        """Intercept when the :meth:`.Engine.dispose` method is called.
+
+        The :meth:`.Engine.dispose` method instructs the engine to
+        "dispose" of its connection pool (e.g. :class:`.Pool`), and
+        replaces it with a new one.  Disposing of the old pool has the
+        effect that existing checked-in connections are closed.  The new
+        pool does not establish any new connections until it is first used.
+
+        This event can be used to indicate that resources related to the
+        :class:`.Engine` should also be cleaned up, keeping in mind that the
+        :class:`.Engine` can still be used for new requests in which case
+        it re-acquires connection resources.
+
+        .. versionadded:: 1.0.5
+
+        """
+
+    def begin(self, conn):
+        """Intercept begin() events.
+
+        :param conn: :class:`.Connection` object
+
+        """
+
+    def rollback(self, conn):
+        """Intercept rollback() events, as initiated by a
+        :class:`.Transaction`.
+
+        Note that the :class:`.Pool` also "auto-rolls back"
+        a DBAPI connection upon checkin, if the ``reset_on_return``
+        flag is set to its default value of ``'rollback'``.
+        To intercept this
+        rollback, use the :meth:`.PoolEvents.reset` hook.
+
+        :param conn: :class:`.Connection` object
+
+        .. seealso::
+
+            :meth:`.PoolEvents.reset`
+
+        """
+
+    def commit(self, conn):
+        """Intercept commit() events, as initiated by a
+        :class:`.Transaction`.
+
+        Note that the :class:`.Pool` may also "auto-commit"
+        a DBAPI connection upon checkin, if the ``reset_on_return``
+        flag is set to the value ``'commit'``.  To intercept this
+        commit, use the :meth:`.PoolEvents.reset` hook.
+
+        :param conn: :class:`.Connection` object
+        """
+
+    def savepoint(self, conn, name):
+        """Intercept savepoint() events.
+
+        :param conn: :class:`.Connection` object
+        :param name: specified name used for the savepoint.
+
+        """
+
+    def rollback_savepoint(self, conn, name, context):
+        """Intercept rollback_savepoint() events.
+
+        :param conn: :class:`.Connection` object
+        :param name: specified name used for the savepoint.
+ :param context: :class:`.ExecutionContext` in use. May be ``None``. + + """ + + def release_savepoint(self, conn, name, context): + """Intercept release_savepoint() events. + + :param conn: :class:`.Connection` object + :param name: specified name used for the savepoint. + :param context: :class:`.ExecutionContext` in use. May be ``None``. + + """ + + def begin_twophase(self, conn, xid): + """Intercept begin_twophase() events. + + :param conn: :class:`.Connection` object + :param xid: two-phase XID identifier + + """ + + def prepare_twophase(self, conn, xid): + """Intercept prepare_twophase() events. + + :param conn: :class:`.Connection` object + :param xid: two-phase XID identifier + """ + + def rollback_twophase(self, conn, xid, is_prepared): + """Intercept rollback_twophase() events. + + :param conn: :class:`.Connection` object + :param xid: two-phase XID identifier + :param is_prepared: boolean, indicates if + :meth:`.TwoPhaseTransaction.prepare` was called. + + """ + + def commit_twophase(self, conn, xid, is_prepared): + """Intercept commit_twophase() events. + + :param conn: :class:`.Connection` object + :param xid: two-phase XID identifier + :param is_prepared: boolean, indicates if + :meth:`.TwoPhaseTransaction.prepare` was called. + + """ + + +class DialectEvents(event.Events): + """event interface for execution-replacement functions. + + These events allow direct instrumentation and replacement + of key dialect functions which interact with the DBAPI. + + .. note:: + + :class:`.DialectEvents` hooks should be considered **semi-public** + and experimental. + These hooks are not for general use and are only for those situations + where intricate re-statement of DBAPI mechanics must be injected onto + an existing dialect. For general-use statement-interception events, + please use the :class:`.ConnectionEvents` interface. + + .. seealso:: + + :meth:`.ConnectionEvents.before_cursor_execute` + + :meth:`.ConnectionEvents.before_execute` + + :meth:`.ConnectionEvents.after_cursor_execute` + + :meth:`.ConnectionEvents.after_execute` + + + .. versionadded:: 0.9.4 + + """ + + _target_class_doc = "SomeEngine" + _dispatch_target = Dialect + + @classmethod + def _listen(cls, event_key, retval=False): + target, identifier, fn = \ + event_key.dispatch_target, event_key.identifier, event_key.fn + + target._has_events = True + event_key.base_listen() + + @classmethod + def _accept_with(cls, target): + if isinstance(target, type): + if issubclass(target, Engine): + return Dialect + elif issubclass(target, Dialect): + return target + elif isinstance(target, Engine): + return target.dialect + else: + return target + + def do_connect(self, dialect, conn_rec, cargs, cparams): + """Receive connection arguments before a connection is made. + + Return a DBAPI connection to halt further events from invoking; + the returned connection will be used. + + Alternatively, the event can manipulate the cargs and/or cparams + collections; cargs will always be a Python list that can be mutated + in-place and cparams a Python dictionary. Return None to + allow control to pass to the next event handler and ultimately + to allow the dialect to connect normally, given the updated + arguments. + + .. versionadded:: 1.0.3 + + """ + + def do_executemany(self, cursor, statement, parameters, context): + """Receive a cursor to have executemany() called. + + Return the value True to halt further events from invoking, + and to indicate that the cursor execution has already taken + place within the event handler. 
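+
+        An illustrative sketch which simply performs the execution itself
+        (``some_engine`` is assumed to be an existing :class:`.Engine`)::
+
+            @event.listens_for(some_engine, "do_executemany")
+            def do_executemany(cursor, statement, parameters, context):
+                cursor.executemany(statement, parameters)
+                return True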
+ + """ + + def do_execute_no_params(self, cursor, statement, context): + """Receive a cursor to have execute() with no parameters called. + + Return the value True to halt further events from invoking, + and to indicate that the cursor execution has already taken + place within the event handler. + + """ + + def do_execute(self, cursor, statement, parameters, context): + """Receive a cursor to have execute() called. + + Return the value True to halt further events from invoking, + and to indicate that the cursor execution has already taken + place within the event handler. + + """ diff --git a/app/lib/sqlalchemy/exc.py b/app/lib/sqlalchemy/exc.py new file mode 100644 index 0000000..b2e07ae --- /dev/null +++ b/app/lib/sqlalchemy/exc.py @@ -0,0 +1,388 @@ +# sqlalchemy/exc.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Exceptions used with SQLAlchemy. + +The base exception class is :exc:`.SQLAlchemyError`. Exceptions which are +raised as a result of DBAPI exceptions are all subclasses of +:exc:`.DBAPIError`. + +""" + + +class SQLAlchemyError(Exception): + """Generic error class.""" + + +class ArgumentError(SQLAlchemyError): + """Raised when an invalid or conflicting function argument is supplied. + + This error generally corresponds to construction time state errors. + + """ + + +class ObjectNotExecutableError(ArgumentError): + """Raised when an object is passed to .execute() that can't be + executed as SQL. + + .. versionadded:: 1.1 + + """ + + def __init__(self, target): + super(ObjectNotExecutableError, self).__init__( + "Not an executable object: %r" % target + ) + + +class NoSuchModuleError(ArgumentError): + """Raised when a dynamically-loaded module (usually a database dialect) + of a particular name cannot be located.""" + + +class NoForeignKeysError(ArgumentError): + """Raised when no foreign keys can be located between two selectables + during a join.""" + + +class AmbiguousForeignKeysError(ArgumentError): + """Raised when more than one foreign key matching can be located + between two selectables during a join.""" + + +class CircularDependencyError(SQLAlchemyError): + """Raised by topological sorts when a circular dependency is detected. + + There are two scenarios where this error occurs: + + * In a Session flush operation, if two objects are mutually dependent + on each other, they can not be inserted or deleted via INSERT or + DELETE statements alone; an UPDATE will be needed to post-associate + or pre-deassociate one of the foreign key constrained values. + The ``post_update`` flag described at :ref:`post_update` can resolve + this cycle. + * In a :attr:`.MetaData.sorted_tables` operation, two :class:`.ForeignKey` + or :class:`.ForeignKeyConstraint` objects mutually refer to each + other. Apply the ``use_alter=True`` flag to one or both, + see :ref:`use_alter`. 
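+
+    An illustrative sketch of the second scenario (table and column
+    names are hypothetical)::
+
+        t1 = Table('t1', metadata,
+                   Column('id', Integer, primary_key=True),
+                   Column('t2_id', Integer, ForeignKey('t2.id')))
+        t2 = Table('t2', metadata,
+                   Column('id', Integer, primary_key=True),
+                   Column('t1_id', Integer,
+                          ForeignKey('t1.id', use_alter=True)))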
+ + """ + def __init__(self, message, cycles, edges, msg=None): + if msg is None: + message += " (%s)" % ", ".join(repr(s) for s in cycles) + else: + message = msg + SQLAlchemyError.__init__(self, message) + self.cycles = cycles + self.edges = edges + + def __reduce__(self): + return self.__class__, (None, self.cycles, + self.edges, self.args[0]) + + +class CompileError(SQLAlchemyError): + """Raised when an error occurs during SQL compilation""" + + +class UnsupportedCompilationError(CompileError): + """Raised when an operation is not supported by the given compiler. + + + .. versionadded:: 0.8.3 + + """ + + def __init__(self, compiler, element_type): + super(UnsupportedCompilationError, self).__init__( + "Compiler %r can't render element of type %s" % + (compiler, element_type)) + + +class IdentifierError(SQLAlchemyError): + """Raised when a schema name is beyond the max character limit""" + + +class DisconnectionError(SQLAlchemyError): + """A disconnect is detected on a raw DB-API connection. + + This error is raised and consumed internally by a connection pool. It can + be raised by the :meth:`.PoolEvents.checkout` event so that the host pool + forces a retry; the exception will be caught three times in a row before + the pool gives up and raises :class:`~sqlalchemy.exc.InvalidRequestError` + regarding the connection attempt. + + """ + + +class TimeoutError(SQLAlchemyError): + """Raised when a connection pool times out on getting a connection.""" + + +class InvalidRequestError(SQLAlchemyError): + """SQLAlchemy was asked to do something it can't do. + + This error generally corresponds to runtime state errors. + + """ + + +class NoInspectionAvailable(InvalidRequestError): + """A subject passed to :func:`sqlalchemy.inspection.inspect` produced + no context for inspection.""" + + +class ResourceClosedError(InvalidRequestError): + """An operation was requested from a connection, cursor, or other + object that's in a closed state.""" + + +class NoSuchColumnError(KeyError, InvalidRequestError): + """A nonexistent column is requested from a ``RowProxy``.""" + + +class NoReferenceError(InvalidRequestError): + """Raised by ``ForeignKey`` to indicate a reference cannot be resolved.""" + + +class NoReferencedTableError(NoReferenceError): + """Raised by ``ForeignKey`` when the referred ``Table`` cannot be + located. + + """ + def __init__(self, message, tname): + NoReferenceError.__init__(self, message) + self.table_name = tname + + def __reduce__(self): + return self.__class__, (self.args[0], self.table_name) + + +class NoReferencedColumnError(NoReferenceError): + """Raised by ``ForeignKey`` when the referred ``Column`` cannot be + located. + + """ + def __init__(self, message, tname, cname): + NoReferenceError.__init__(self, message) + self.table_name = tname + self.column_name = cname + + def __reduce__(self): + return self.__class__, (self.args[0], self.table_name, + self.column_name) + + +class NoSuchTableError(InvalidRequestError): + """Table does not exist or is not visible to a connection.""" + + +class UnboundExecutionError(InvalidRequestError): + """SQL was attempted without a database connection to execute it on.""" + + +class DontWrapMixin(object): + """A mixin class which, when applied to a user-defined Exception class, + will not be wrapped inside of :exc:`.StatementError` if the error is + emitted within the process of executing a statement. 
+ + E.g.:: + + from sqlalchemy.exc import DontWrapMixin + + class MyCustomException(Exception, DontWrapMixin): + pass + + class MySpecialType(TypeDecorator): + impl = String + + def process_bind_param(self, value, dialect): + if value == 'invalid': + raise MyCustomException("invalid!") + + """ + +# Moved to orm.exc; compatibility definition installed by orm import until 0.6 +UnmappedColumnError = None + + +class StatementError(SQLAlchemyError): + """An error occurred during execution of a SQL statement. + + :class:`StatementError` wraps the exception raised + during execution, and features :attr:`.statement` + and :attr:`.params` attributes which supply context regarding + the specifics of the statement which had an issue. + + The wrapped exception object is available in + the :attr:`.orig` attribute. + + """ + + statement = None + """The string SQL statement being invoked when this exception occurred.""" + + params = None + """The parameter list being used when this exception occurred.""" + + orig = None + """The DBAPI exception object.""" + + def __init__(self, message, statement, params, orig): + SQLAlchemyError.__init__(self, message) + self.statement = statement + self.params = params + self.orig = orig + self.detail = [] + + def add_detail(self, msg): + self.detail.append(msg) + + def __reduce__(self): + return self.__class__, (self.args[0], self.statement, + self.params, self.orig) + + def __str__(self): + from sqlalchemy.sql import util + + details = [SQLAlchemyError.__str__(self)] + if self.statement: + details.append("[SQL: %r]" % self.statement) + if self.params: + params_repr = util._repr_params(self.params, 10) + details.append("[parameters: %r]" % params_repr) + return ' '.join([ + "(%s)" % det for det in self.detail + ] + details) + + def __unicode__(self): + return self.__str__() + + +class DBAPIError(StatementError): + """Raised when the execution of a database operation fails. + + Wraps exceptions raised by the DB-API underlying the + database operation. Driver-specific implementations of the standard + DB-API exception types are wrapped by matching sub-types of SQLAlchemy's + :class:`DBAPIError` when possible. DB-API's ``Error`` type maps to + :class:`DBAPIError` in SQLAlchemy, otherwise the names are identical. Note + that there is no guarantee that different DB-API implementations will + raise the same exception type for any given error condition. + + :class:`DBAPIError` features :attr:`~.StatementError.statement` + and :attr:`~.StatementError.params` attributes which supply context + regarding the specifics of the statement which had an issue, for the + typical case when the error was raised within the context of + emitting a SQL statement. + + The wrapped exception object is available in the + :attr:`~.StatementError.orig` attribute. Its type and properties are + DB-API implementation specific. + + """ + + @classmethod + def instance(cls, statement, params, + orig, dbapi_base_err, + connection_invalidated=False, + dialect=None): + # Don't ever wrap these, just return them directly as if + # DBAPIError didn't exist. + if (isinstance(orig, BaseException) and + not isinstance(orig, Exception)) or \ + isinstance(orig, DontWrapMixin): + return orig + + if orig is not None: + # not a DBAPI error, statement is present. 
+ # raise a StatementError + if not isinstance(orig, dbapi_base_err) and statement: + return StatementError( + "(%s.%s) %s" % + (orig.__class__.__module__, orig.__class__.__name__, + orig), + statement, params, orig + ) + + glob = globals() + for super_ in orig.__class__.__mro__: + name = super_.__name__ + if dialect: + name = dialect.dbapi_exception_translation_map.get( + name, name) + if name in glob and issubclass(glob[name], DBAPIError): + cls = glob[name] + break + + return cls(statement, params, orig, connection_invalidated) + + def __reduce__(self): + return self.__class__, (self.statement, self.params, + self.orig, self.connection_invalidated) + + def __init__(self, statement, params, orig, connection_invalidated=False): + try: + text = str(orig) + except Exception as e: + text = 'Error in str() of DB-API-generated exception: ' + str(e) + StatementError.__init__( + self, + '(%s.%s) %s' % ( + orig.__class__.__module__, orig.__class__.__name__, text, ), + statement, + params, + orig + ) + self.connection_invalidated = connection_invalidated + + +class InterfaceError(DBAPIError): + """Wraps a DB-API InterfaceError.""" + + +class DatabaseError(DBAPIError): + """Wraps a DB-API DatabaseError.""" + + +class DataError(DatabaseError): + """Wraps a DB-API DataError.""" + + +class OperationalError(DatabaseError): + """Wraps a DB-API OperationalError.""" + + +class IntegrityError(DatabaseError): + """Wraps a DB-API IntegrityError.""" + + +class InternalError(DatabaseError): + """Wraps a DB-API InternalError.""" + + +class ProgrammingError(DatabaseError): + """Wraps a DB-API ProgrammingError.""" + + +class NotSupportedError(DatabaseError): + """Wraps a DB-API NotSupportedError.""" + + +# Warnings + +class SADeprecationWarning(DeprecationWarning): + """Issued once per usage of a deprecated API.""" + + +class SAPendingDeprecationWarning(PendingDeprecationWarning): + """Issued once per usage of a deprecated API.""" + + +class SAWarning(RuntimeWarning): + """Issued at runtime.""" diff --git a/app/lib/sqlalchemy/ext/__init__.py b/app/lib/sqlalchemy/ext/__init__.py new file mode 100644 index 0000000..bb9ae58 --- /dev/null +++ b/app/lib/sqlalchemy/ext/__init__.py @@ -0,0 +1,11 @@ +# ext/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from .. import util as _sa_util + +_sa_util.dependencies.resolve_all("sqlalchemy.ext") + diff --git a/app/lib/sqlalchemy/ext/associationproxy.py b/app/lib/sqlalchemy/ext/associationproxy.py new file mode 100644 index 0000000..6f570a1 --- /dev/null +++ b/app/lib/sqlalchemy/ext/associationproxy.py @@ -0,0 +1,1068 @@ +# ext/associationproxy.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Contain the ``AssociationProxy`` class. + +The ``AssociationProxy`` is a Python property object which provides +transparent proxied access to the endpoint of an association object. + +See the example ``examples/association/proxied_association.py``. + +""" +import itertools +import operator +import weakref +from .. 
import exc, orm, util
+from ..orm import collections, interfaces
+from ..sql import not_, or_
+
+
+def association_proxy(target_collection, attr, **kw):
+    r"""Return a Python property implementing a view of a target
+    attribute which references an attribute on members of the
+    target.
+
+    The returned value is an instance of :class:`.AssociationProxy`.
+
+    Implements a Python property representing a relationship as a collection
+    of simpler values, or a scalar value.  The proxied property will mimic
+    the collection type of the target (list, dict or set), or, in the case of
+    a one to one relationship, a simple scalar value.
+
+    :param target_collection: Name of the attribute we'll proxy to.
+      This attribute is typically mapped by
+      :func:`~sqlalchemy.orm.relationship` to link to a target collection, but
+      can also be a many-to-one or non-scalar relationship.
+
+    :param attr: Attribute on the associated instance or instances we'll
+      proxy for.
+
+      For example, given a target collection of [obj1, obj2], a list created
+      by this proxy property would look like [getattr(obj1, *attr*),
+      getattr(obj2, *attr*)]
+
+      If the relationship is one-to-one or otherwise uselist=False, then
+      simply: getattr(obj, *attr*)
+
+    :param creator: optional.
+
+      When new items are added to this proxied collection, new instances of
+      the class collected by the target collection will be created.  For list
+      and set collections, the target class constructor will be called with
+      the 'value' for the new instance.  For dict types, two arguments are
+      passed: key and value.
+
+      If you want to construct instances differently, supply a *creator*
+      function that takes arguments as above and returns instances.
+
+      For scalar relationships, creator() will be called if the target is None.
+      If the target is present, set operations are proxied to setattr() on the
+      associated object.
+
+      If you have an associated object with multiple attributes, you may set
+      up multiple association proxies mapping to different attributes.  See
+      the unit tests for examples, and for examples of how creator() functions
+      can be used to construct the scalar relationship on-demand in this
+      situation.
+
+    :param \*\*kw: Passes along any other keyword arguments to
+      :class:`.AssociationProxy`.
+
+    """
+    return AssociationProxy(target_collection, attr, **kw)
+
+
+ASSOCIATION_PROXY = util.symbol('ASSOCIATION_PROXY')
+"""Symbol indicating an :class:`InspectionAttr` that's
+    of type :class:`.AssociationProxy`.
+
+   Is assigned to the :attr:`.InspectionAttr.extension_type`
+   attribute.
+
+"""
+
+
+class AssociationProxy(interfaces.InspectionAttrInfo):
+    """A descriptor that presents a read/write view of an object attribute."""
+
+    is_attribute = False
+    extension_type = ASSOCIATION_PROXY
+
+    def __init__(self, target_collection, attr, creator=None,
+                 getset_factory=None, proxy_factory=None,
+                 proxy_bulk_set=None, info=None):
+        """Construct a new :class:`.AssociationProxy`.
+
+        The :func:`.association_proxy` function is provided as the usual
+        entrypoint here, though :class:`.AssociationProxy` can be instantiated
+        and/or subclassed directly.
+
+        :param target_collection: Name of the collection we'll proxy to,
+          usually created with :func:`.relationship`.
+
+        :param attr: Attribute on the collected instances we'll proxy
+          for.  For example, given a target collection of [obj1, obj2], a
+          list created by this proxy property would look like
+          [getattr(obj1, attr), getattr(obj2, attr)]
+
+        :param creator: Optional.
When new items are added to this proxied + collection, new instances of the class collected by the target + collection will be created. For list and set collections, the + target class constructor will be called with the 'value' for the + new instance. For dict types, two arguments are passed: + key and value. + + If you want to construct instances differently, supply a 'creator' + function that takes arguments as above and returns instances. + + :param getset_factory: Optional. Proxied attribute access is + automatically handled by routines that get and set values based on + the `attr` argument for this proxy. + + If you would like to customize this behavior, you may supply a + `getset_factory` callable that produces a tuple of `getter` and + `setter` functions. The factory is called with two arguments, the + abstract type of the underlying collection and this proxy instance. + + :param proxy_factory: Optional. The type of collection to emulate is + determined by sniffing the target collection. If your collection + type can't be determined by duck typing or you'd like to use a + different collection implementation, you may supply a factory + function to produce those collections. Only applicable to + non-scalar relationships. + + :param proxy_bulk_set: Optional, use with proxy_factory. See + the _set() method for details. + + :param info: optional, will be assigned to + :attr:`.AssociationProxy.info` if present. + + .. versionadded:: 1.0.9 + + """ + self.target_collection = target_collection + self.value_attr = attr + self.creator = creator + self.getset_factory = getset_factory + self.proxy_factory = proxy_factory + self.proxy_bulk_set = proxy_bulk_set + + self.owning_class = None + self.key = '_%s_%s_%s' % ( + type(self).__name__, target_collection, id(self)) + self.collection_class = None + if info: + self.info = info + + @property + def remote_attr(self): + """The 'remote' :class:`.MapperProperty` referenced by this + :class:`.AssociationProxy`. + + .. versionadded:: 0.7.3 + + See also: + + :attr:`.AssociationProxy.attr` + + :attr:`.AssociationProxy.local_attr` + + """ + return getattr(self.target_class, self.value_attr) + + @property + def local_attr(self): + """The 'local' :class:`.MapperProperty` referenced by this + :class:`.AssociationProxy`. + + .. versionadded:: 0.7.3 + + See also: + + :attr:`.AssociationProxy.attr` + + :attr:`.AssociationProxy.remote_attr` + + """ + return getattr(self.owning_class, self.target_collection) + + @property + def attr(self): + """Return a tuple of ``(local_attr, remote_attr)``. + + This attribute is convenient when specifying a join + using :meth:`.Query.join` across two relationships:: + + sess.query(Parent).join(*Parent.proxied.attr) + + .. versionadded:: 0.7.3 + + See also: + + :attr:`.AssociationProxy.local_attr` + + :attr:`.AssociationProxy.remote_attr` + + """ + return (self.local_attr, self.remote_attr) + + def _get_property(self): + return (orm.class_mapper(self.owning_class). + get_property(self.target_collection)) + + @util.memoized_property + def target_class(self): + """The intermediary class handled by this :class:`.AssociationProxy`. + + Intercepted append/set/assignment events will result + in the generation of new instances of this class. 
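+
+        For example, for a hypothetical ``User.keywords`` proxy that
+        traverses a ``kw`` relationship to a ``Keyword`` class, this
+        attribute is the ``Keyword`` class itself (a sketch; the mapping
+        names are illustrative)::
+
+            assert User.keywords.target_class is Keyword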
+ + """ + return self._get_property().mapper.class_ + + @util.memoized_property + def scalar(self): + """Return ``True`` if this :class:`.AssociationProxy` proxies a scalar + relationship on the local side.""" + + scalar = not self._get_property().uselist + if scalar: + self._initialize_scalar_accessors() + return scalar + + @util.memoized_property + def _value_is_scalar(self): + return not self._get_property().\ + mapper.get_property(self.value_attr).uselist + + @util.memoized_property + def _target_is_object(self): + return getattr(self.target_class, self.value_attr).impl.uses_objects + + def __get__(self, obj, class_): + if self.owning_class is None: + self.owning_class = class_ and class_ or type(obj) + if obj is None: + return self + + if self.scalar: + target = getattr(obj, self.target_collection) + return self._scalar_get(target) + else: + try: + # If the owning instance is reborn (orm session resurrect, + # etc.), refresh the proxy cache. + creator_id, proxy = getattr(obj, self.key) + if id(obj) == creator_id: + return proxy + except AttributeError: + pass + proxy = self._new(_lazy_collection(obj, self.target_collection)) + setattr(obj, self.key, (id(obj), proxy)) + return proxy + + def __set__(self, obj, values): + if self.owning_class is None: + self.owning_class = type(obj) + + if self.scalar: + creator = self.creator and self.creator or self.target_class + target = getattr(obj, self.target_collection) + if target is None: + setattr(obj, self.target_collection, creator(values)) + else: + self._scalar_set(target, values) + else: + proxy = self.__get__(obj, None) + if proxy is not values: + proxy.clear() + self._set(proxy, values) + + def __delete__(self, obj): + if self.owning_class is None: + self.owning_class = type(obj) + delattr(obj, self.key) + + def _initialize_scalar_accessors(self): + if self.getset_factory: + get, set = self.getset_factory(None, self) + else: + get, set = self._default_getset(None) + self._scalar_get, self._scalar_set = get, set + + def _default_getset(self, collection_class): + attr = self.value_attr + _getter = operator.attrgetter(attr) + getter = lambda target: _getter(target) if target is not None else None + if collection_class is dict: + setter = lambda o, k, v: setattr(o, attr, v) + else: + setter = lambda o, v: setattr(o, attr, v) + return getter, setter + + def _new(self, lazy_collection): + creator = self.creator and self.creator or self.target_class + self.collection_class = util.duck_type_collection(lazy_collection()) + + if self.proxy_factory: + return self.proxy_factory( + lazy_collection, creator, self.value_attr, self) + + if self.getset_factory: + getter, setter = self.getset_factory(self.collection_class, self) + else: + getter, setter = self._default_getset(self.collection_class) + + if self.collection_class is list: + return _AssociationList( + lazy_collection, creator, getter, setter, self) + elif self.collection_class is dict: + return _AssociationDict( + lazy_collection, creator, getter, setter, self) + elif self.collection_class is set: + return _AssociationSet( + lazy_collection, creator, getter, setter, self) + else: + raise exc.ArgumentError( + 'could not guess which interface to use for ' + 'collection_class "%s" backing "%s"; specify a ' + 'proxy_factory and proxy_bulk_set manually' % + (self.collection_class.__name__, self.target_collection)) + + def _inflate(self, proxy): + creator = self.creator and self.creator or self.target_class + + if self.getset_factory: + getter, setter = self.getset_factory(self.collection_class, 
self) + else: + getter, setter = self._default_getset(self.collection_class) + + proxy.creator = creator + proxy.getter = getter + proxy.setter = setter + + def _set(self, proxy, values): + if self.proxy_bulk_set: + self.proxy_bulk_set(proxy, values) + elif self.collection_class is list: + proxy.extend(values) + elif self.collection_class is dict: + proxy.update(values) + elif self.collection_class is set: + proxy.update(values) + else: + raise exc.ArgumentError( + 'no proxy_bulk_set supplied for custom ' + 'collection_class implementation') + + @property + def _comparator(self): + return self._get_property().comparator + + def any(self, criterion=None, **kwargs): + """Produce a proxied 'any' expression using EXISTS. + + This expression will be a composed product + using the :meth:`.RelationshipProperty.Comparator.any` + and/or :meth:`.RelationshipProperty.Comparator.has` + operators of the underlying proxied attributes. + + """ + if self._target_is_object: + if self._value_is_scalar: + value_expr = getattr( + self.target_class, self.value_attr).has( + criterion, **kwargs) + else: + value_expr = getattr( + self.target_class, self.value_attr).any( + criterion, **kwargs) + else: + value_expr = criterion + + # check _value_is_scalar here, otherwise + # we're scalar->scalar - call .any() so that + # the "can't call any() on a scalar" msg is raised. + if self.scalar and not self._value_is_scalar: + return self._comparator.has( + value_expr + ) + else: + return self._comparator.any( + value_expr + ) + + def has(self, criterion=None, **kwargs): + """Produce a proxied 'has' expression using EXISTS. + + This expression will be a composed product + using the :meth:`.RelationshipProperty.Comparator.any` + and/or :meth:`.RelationshipProperty.Comparator.has` + operators of the underlying proxied attributes. + + """ + + if self._target_is_object: + return self._comparator.has( + getattr(self.target_class, self.value_attr). + has(criterion, **kwargs) + ) + else: + if criterion is not None or kwargs: + raise exc.ArgumentError( + "Non-empty has() not allowed for " + "column-targeted association proxy; use ==") + return self._comparator.has() + + def contains(self, obj): + """Produce a proxied 'contains' expression using EXISTS. + + This expression will be a composed product + using the :meth:`.RelationshipProperty.Comparator.any` + , :meth:`.RelationshipProperty.Comparator.has`, + and/or :meth:`.RelationshipProperty.Comparator.contains` + operators of the underlying proxied attributes. + """ + + if self.scalar and not self._value_is_scalar: + return self._comparator.has( + getattr(self.target_class, self.value_attr).contains(obj) + ) + else: + return self._comparator.any(**{self.value_attr: obj}) + + def __eq__(self, obj): + # note the has() here will fail for collections; eq_() + # is only allowed with a scalar. + if obj is None: + return or_( + self._comparator.has(**{self.value_attr: obj}), + self._comparator == None + ) + else: + return self._comparator.has(**{self.value_attr: obj}) + + def __ne__(self, obj): + # note the has() here will fail for collections; eq_() + # is only allowed with a scalar. 
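+        # also note an EXISTS subquery is still emitted, so parent rows
+        # with no related row at all will not match a != comparison.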
+ return self._comparator.has( + getattr(self.target_class, self.value_attr) != obj) + + +class _lazy_collection(object): + def __init__(self, obj, target): + self.ref = weakref.ref(obj) + self.target = target + + def __call__(self): + obj = self.ref() + if obj is None: + raise exc.InvalidRequestError( + "stale association proxy, parent object has gone out of " + "scope") + return getattr(obj, self.target) + + def __getstate__(self): + return {'obj': self.ref(), 'target': self.target} + + def __setstate__(self, state): + self.ref = weakref.ref(state['obj']) + self.target = state['target'] + + +class _AssociationCollection(object): + def __init__(self, lazy_collection, creator, getter, setter, parent): + """Constructs an _AssociationCollection. + + This will always be a subclass of either _AssociationList, + _AssociationSet, or _AssociationDict. + + lazy_collection + A callable returning a list-based collection of entities (usually an + object attribute managed by a SQLAlchemy relationship()) + + creator + A function that creates new target entities. Given one parameter: + value. This assertion is assumed:: + + obj = creator(somevalue) + assert getter(obj) == somevalue + + getter + A function. Given an associated object, return the 'value'. + + setter + A function. Given an associated object and a value, store that + value on the object. + + """ + self.lazy_collection = lazy_collection + self.creator = creator + self.getter = getter + self.setter = setter + self.parent = parent + + col = property(lambda self: self.lazy_collection()) + + def __len__(self): + return len(self.col) + + def __bool__(self): + return bool(self.col) + + __nonzero__ = __bool__ + + def __getstate__(self): + return {'parent': self.parent, 'lazy_collection': self.lazy_collection} + + def __setstate__(self, state): + self.parent = state['parent'] + self.lazy_collection = state['lazy_collection'] + self.parent._inflate(self) + + +class _AssociationList(_AssociationCollection): + """Generic, converting, list-to-list proxy.""" + + def _create(self, value): + return self.creator(value) + + def _get(self, object): + return self.getter(object) + + def _set(self, object, value): + return self.setter(object, value) + + def __getitem__(self, index): + if not isinstance(index, slice): + return self._get(self.col[index]) + else: + return [self._get(member) for member in self.col[index]] + + def __setitem__(self, index, value): + if not isinstance(index, slice): + self._set(self.col[index], value) + else: + if index.stop is None: + stop = len(self) + elif index.stop < 0: + stop = len(self) + index.stop + else: + stop = index.stop + step = index.step or 1 + + start = index.start or 0 + rng = list(range(index.start or 0, stop, step)) + if step == 1: + for i in rng: + del self[start] + i = start + for item in value: + self.insert(i, item) + i += 1 + else: + if len(value) != len(rng): + raise ValueError( + "attempt to assign sequence of size %s to " + "extended slice of size %s" % (len(value), + len(rng))) + for i, item in zip(rng, value): + self._set(self.col[i], item) + + def __delitem__(self, index): + del self.col[index] + + def __contains__(self, value): + for member in self.col: + # testlib.pragma exempt:__eq__ + if self._get(member) == value: + return True + return False + + def __getslice__(self, start, end): + return [self._get(member) for member in self.col[start:end]] + + def __setslice__(self, start, end, values): + members = [self._create(v) for v in values] + self.col[start:end] = members + + def __delslice__(self, start, 
end): + del self.col[start:end] + + def __iter__(self): + """Iterate over proxied values. + + For the actual domain objects, iterate over .col instead or + just use the underlying collection directly from its property + on the parent. + """ + + for member in self.col: + yield self._get(member) + return + + def append(self, value): + item = self._create(value) + self.col.append(item) + + def count(self, value): + return sum([1 for _ in + util.itertools_filter(lambda v: v == value, iter(self))]) + + def extend(self, values): + for v in values: + self.append(v) + + def insert(self, index, value): + self.col[index:index] = [self._create(value)] + + def pop(self, index=-1): + return self.getter(self.col.pop(index)) + + def remove(self, value): + for i, val in enumerate(self): + if val == value: + del self.col[i] + return + raise ValueError("value not in list") + + def reverse(self): + """Not supported, use reversed(mylist)""" + + raise NotImplementedError + + def sort(self): + """Not supported, use sorted(mylist)""" + + raise NotImplementedError + + def clear(self): + del self.col[0:len(self.col)] + + def __eq__(self, other): + return list(self) == other + + def __ne__(self, other): + return list(self) != other + + def __lt__(self, other): + return list(self) < other + + def __le__(self, other): + return list(self) <= other + + def __gt__(self, other): + return list(self) > other + + def __ge__(self, other): + return list(self) >= other + + def __cmp__(self, other): + return cmp(list(self), other) + + def __add__(self, iterable): + try: + other = list(iterable) + except TypeError: + return NotImplemented + return list(self) + other + + def __radd__(self, iterable): + try: + other = list(iterable) + except TypeError: + return NotImplemented + return other + list(self) + + def __mul__(self, n): + if not isinstance(n, int): + return NotImplemented + return list(self) * n + __rmul__ = __mul__ + + def __iadd__(self, iterable): + self.extend(iterable) + return self + + def __imul__(self, n): + # unlike a regular list *=, proxied __imul__ will generate unique + # backing objects for each copy. *= on proxied lists is a bit of + # a stretch anyhow, and this interpretation of the __imul__ contract + # is more plausibly useful than copying the backing objects. 
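+        # e.g. for a proxied list of length two, ``p *= 2`` appends two
+        # newly created backing objects whose values mirror the existing
+        # ones, rather than re-appending the same two backing objects.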
+ if not isinstance(n, int): + return NotImplemented + if n == 0: + self.clear() + elif n > 1: + self.extend(list(self) * (n - 1)) + return self + + def copy(self): + return list(self) + + def __repr__(self): + return repr(list(self)) + + def __hash__(self): + raise TypeError("%s objects are unhashable" % type(self).__name__) + + for func_name, func in list(locals().items()): + if (util.callable(func) and func.__name__ == func_name and + not func.__doc__ and hasattr(list, func_name)): + func.__doc__ = getattr(list, func_name).__doc__ + del func_name, func + + +_NotProvided = util.symbol('_NotProvided') + + +class _AssociationDict(_AssociationCollection): + """Generic, converting, dict-to-dict proxy.""" + + def _create(self, key, value): + return self.creator(key, value) + + def _get(self, object): + return self.getter(object) + + def _set(self, object, key, value): + return self.setter(object, key, value) + + def __getitem__(self, key): + return self._get(self.col[key]) + + def __setitem__(self, key, value): + if key in self.col: + self._set(self.col[key], key, value) + else: + self.col[key] = self._create(key, value) + + def __delitem__(self, key): + del self.col[key] + + def __contains__(self, key): + # testlib.pragma exempt:__hash__ + return key in self.col + + def has_key(self, key): + # testlib.pragma exempt:__hash__ + return key in self.col + + def __iter__(self): + return iter(self.col.keys()) + + def clear(self): + self.col.clear() + + def __eq__(self, other): + return dict(self) == other + + def __ne__(self, other): + return dict(self) != other + + def __lt__(self, other): + return dict(self) < other + + def __le__(self, other): + return dict(self) <= other + + def __gt__(self, other): + return dict(self) > other + + def __ge__(self, other): + return dict(self) >= other + + def __cmp__(self, other): + return cmp(dict(self), other) + + def __repr__(self): + return repr(dict(self.items())) + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def setdefault(self, key, default=None): + if key not in self.col: + self.col[key] = self._create(key, default) + return default + else: + return self[key] + + def keys(self): + return self.col.keys() + + if util.py2k: + def iteritems(self): + return ((key, self._get(self.col[key])) for key in self.col) + + def itervalues(self): + return (self._get(self.col[key]) for key in self.col) + + def iterkeys(self): + return self.col.iterkeys() + + def values(self): + return [self._get(member) for member in self.col.values()] + + def items(self): + return [(k, self._get(self.col[k])) for k in self] + else: + def items(self): + return ((key, self._get(self.col[key])) for key in self.col) + + def values(self): + return (self._get(self.col[key]) for key in self.col) + + def pop(self, key, default=_NotProvided): + if default is _NotProvided: + member = self.col.pop(key) + else: + member = self.col.pop(key, default) + return self._get(member) + + def popitem(self): + item = self.col.popitem() + return (item[0], self._get(item[1])) + + def update(self, *a, **kw): + if len(a) > 1: + raise TypeError('update expected at most 1 arguments, got %i' % + len(a)) + elif len(a) == 1: + seq_or_map = a[0] + # discern dict from sequence - took the advice from + # http://www.voidspace.org.uk/python/articles/duck_typing.shtml + # still not perfect :( + if hasattr(seq_or_map, 'keys'): + for item in seq_or_map: + self[item] = seq_or_map[item] + else: + try: + for k, v in seq_or_map: + self[k] = v + except ValueError: + raise 
ValueError(
+                        "dictionary update sequence "
+                        "requires 2-element tuples")
+
+        for key, value in kw.items():
+            self[key] = value
+
+    def copy(self):
+        return dict(self.items())
+
+    def __hash__(self):
+        raise TypeError("%s objects are unhashable" % type(self).__name__)
+
+    for func_name, func in list(locals().items()):
+        if (util.callable(func) and func.__name__ == func_name and
+                not func.__doc__ and hasattr(dict, func_name)):
+            func.__doc__ = getattr(dict, func_name).__doc__
+    del func_name, func
+
+
+class _AssociationSet(_AssociationCollection):
+    """Generic, converting, set-to-set proxy."""
+
+    def _create(self, value):
+        return self.creator(value)
+
+    def _get(self, object):
+        return self.getter(object)
+
+    def _set(self, object, value):
+        return self.setter(object, value)
+
+    def __len__(self):
+        return len(self.col)
+
+    def __bool__(self):
+        if self.col:
+            return True
+        else:
+            return False
+
+    __nonzero__ = __bool__
+
+    def __contains__(self, value):
+        for member in self.col:
+            # testlib.pragma exempt:__eq__
+            if self._get(member) == value:
+                return True
+        return False
+
+    def __iter__(self):
+        """Iterate over proxied values.
+
+        For the actual domain objects, iterate over .col instead or just use
+        the underlying collection directly from its property on the parent.
+
+        """
+        for member in self.col:
+            yield self._get(member)
+        return
+
+    def add(self, value):
+        if value not in self:
+            self.col.add(self._create(value))
+
+    # for discard and remove, choosing a more expensive check strategy rather
+    # than call self.creator()
+    def discard(self, value):
+        for member in self.col:
+            if self._get(member) == value:
+                self.col.discard(member)
+                break
+
+    def remove(self, value):
+        for member in self.col:
+            if self._get(member) == value:
+                self.col.discard(member)
+                return
+        raise KeyError(value)
+
+    def pop(self):
+        if not self.col:
+            raise KeyError('pop from an empty set')
+        member = self.col.pop()
+        return self._get(member)
+
+    def update(self, other):
+        for value in other:
+            self.add(value)
+
+    def __ior__(self, other):
+        if not collections._set_binops_check_strict(self, other):
+            return NotImplemented
+        for value in other:
+            self.add(value)
+        return self
+
+    def _set(self):
+        return set(iter(self))
+
+    def union(self, other):
+        return set(self).union(other)
+
+    __or__ = union
+
+    def difference(self, other):
+        return set(self).difference(other)
+
+    __sub__ = difference
+
+    def difference_update(self, other):
+        for value in other:
+            self.discard(value)
+
+    def __isub__(self, other):
+        if not collections._set_binops_check_strict(self, other):
+            return NotImplemented
+        for value in other:
+            self.discard(value)
+        return self
+
+    def intersection(self, other):
+        return set(self).intersection(other)
+
+    __and__ = intersection
+
+    def intersection_update(self, other):
+        want, have = self.intersection(other), set(self)
+
+        remove, add = have - want, want - have
+
+        for value in remove:
+            self.remove(value)
+        for value in add:
+            self.add(value)
+
+    def __iand__(self, other):
+        if not collections._set_binops_check_strict(self, other):
+            return NotImplemented
+        want, have = self.intersection(other), set(self)
+
+        remove, add = have - want, want - have
+
+        for value in remove:
+            self.remove(value)
+        for value in add:
+            self.add(value)
+        return self
+
+    def symmetric_difference(self, other):
+        return set(self).symmetric_difference(other)
+
+    __xor__ = symmetric_difference
+
+    def symmetric_difference_update(self, other):
+        want, have = self.symmetric_difference(other), set(self)
+
+        remove, add = have -
want, want - have + + for value in remove: + self.remove(value) + for value in add: + self.add(value) + + def __ixor__(self, other): + if not collections._set_binops_check_strict(self, other): + return NotImplemented + want, have = self.symmetric_difference(other), set(self) + + remove, add = have - want, want - have + + for value in remove: + self.remove(value) + for value in add: + self.add(value) + return self + + def issubset(self, other): + return set(self).issubset(other) + + def issuperset(self, other): + return set(self).issuperset(other) + + def clear(self): + self.col.clear() + + def copy(self): + return set(self) + + def __eq__(self, other): + return set(self) == other + + def __ne__(self, other): + return set(self) != other + + def __lt__(self, other): + return set(self) < other + + def __le__(self, other): + return set(self) <= other + + def __gt__(self, other): + return set(self) > other + + def __ge__(self, other): + return set(self) >= other + + def __repr__(self): + return repr(set(self)) + + def __hash__(self): + raise TypeError("%s objects are unhashable" % type(self).__name__) + + for func_name, func in list(locals().items()): + if (util.callable(func) and func.__name__ == func_name and + not func.__doc__ and hasattr(set, func_name)): + func.__doc__ = getattr(set, func_name).__doc__ + del func_name, func diff --git a/app/lib/sqlalchemy/ext/automap.py b/app/lib/sqlalchemy/ext/automap.py new file mode 100644 index 0000000..219bfe1 --- /dev/null +++ b/app/lib/sqlalchemy/ext/automap.py @@ -0,0 +1,1048 @@ +# ext/automap.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +r"""Define an extension to the :mod:`sqlalchemy.ext.declarative` system +which automatically generates mapped classes and relationships from a database +schema, typically though not necessarily one which is reflected. + +.. versionadded:: 0.9.1 Added :mod:`sqlalchemy.ext.automap`. + +It is hoped that the :class:`.AutomapBase` system provides a quick +and modernized solution to the problem that the very famous +`SQLSoup `_ +also tries to solve, that of generating a quick and rudimentary object +model from an existing database on the fly. By addressing the issue strictly +at the mapper configuration level, and integrating fully with existing +Declarative class techniques, :class:`.AutomapBase` seeks to provide +a well-integrated approach to the issue of expediently auto-generating ad-hoc +mappings. + + +Basic Use +========= + +The simplest usage is to reflect an existing database into a new model. +We create a new :class:`.AutomapBase` class in a similar manner as to how +we create a declarative base class, using :func:`.automap_base`. +We then call :meth:`.AutomapBase.prepare` on the resulting base class, +asking it to reflect the schema and produce mappings:: + + from sqlalchemy.ext.automap import automap_base + from sqlalchemy.orm import Session + from sqlalchemy import create_engine + + Base = automap_base() + + # engine, suppose it has two tables 'user' and 'address' set up + engine = create_engine("sqlite:///mydatabase.db") + + # reflect the tables + Base.prepare(engine, reflect=True) + + # mapped classes are now created with names by default + # matching that of the table name. 
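+    # (see "Overriding Naming Schemes" below to customize these names)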
+
+    User = Base.classes.user
+    Address = Base.classes.address
+
+    session = Session(engine)
+
+    # rudimentary relationships are produced
+    u1 = User(name="foo")
+    session.add(Address(email_address="foo@bar.com", user=u1))
+    session.commit()
+
+    # collection-based relationships are by default named
+    # "<classname>_collection"
+    print (u1.address_collection)
+
+Above, calling :meth:`.AutomapBase.prepare` while passing along the
+:paramref:`.AutomapBase.prepare.reflect` parameter indicates that the
+:meth:`.MetaData.reflect` method will be called on this declarative base
+class's :class:`.MetaData` collection; then, each **viable**
+:class:`.Table` within the :class:`.MetaData` will get a new mapped class
+generated automatically.  The :class:`.ForeignKeyConstraint` objects which
+link the various tables together will be used to produce new, bidirectional
+:func:`.relationship` objects between classes.  The classes and relationships
+follow along a default naming scheme that we can customize.  At this point,
+our basic mapping consisting of related ``User`` and ``Address`` classes is
+ready to use in the traditional way.
+
+.. note:: By **viable**, we mean that for a table to be mapped, it must
+   specify a primary key.  Additionally, if the table is detected as being
+   a pure association table between two other tables, it will not be directly
+   mapped and will instead be configured as a many-to-many table between
+   the mappings for the two referring tables.
+
+Generating Mappings from an Existing MetaData
+=============================================
+
+We can pass a pre-declared :class:`.MetaData` object to :func:`.automap_base`.
+This object can be constructed in any way, including programmatically, from
+a serialized file, or from itself being reflected using
+:meth:`.MetaData.reflect`.  Below we illustrate a combination of reflection and
+explicit table declaration::
+
+    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, \
+        ForeignKey
+    engine = create_engine("sqlite:///mydatabase.db")
+
+    # produce our own MetaData object
+    metadata = MetaData()
+
+    # we can reflect it ourselves from a database, using options
+    # such as 'only' to limit what tables we look at...
+    metadata.reflect(engine, only=['user', 'address'])
+
+    # ... or just define our own Table objects with it (or combine both)
+    Table('user_order', metadata,
+        Column('id', Integer, primary_key=True),
+        Column('user_id', ForeignKey('user.id'))
+    )
+
+    # we can then produce a set of mappings from this MetaData.
+    Base = automap_base(metadata=metadata)
+
+    # calling prepare() just sets up mapped classes and relationships.
+    Base.prepare()
+
+    # mapped classes are ready
+    User, Address, Order = Base.classes.user, Base.classes.address,\
+        Base.classes.user_order
+
+Specifying Classes Explicitly
+=============================
+
+The :mod:`.sqlalchemy.ext.automap` extension allows classes to be defined
+explicitly, in a way similar to that of the :class:`.DeferredReflection` class.
+Classes that extend from :class:`.AutomapBase` act like regular declarative
+classes, but are not immediately mapped after their construction, and are
+instead mapped when we call :meth:`.AutomapBase.prepare`.  The
+:meth:`.AutomapBase.prepare` method will make use of the classes we've
+established based on the table name we use.
If our schema contains tables +``user`` and ``address``, we can define one or both of the classes to be used:: + + from sqlalchemy.ext.automap import automap_base + from sqlalchemy import create_engine + + # automap base + Base = automap_base() + + # pre-declare User for the 'user' table + class User(Base): + __tablename__ = 'user' + + # override schema elements like Columns + user_name = Column('name', String) + + # override relationships too, if desired. + # we must use the same name that automap would use for the + # relationship, and also must refer to the class name that automap will + # generate for "address" + address_collection = relationship("address", collection_class=set) + + # reflect + engine = create_engine("sqlite:///mydatabase.db") + Base.prepare(engine, reflect=True) + + # we still have Address generated from the tablename "address", + # but User is the same as Base.classes.User now + + Address = Base.classes.address + + u1 = session.query(User).first() + print (u1.address_collection) + + # the backref is still there: + a1 = session.query(Address).first() + print (a1.user) + +Above, one of the more intricate details is that we illustrated overriding +one of the :func:`.relationship` objects that automap would have created. +To do this, we needed to make sure the names match up with what automap +would normally generate, in that the relationship name would be +``User.address_collection`` and the name of the class referred to, from +automap's perspective, is called ``address``, even though we are referring to +it as ``Address`` within our usage of this class. + +Overriding Naming Schemes +========================= + +:mod:`.sqlalchemy.ext.automap` is tasked with producing mapped classes and +relationship names based on a schema, which means it has decision points in how +these names are determined. These three decision points are provided using +functions which can be passed to the :meth:`.AutomapBase.prepare` method, and +are known as :func:`.classname_for_table`, +:func:`.name_for_scalar_relationship`, +and :func:`.name_for_collection_relationship`. Any or all of these +functions are provided as in the example below, where we use a "camel case" +scheme for class names and a "pluralizer" for collection names using the +`Inflect `_ package:: + + import re + import inflect + + def camelize_classname(base, tablename, table): + "Produce a 'camelized' class name, e.g. " + "'words_and_underscores' -> 'WordsAndUnderscores'" + + return str(tablename[0].upper() + \ + re.sub(r'_([a-z])', lambda m: m.group(1).upper(), tablename[1:])) + + _pluralizer = inflect.engine() + def pluralize_collection(base, local_cls, referred_cls, constraint): + "Produce an 'uncamelized', 'pluralized' class name, e.g. 
" + "'SomeTerm' -> 'some_terms'" + + referred_name = referred_cls.__name__ + uncamelized = re.sub(r'[A-Z]', + lambda m: "_%s" % m.group(0).lower(), + referred_name)[1:] + pluralized = _pluralizer.plural(uncamelized) + return pluralized + + from sqlalchemy.ext.automap import automap_base + + Base = automap_base() + + engine = create_engine("sqlite:///mydatabase.db") + + Base.prepare(engine, reflect=True, + classname_for_table=camelize_classname, + name_for_collection_relationship=pluralize_collection + ) + +From the above mapping, we would now have classes ``User`` and ``Address``, +where the collection from ``User`` to ``Address`` is called +``User.addresses``:: + + User, Address = Base.classes.User, Base.classes.Address + + u1 = User(addresses=[Address(email="foo@bar.com")]) + +Relationship Detection +====================== + +The vast majority of what automap accomplishes is the generation of +:func:`.relationship` structures based on foreign keys. The mechanism +by which this works for many-to-one and one-to-many relationships is as +follows: + +1. A given :class:`.Table`, known to be mapped to a particular class, + is examined for :class:`.ForeignKeyConstraint` objects. + +2. From each :class:`.ForeignKeyConstraint`, the remote :class:`.Table` + object present is matched up to the class to which it is to be mapped, + if any, else it is skipped. + +3. As the :class:`.ForeignKeyConstraint` we are examining corresponds to a + reference from the immediate mapped class, the relationship will be set up + as a many-to-one referring to the referred class; a corresponding + one-to-many backref will be created on the referred class referring + to this class. + +4. If any of the columns that are part of the :class:`.ForeignKeyConstraint` + are not nullable (e.g. ``nullable=False``), a + :paramref:`~.relationship.cascade` keyword argument + of ``all, delete-orphan`` will be added to the keyword arguments to + be passed to the relationship or backref. If the + :class:`.ForeignKeyConstraint` reports that + :paramref:`.ForeignKeyConstraint.ondelete` + is set to ``CASCADE`` for a not null or ``SET NULL`` for a nullable + set of columns, the option :paramref:`~.relationship.passive_deletes` + flag is set to ``True`` in the set of relationship keyword arguments. + Note that not all backends support reflection of ON DELETE. + + .. versionadded:: 1.0.0 - automap will detect non-nullable foreign key + constraints when producing a one-to-many relationship and establish + a default cascade of ``all, delete-orphan`` if so; additionally, + if the constraint specifies :paramref:`.ForeignKeyConstraint.ondelete` + of ``CASCADE`` for non-nullable or ``SET NULL`` for nullable columns, + the ``passive_deletes=True`` option is also added. + +5. The names of the relationships are determined using the + :paramref:`.AutomapBase.prepare.name_for_scalar_relationship` and + :paramref:`.AutomapBase.prepare.name_for_collection_relationship` + callable functions. It is important to note that the default relationship + naming derives the name from the **the actual class name**. If you've + given a particular class an explicit name by declaring it, or specified an + alternate class naming scheme, that's the name from which the relationship + name will be derived. + +6. The classes are inspected for an existing mapped property matching these + names. 
If one is detected on one side, but none on the other side,
+   :class:`.AutomapBase` attempts to create a relationship on the missing
+   side, then uses the :paramref:`.relationship.back_populates` parameter in
+   order to point the new relationship to the other side.
+
+7. In the usual case where no relationship is on either side,
+   :meth:`.AutomapBase.prepare` produces a :func:`.relationship` on the
+   "many-to-one" side and matches it to the other using the
+   :paramref:`.relationship.backref` parameter.
+
+8. Production of the :func:`.relationship` and optionally the :func:`.backref`
+   is handed off to the :paramref:`.AutomapBase.prepare.generate_relationship`
+   function, which can be supplied by the end-user in order to augment
+   the arguments passed to :func:`.relationship` or :func:`.backref` or to
+   make use of custom implementations of these functions.
+
+Custom Relationship Arguments
+-----------------------------
+
+The :paramref:`.AutomapBase.prepare.generate_relationship` hook can be used
+to add parameters to relationships.  For most cases, we can make use of the
+existing :func:`.automap.generate_relationship` function to return
+the object, after augmenting the given keyword dictionary with our own
+arguments.
+
+Below is an illustration of how to send
+:paramref:`.relationship.cascade` and
+:paramref:`.relationship.passive_deletes`
+options along to all one-to-many relationships::
+
+    from sqlalchemy.ext.automap import generate_relationship
+    from sqlalchemy.orm import interfaces
+
+    def _gen_relationship(base, direction, return_fn,
+                          attrname, local_cls, referred_cls, **kw):
+        if direction is interfaces.ONETOMANY:
+            kw['cascade'] = 'all, delete-orphan'
+            kw['passive_deletes'] = True
+        # make use of the built-in function to actually return
+        # the result.
+        return generate_relationship(base, direction, return_fn,
+                                     attrname, local_cls, referred_cls, **kw)
+
+    from sqlalchemy.ext.automap import automap_base
+    from sqlalchemy import create_engine
+
+    # automap base
+    Base = automap_base()
+
+    engine = create_engine("sqlite:///mydatabase.db")
+    Base.prepare(engine, reflect=True,
+                 generate_relationship=_gen_relationship)
+
+Many-to-Many relationships
+--------------------------
+
+:mod:`.sqlalchemy.ext.automap` will generate many-to-many relationships, e.g.
+those which contain a ``secondary`` argument.  The process for producing these
+is as follows:
+
+1. A given :class:`.Table` is examined for :class:`.ForeignKeyConstraint`
+   objects, before any mapped class has been assigned to it.
+
+2. If the table contains two and exactly two :class:`.ForeignKeyConstraint`
+   objects, and all columns within this table are members of these two
+   :class:`.ForeignKeyConstraint` objects, the table is assumed to be a
+   "secondary" table, and will **not be mapped directly**.
+
+3. The two (or one, for self-referential) external tables to which the
+   :class:`.Table` refers are matched to the classes to which they will be
+   mapped, if any.
+
+4. If mapped classes for both sides are located, a many-to-many bi-directional
+   :func:`.relationship` / :func:`.backref` pair is created between the two
+   classes.
+
+5. The override logic for many-to-many works the same as that of one-to-many/
+   many-to-one; the :func:`.generate_relationship` function is called upon
+   to generate the structures and existing attributes will be maintained.
+
+Relationships with Inheritance
+------------------------------
+
+:mod:`.sqlalchemy.ext.automap` will not generate any relationships between
+two classes that are in an inheritance relationship.
That is, with two +classes given as follows:: + + class Employee(Base): + __tablename__ = 'employee' + id = Column(Integer, primary_key=True) + type = Column(String(50)) + __mapper_args__ = { + 'polymorphic_identity':'employee', 'polymorphic_on': type + } + + class Engineer(Employee): + __tablename__ = 'engineer' + id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __mapper_args__ = { + 'polymorphic_identity':'engineer', + } + +The foreign key from ``Engineer`` to ``Employee`` is used not for a +relationship, but to establish joined inheritance between the two classes. + +Note that this means automap will not generate *any* relationships +for foreign keys that link from a subclass to a superclass. If a mapping +has actual relationships from subclass to superclass as well, those +need to be explicit. Below, as we have two separate foreign keys +from ``Engineer`` to ``Employee``, we need to set up both the relationship +we want as well as the ``inherit_condition``, as these are not things +SQLAlchemy can guess:: + + class Employee(Base): + __tablename__ = 'employee' + id = Column(Integer, primary_key=True) + type = Column(String(50)) + + __mapper_args__ = { + 'polymorphic_identity':'employee', 'polymorphic_on':type + } + + class Engineer(Employee): + __tablename__ = 'engineer' + id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + favorite_employee_id = Column(Integer, ForeignKey('employee.id')) + + favorite_employee = relationship(Employee, + foreign_keys=favorite_employee_id) + + __mapper_args__ = { + 'polymorphic_identity':'engineer', + 'inherit_condition': id == Employee.id + } + +Handling Simple Naming Conflicts +-------------------------------- + +In the case of naming conflicts during mapping, override any of +:func:`.classname_for_table`, :func:`.name_for_scalar_relationship`, +and :func:`.name_for_collection_relationship` as needed. For example, if +automap is attempting to name a many-to-one relationship the same as an +existing column, an alternate convention can be conditionally selected. Given +a schema: + +.. sourcecode:: sql + + CREATE TABLE table_a ( + id INTEGER PRIMARY KEY + ); + + CREATE TABLE table_b ( + id INTEGER PRIMARY KEY, + table_a INTEGER, + FOREIGN KEY(table_a) REFERENCES table_a(id) + ); + +The above schema will first automap the ``table_a`` table as a class named +``table_a``; it will then automap a relationship onto the class for ``table_b`` +with the same name as this related class, e.g. ``table_a``. This +relationship name conflicts with the mapping column ``table_b.table_a``, +and will emit an error on mapping. + +We can resolve this conflict by using an underscore as follows:: + + def name_for_scalar_relationship(base, local_cls, referred_cls, constraint): + name = referred_cls.__name__.lower() + local_table = local_cls.__table__ + if name in local_table.columns: + newname = name + "_" + warnings.warn( + "Already detected name %s present. using %s" % + (name, newname)) + return newname + return name + + + Base.prepare(engine, reflect=True, + name_for_scalar_relationship=name_for_scalar_relationship) + +Alternatively, we can change the name on the column side. 
The columns
+that are mapped can be modified using the technique described at
+:ref:`mapper_column_distinct_names`, by assigning the column explicitly
+to a new name::
+
+    Base = automap_base()
+
+    class TableB(Base):
+        __tablename__ = 'table_b'
+        _table_a = Column('table_a', ForeignKey('table_a.id'))
+
+    Base.prepare(engine, reflect=True)
+
+
+Using Automap with Explicit Declarations
+========================================
+
+As noted previously, automap has no dependency on reflection, and can make
+use of any collection of :class:`.Table` objects within a :class:`.MetaData`
+collection.  From this, it follows that automap can also be used
+to generate missing relationships given an otherwise complete model that fully
+defines table metadata::
+
+    from sqlalchemy.ext.automap import automap_base
+    from sqlalchemy import Column, Integer, String, ForeignKey
+
+    Base = automap_base()
+
+    class User(Base):
+        __tablename__ = 'user'
+
+        id = Column(Integer, primary_key=True)
+        name = Column(String)
+
+    class Address(Base):
+        __tablename__ = 'address'
+
+        id = Column(Integer, primary_key=True)
+        email = Column(String)
+        user_id = Column(ForeignKey('user.id'))
+
+    # produce relationships
+    Base.prepare()
+
+    # mapping is complete, with "address_collection" and
+    # "user" relationships
+    a1 = Address(email='u1')
+    a2 = Address(email='u2')
+    u1 = User(address_collection=[a1, a2])
+    assert a1.user is u1
+
+Above, given mostly complete ``User`` and ``Address`` mappings, the
+:class:`.ForeignKey` which we defined on ``Address.user_id`` allowed a
+bidirectional relationship pair ``Address.user`` and
+``User.address_collection`` to be generated on the mapped classes.
+
+Note that when subclassing :class:`.AutomapBase`,
+the :meth:`.AutomapBase.prepare` method is required; if not called, the classes
+we've declared are in an un-mapped state.
+
+
+"""
+from .declarative import declarative_base as _declarative_base
+from .declarative.base import _DeferredMapperConfig
+from ..sql import and_
+from ..schema import ForeignKeyConstraint
+from ..orm import relationship, backref, interfaces
+from .. import util
+
+
+def classname_for_table(base, tablename, table):
+    """Return the class name that should be used, given the name
+    of a table.
+
+    The default implementation is::
+
+        return str(tablename)
+
+    Alternate implementations can be specified using the
+    :paramref:`.AutomapBase.prepare.classname_for_table`
+    parameter.
+
+    :param base: the :class:`.AutomapBase` class doing the prepare.
+
+    :param tablename: string name of the :class:`.Table`.
+
+    :param table: the :class:`.Table` object itself.
+
+    :return: a string class name.
+
+    .. note::
+
+        In Python 2, the string used for the class name **must** be a
+        non-Unicode object, e.g. a ``str()`` object.  The ``.name`` attribute
+        of :class:`.Table` is typically a Python unicode subclass, so the
+        ``str()`` function should be applied to this name, after accounting for
+        any non-ASCII characters.
+
+    """
+    return str(tablename)
+
+
+def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
+    """Return the attribute name that should be used to refer from one
+    class to another, for a scalar object reference.
+
+    The default implementation is::
+
+        return referred_cls.__name__.lower()
+
+    Alternate implementations can be specified using the
+    :paramref:`.AutomapBase.prepare.name_for_scalar_relationship`
+    parameter.
+
+    :param base: the :class:`.AutomapBase` class doing the prepare.
+
+    :param local_cls: the class to be mapped on the local side.
+ + :param referred_cls: the class to be mapped on the referring side. + + :param constraint: the :class:`.ForeignKeyConstraint` that is being + inspected to produce this relationship. + + """ + return referred_cls.__name__.lower() + + +def name_for_collection_relationship( + base, local_cls, referred_cls, constraint): + """Return the attribute name that should be used to refer from one + class to another, for a collection reference. + + The default implementation is:: + + return referred_cls.__name__.lower() + "_collection" + + Alternate implementations + can be specified using the + :paramref:`.AutomapBase.prepare.name_for_collection_relationship` + parameter. + + :param base: the :class:`.AutomapBase` class doing the prepare. + + :param local_cls: the class to be mapped on the local side. + + :param referred_cls: the class to be mapped on the referring side. + + :param constraint: the :class:`.ForeignKeyConstraint` that is being + inspected to produce this relationship. + + """ + return referred_cls.__name__.lower() + "_collection" + + +def generate_relationship( + base, direction, return_fn, attrname, local_cls, referred_cls, **kw): + r"""Generate a :func:`.relationship` or :func:`.backref` on behalf of two + mapped classes. + + An alternate implementation of this function can be specified using the + :paramref:`.AutomapBase.prepare.generate_relationship` parameter. + + The default implementation of this function is as follows:: + + if return_fn is backref: + return return_fn(attrname, **kw) + elif return_fn is relationship: + return return_fn(referred_cls, **kw) + else: + raise TypeError("Unknown relationship function: %s" % return_fn) + + :param base: the :class:`.AutomapBase` class doing the prepare. + + :param direction: indicate the "direction" of the relationship; this will + be one of :data:`.ONETOMANY`, :data:`.MANYTOONE`, :data:`.MANYTOMANY`. + + :param return_fn: the function that is used by default to create the + relationship. This will be either :func:`.relationship` or + :func:`.backref`. The :func:`.backref` function's result will be used to + produce a new :func:`.relationship` in a second step, so it is critical + that user-defined implementations correctly differentiate between the two + functions, if a custom relationship function is being used. + + :param attrname: the attribute name to which this relationship is being + assigned. If the value of :paramref:`.generate_relationship.return_fn` is + the :func:`.backref` function, then this name is the name that is being + assigned to the backref. + + :param local_cls: the "local" class to which this relationship or backref + will be locally present. + + :param referred_cls: the "referred" class to which the relationship or + backref refers to. + + :param \**kw: all additional keyword arguments are passed along to the + function. + + :return: a :func:`.relationship` or :func:`.backref` construct, as dictated + by the :paramref:`.generate_relationship.return_fn` parameter. + + """ + if return_fn is backref: + return return_fn(attrname, **kw) + elif return_fn is relationship: + return return_fn(referred_cls, **kw) + else: + raise TypeError("Unknown relationship function: %s" % return_fn) + + +class AutomapBase(object): + """Base class for an "automap" schema. + + The :class:`.AutomapBase` class can be compared to the "declarative base" + class that is produced by the :func:`.declarative.declarative_base` + function. 
In practice, the :class:`.AutomapBase` class is always used + as a mixin along with an actual declarative base. + + A new subclassable :class:`.AutomapBase` is typically instantiated + using the :func:`.automap_base` function. + + .. seealso:: + + :ref:`automap_toplevel` + + """ + __abstract__ = True + + classes = None + """An instance of :class:`.util.Properties` containing classes. + + This object behaves much like the ``.c`` collection on a table. Classes + are present under the name they were given, e.g.:: + + Base = automap_base() + Base.prepare(engine=some_engine, reflect=True) + + User, Address = Base.classes.User, Base.classes.Address + + """ + + @classmethod + def prepare( + cls, + engine=None, + reflect=False, + schema=None, + classname_for_table=classname_for_table, + collection_class=list, + name_for_scalar_relationship=name_for_scalar_relationship, + name_for_collection_relationship=name_for_collection_relationship, + generate_relationship=generate_relationship): + """Extract mapped classes and relationships from the :class:`.MetaData` and + perform mappings. + + :param engine: an :class:`.Engine` or :class:`.Connection` with which + to perform schema reflection, if specified. + If the :paramref:`.AutomapBase.prepare.reflect` argument is False, + this object is not used. + + :param reflect: if True, the :meth:`.MetaData.reflect` method is called + on the :class:`.MetaData` associated with this :class:`.AutomapBase`. + The :class:`.Engine` passed via + :paramref:`.AutomapBase.prepare.engine` will be used to perform the + reflection if present; else, the :class:`.MetaData` should already be + bound to some engine, or the operation will fail. + + :param classname_for_table: callable function which will be used to + produce new class names, given a table name. Defaults to + :func:`.classname_for_table`. + + :param name_for_scalar_relationship: callable function which will be + used to produce relationship names for scalar relationships. Defaults + to :func:`.name_for_scalar_relationship`. + + :param name_for_collection_relationship: callable function which will + be used to produce relationship names for collection-oriented + relationships. Defaults to :func:`.name_for_collection_relationship`. + + :param generate_relationship: callable function which will be used to + actually generate :func:`.relationship` and :func:`.backref` + constructs. Defaults to :func:`.generate_relationship`. + + :param collection_class: the Python collection class that will be used + when a new :func:`.relationship` object is created that represents a + collection. Defaults to ``list``. + + :param schema: When present in conjunction with the + :paramref:`.AutomapBase.prepare.reflect` flag, is passed to + :meth:`.MetaData.reflect` to indicate the primary schema where tables + should be reflected from. When omitted, the default schema in use + by the database connection is used. + + .. versionadded:: 1.1 + + """ + if reflect: + cls.metadata.reflect( + engine, + schema=schema, + extend_existing=True, + autoload_replace=False + ) + + table_to_map_config = dict( + (m.local_table, m) + for m in _DeferredMapperConfig.
+ classes_for_base(cls, sort=False) + ) + + many_to_many = [] + + for table in cls.metadata.tables.values(): + lcl_m2m, rem_m2m, m2m_const = _is_many_to_many(cls, table) + if lcl_m2m is not None: + many_to_many.append((lcl_m2m, rem_m2m, m2m_const, table)) + elif not table.primary_key: + continue + elif table not in table_to_map_config: + mapped_cls = type( + classname_for_table(cls, table.name, table), + (cls, ), + {"__table__": table} + ) + map_config = _DeferredMapperConfig.config_for_cls(mapped_cls) + cls.classes[map_config.cls.__name__] = mapped_cls + table_to_map_config[table] = map_config + + for map_config in table_to_map_config.values(): + _relationships_for_fks(cls, + map_config, + table_to_map_config, + collection_class, + name_for_scalar_relationship, + name_for_collection_relationship, + generate_relationship) + + for lcl_m2m, rem_m2m, m2m_const, table in many_to_many: + _m2m_relationship(cls, lcl_m2m, rem_m2m, m2m_const, table, + table_to_map_config, + collection_class, + name_for_scalar_relationship, + name_for_collection_relationship, + generate_relationship) + + for map_config in _DeferredMapperConfig.classes_for_base(cls): + map_config.map() + + _sa_decl_prepare = True + """Indicate that the mapping of classes should be deferred. + + The presence of this attribute name indicates to declarative + that the call to mapper() should not occur immediately; instead, + information about the table and attributes to be mapped are gathered + into an internal structure called _DeferredMapperConfig. These + objects can be collected later using classes_for_base(), additional + mapping decisions can be made, and then the map() method will actually + apply the mapping. + + The only real reason this deferral of the whole + thing is needed is to support primary key columns that aren't reflected + yet when the class is declared; everything else can theoretically be + added to the mapper later. However, the _DeferredMapperConfig is a + nice interface in any case which exists at that not usually exposed point + at which declarative has the class and the Table but hasn't called + mapper() yet. + + """ + + +def automap_base(declarative_base=None, **kw): + r"""Produce a declarative automap base. + + This function produces a new base class that is a product of the + :class:`.AutomapBase` class as well a declarative base produced by + :func:`.declarative.declarative_base`. + + All parameters other than ``declarative_base`` are keyword arguments + that are passed directly to the :func:`.declarative.declarative_base` + function. + + :param declarative_base: an existing class produced by + :func:`.declarative.declarative_base`. When this is passed, the function + no longer invokes :func:`.declarative.declarative_base` itself, and all + other keyword arguments are ignored. + + :param \**kw: keyword arguments are passed along to + :func:`.declarative.declarative_base`. 
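+
+    As an illustrative sketch (the engine URL here is hypothetical), the
+    returned base is used like any declarative base, with the addition of
+    the :meth:`.AutomapBase.prepare` step::
+
+        from sqlalchemy import create_engine
+        from sqlalchemy.ext.automap import automap_base
+
+        Base = automap_base()
+        engine = create_engine("sqlite:///existing.db")
+
+        # reflect tables, generate mapped classes and relationships
+        Base.prepare(engine, reflect=True)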
+ + """ + if declarative_base is None: + Base = _declarative_base(**kw) + else: + Base = declarative_base + + return type( + Base.__name__, + (AutomapBase, Base,), + {"__abstract__": True, "classes": util.Properties({})} + ) + + +def _is_many_to_many(automap_base, table): + fk_constraints = [const for const in table.constraints + if isinstance(const, ForeignKeyConstraint)] + if len(fk_constraints) != 2: + return None, None, None + + cols = sum( + [[fk.parent for fk in fk_constraint.elements] + for fk_constraint in fk_constraints], []) + + if set(cols) != set(table.c): + return None, None, None + + return ( + fk_constraints[0].elements[0].column.table, + fk_constraints[1].elements[0].column.table, + fk_constraints + ) + + +def _relationships_for_fks(automap_base, map_config, table_to_map_config, + collection_class, + name_for_scalar_relationship, + name_for_collection_relationship, + generate_relationship): + local_table = map_config.local_table + local_cls = map_config.cls + + if local_table is None: + return + for constraint in local_table.constraints: + if isinstance(constraint, ForeignKeyConstraint): + fks = constraint.elements + referred_table = fks[0].column.table + referred_cfg = table_to_map_config.get(referred_table, None) + if referred_cfg is None: + continue + referred_cls = referred_cfg.cls + + if local_cls is not referred_cls and issubclass( + local_cls, referred_cls): + continue + + relationship_name = name_for_scalar_relationship( + automap_base, + local_cls, + referred_cls, constraint) + backref_name = name_for_collection_relationship( + automap_base, + referred_cls, + local_cls, + constraint + ) + + o2m_kws = {} + nullable = False not in set([fk.parent.nullable for fk in fks]) + if not nullable: + o2m_kws['cascade'] = "all, delete-orphan" + + if constraint.ondelete and \ + constraint.ondelete.lower() == "cascade": + o2m_kws['passive_deletes'] = True + else: + if constraint.ondelete and \ + constraint.ondelete.lower() == "set null": + o2m_kws['passive_deletes'] = True + + create_backref = backref_name not in referred_cfg.properties + + if relationship_name not in map_config.properties: + if create_backref: + backref_obj = generate_relationship( + automap_base, + interfaces.ONETOMANY, backref, + backref_name, referred_cls, local_cls, + collection_class=collection_class, + **o2m_kws) + else: + backref_obj = None + rel = generate_relationship(automap_base, + interfaces.MANYTOONE, + relationship, + relationship_name, + local_cls, referred_cls, + foreign_keys=[ + fk.parent + for fk in constraint.elements], + backref=backref_obj, + remote_side=[ + fk.column + for fk in constraint.elements] + ) + if rel is not None: + map_config.properties[relationship_name] = rel + if not create_backref: + referred_cfg.properties[ + backref_name].back_populates = relationship_name + elif create_backref: + rel = generate_relationship(automap_base, + interfaces.ONETOMANY, + relationship, + backref_name, + referred_cls, local_cls, + foreign_keys=[ + fk.parent + for fk in constraint.elements], + back_populates=relationship_name, + collection_class=collection_class, + **o2m_kws) + if rel is not None: + referred_cfg.properties[backref_name] = rel + map_config.properties[ + relationship_name].back_populates = backref_name + + +def _m2m_relationship(automap_base, lcl_m2m, rem_m2m, m2m_const, table, + table_to_map_config, + collection_class, + name_for_scalar_relationship, + name_for_collection_relationship, + generate_relationship): + + map_config = table_to_map_config.get(lcl_m2m, None) + referred_cfg = 
table_to_map_config.get(rem_m2m, None) + if map_config is None or referred_cfg is None: + return + + local_cls = map_config.cls + referred_cls = referred_cfg.cls + + relationship_name = name_for_collection_relationship( + automap_base, + local_cls, + referred_cls, m2m_const[0]) + backref_name = name_for_collection_relationship( + automap_base, + referred_cls, + local_cls, + m2m_const[1] + ) + + create_backref = backref_name not in referred_cfg.properties + + if relationship_name not in map_config.properties: + if create_backref: + backref_obj = generate_relationship( + automap_base, + interfaces.MANYTOMANY, + backref, + backref_name, + referred_cls, local_cls, + collection_class=collection_class + ) + else: + backref_obj = None + rel = generate_relationship(automap_base, + interfaces.MANYTOMANY, + relationship, + relationship_name, + local_cls, referred_cls, + secondary=table, + primaryjoin=and_( + fk.column == fk.parent + for fk in m2m_const[0].elements), + secondaryjoin=and_( + fk.column == fk.parent + for fk in m2m_const[1].elements), + backref=backref_obj, + collection_class=collection_class + ) + if rel is not None: + map_config.properties[relationship_name] = rel + + if not create_backref: + referred_cfg.properties[ + backref_name].back_populates = relationship_name + elif create_backref: + rel = generate_relationship(automap_base, + interfaces.MANYTOMANY, + relationship, + backref_name, + referred_cls, local_cls, + secondary=table, + primaryjoin=and_( + fk.column == fk.parent + for fk in m2m_const[1].elements), + secondaryjoin=and_( + fk.column == fk.parent + for fk in m2m_const[0].elements), + back_populates=relationship_name, + collection_class=collection_class) + if rel is not None: + referred_cfg.properties[backref_name] = rel + map_config.properties[ + relationship_name].back_populates = backref_name diff --git a/app/lib/sqlalchemy/ext/baked.py b/app/lib/sqlalchemy/ext/baked.py new file mode 100644 index 0000000..68bd468 --- /dev/null +++ b/app/lib/sqlalchemy/ext/baked.py @@ -0,0 +1,559 @@ +# sqlalchemy/ext/baked.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +"""Baked query extension. + +Provides a creational pattern for the :class:`.query.Query` object which +allows the fully constructed object, Core select statement, and string +compiled result to be fully cached. + + +""" + +from ..orm.query import Query +from ..orm import strategies, attributes, properties, \ + strategy_options, util as orm_util, interfaces +from .. import log as sqla_log +from ..sql import util as sql_util, func, literal_column +from ..orm import exc as orm_exc +from .. import exc as sa_exc +from .. 
import util + +import copy +import logging + +log = logging.getLogger(__name__) + + +class BakedQuery(object): + """A builder object for :class:`.query.Query` objects.""" + + __slots__ = 'steps', '_bakery', '_cache_key', '_spoiled' + + def __init__(self, bakery, initial_fn, args=()): + self._cache_key = () + self._update_cache_key(initial_fn, args) + self.steps = [initial_fn] + self._spoiled = False + self._bakery = bakery + + @classmethod + def bakery(cls, size=200): + """Construct a new bakery.""" + + _bakery = util.LRUCache(size) + + def call(initial_fn, *args): + return cls(_bakery, initial_fn, args) + + return call + + def _clone(self): + b1 = BakedQuery.__new__(BakedQuery) + b1._cache_key = self._cache_key + b1.steps = list(self.steps) + b1._bakery = self._bakery + b1._spoiled = self._spoiled + return b1 + + def _update_cache_key(self, fn, args=()): + self._cache_key += (fn.__code__,) + args + + def __iadd__(self, other): + if isinstance(other, tuple): + self.add_criteria(*other) + else: + self.add_criteria(other) + return self + + def __add__(self, other): + if isinstance(other, tuple): + return self.with_criteria(*other) + else: + return self.with_criteria(other) + + def add_criteria(self, fn, *args): + """Add a criteria function to this :class:`.BakedQuery`. + + This is equivalent to using the ``+=`` operator to + modify a :class:`.BakedQuery` in-place. + + """ + self._update_cache_key(fn, args) + self.steps.append(fn) + return self + + def with_criteria(self, fn, *args): + """Add a criteria function to a :class:`.BakedQuery` cloned from this one. + + This is equivalent to using the ``+`` operator to + produce a new :class:`.BakedQuery` with modifications. + + """ + return self._clone().add_criteria(fn, *args) + + def for_session(self, session): + """Return a :class:`.Result` object for this :class:`.BakedQuery`. + + This is equivalent to calling the :class:`.BakedQuery` as a + Python callable, e.g. ``result = my_baked_query(session)``. + + """ + return Result(self, session) + + def __call__(self, session): + return self.for_session(session) + + def spoil(self, full=False): + """Cancel any query caching that will occur on this BakedQuery object. + + The BakedQuery can continue to be used normally, however additional + creational functions will not be cached; they will be called + on every invocation. + + This is to support the case where a particular step in constructing + a baked query disqualifies the query from being cacheable, such + as a variant that relies upon some uncacheable value. + + :param full: if False, only functions added to this + :class:`.BakedQuery` object subsequent to the spoil step will be + non-cached; the state of the :class:`.BakedQuery` up until + this point will be pulled from the cache. If True, then the + entire :class:`.Query` object is built from scratch each + time, with all creational functions being called on each + invocation. 
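+
+        As a minimal sketch (``MyClass`` and ``get_uncacheable_value()`` are
+        hypothetical), a step that embeds a value which can't participate in
+        the cache key would spoil the query before adding that step::
+
+            from sqlalchemy.ext import baked
+
+            bakery = baked.bakery()
+            baked_query = bakery(lambda session: session.query(MyClass))
+            baked_query.spoil()
+            baked_query += lambda q: q.filter(
+                MyClass.data == get_uncacheable_value())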
+ + """ + if not full: + _spoil_point = self._clone() + _spoil_point._cache_key += ('_query_only', ) + self.steps = [_spoil_point._retrieve_baked_query] + self._spoiled = True + return self + + def _retrieve_baked_query(self, session): + query = self._bakery.get(self._cache_key, None) + if query is None: + query = self._as_query(session) + self._bakery[self._cache_key] = query.with_session(None) + return query.with_session(session) + + def _bake(self, session): + query = self._as_query(session) + + context = query._compile_context() + self._bake_subquery_loaders(session, context) + context.session = None + context.query = query = context.query.with_session(None) + query._execution_options = query._execution_options.union( + {"compiled_cache": self._bakery} + ) + # we'll be holding onto the query for some of its state, + # so delete some compilation-use-only attributes that can take up + # space + for attr in ( + '_correlate', '_from_obj', '_mapper_adapter_map', + '_joinpath', '_joinpoint'): + query.__dict__.pop(attr, None) + self._bakery[self._cache_key] = context + return context + + def _as_query(self, session): + query = self.steps[0](session) + + for step in self.steps[1:]: + query = step(query) + return query + + def _bake_subquery_loaders(self, session, context): + """convert subquery eager loaders in the cache into baked queries. + + For subquery eager loading to work, all we need here is that the + Query point to the correct session when it is run. However, since + we are "baking" anyway, we may as well also turn the query into + a "baked" query so that we save on performance too. + + """ + context.attributes['baked_queries'] = baked_queries = [] + for k, v in list(context.attributes.items()): + if isinstance(v, Query): + if 'subquery' in k: + bk = BakedQuery(self._bakery, lambda *args: v) + bk._cache_key = self._cache_key + k + bk._bake(session) + baked_queries.append((k, bk._cache_key, v)) + del context.attributes[k] + + def _unbake_subquery_loaders(self, session, context, params): + """Retrieve subquery eager loaders stored by _bake_subquery_loaders + and turn them back into Result objects that will iterate just + like a Query object. + + """ + for k, cache_key, query in context.attributes["baked_queries"]: + bk = BakedQuery(self._bakery, + lambda sess, q=query: q.with_session(sess)) + bk._cache_key = cache_key + context.attributes[k] = bk.for_session(session).params(**params) + + +class Result(object): + """Invokes a :class:`.BakedQuery` against a :class:`.Session`. + + The :class:`.Result` object is where the actual :class:`.query.Query` + object gets created, or retrieved from the cache, + against a target :class:`.Session`, and is then invoked for results. 
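+
+    A brief sketch of the flow, assuming a ``bakery`` and a mapped ``User``
+    class set up as in the module documentation::
+
+        baked_query = bakery(lambda session: session.query(User))
+        result = baked_query(session)  # a Result; nothing executed yet
+        users = result.all()           # Query built here or pulled from cache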
+ + """ + __slots__ = 'bq', 'session', '_params' + + def __init__(self, bq, session): + self.bq = bq + self.session = session + self._params = {} + + def params(self, *args, **kw): + """Specify parameters to be replaced into the string SQL statement.""" + + if len(args) == 1: + kw.update(args[0]) + elif len(args) > 0: + raise sa_exc.ArgumentError( + "params() takes zero or one positional argument, " + "which is a dictionary.") + self._params.update(kw) + return self + + def _as_query(self): + return self.bq._as_query(self.session).params(self._params) + + def __str__(self): + return str(self._as_query()) + + def __iter__(self): + bq = self.bq + if bq._spoiled: + return iter(self._as_query()) + + baked_context = bq._bakery.get(bq._cache_key, None) + if baked_context is None: + baked_context = bq._bake(self.session) + + context = copy.copy(baked_context) + context.session = self.session + context.attributes = context.attributes.copy() + + bq._unbake_subquery_loaders(self.session, context, self._params) + + context.statement.use_labels = True + if context.autoflush and not context.populate_existing: + self.session._autoflush() + return context.query.params(self._params).\ + with_session(self.session)._execute_and_instances(context) + + def count(self): + """return the 'count'. + + Equivalent to :meth:`.Query.count`. + + Note this uses a subquery to ensure an accurate count regardless + of the structure of the original statement. + + .. versionadded:: 1.1.6 + + """ + + col = func.count(literal_column('*')) + bq = self.bq.with_criteria(lambda q: q.from_self(col)) + return bq.for_session(self.session).params(self._params).scalar() + + def scalar(self): + """Return the first element of the first result or None + if no rows present. If multiple rows are returned, + raises MultipleResultsFound. + + Equivalent to :meth:`.Query.scalar`. + + .. versionadded:: 1.1.6 + + """ + try: + ret = self.one() + if not isinstance(ret, tuple): + return ret + return ret[0] + except orm_exc.NoResultFound: + return None + + def first(self): + """Return the first row. + + Equivalent to :meth:`.Query.first`. + + """ + bq = self.bq.with_criteria(lambda q: q.slice(0, 1)) + ret = list(bq.for_session(self.session).params(self._params)) + if len(ret) > 0: + return ret[0] + else: + return None + + def one(self): + """Return exactly one result or raise an exception. + + Equivalent to :meth:`.Query.one`. + + """ + try: + ret = self.one_or_none() + except orm_exc.MultipleResultsFound: + raise orm_exc.MultipleResultsFound( + "Multiple rows were found for one()") + else: + if ret is None: + raise orm_exc.NoResultFound("No row was found for one()") + return ret + + def one_or_none(self): + """Return one or zero results, or raise an exception for multiple + rows. + + Equivalent to :meth:`.Query.one_or_none`. + + .. versionadded:: 1.0.9 + + """ + ret = list(self) + + l = len(ret) + if l == 1: + return ret[0] + elif l == 0: + return None + else: + raise orm_exc.MultipleResultsFound( + "Multiple rows were found for one_or_none()") + + def all(self): + """Return all rows. + + Equivalent to :meth:`.Query.all`. + + """ + return list(self) + + def get(self, ident): + """Retrieve an object based on identity. + + Equivalent to :meth:`.Query.get`. 
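+
+        E.g., assuming a ``bakery`` and a mapped ``User`` class
+        (hypothetical names)::
+
+            baked_query = bakery(lambda session: session.query(User))
+            user = baked_query(session).get(5)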
+ + """ + + query = self.bq.steps[0](self.session) + return query._get_impl(ident, self._load_on_ident) + + def _load_on_ident(self, query, key): + """Load the given identity key from the database.""" + + ident = key[1] + + mapper = query._mapper_zero() + + _get_clause, _get_params = mapper._get_clause + + def setup(query): + _lcl_get_clause = _get_clause + q = query._clone() + q._get_condition() + q._order_by = None + + # None present in ident - turn those comparisons + # into "IS NULL" + if None in ident: + nones = set([ + _get_params[col].key for col, value in + zip(mapper.primary_key, ident) if value is None + ]) + _lcl_get_clause = sql_util.adapt_criterion_to_null( + _lcl_get_clause, nones) + + _lcl_get_clause = q._adapt_clause(_lcl_get_clause, True, False) + q._criterion = _lcl_get_clause + return q + + # cache the query against a key that includes + # which positions in the primary key are NULL + # (remember, we can map to an OUTER JOIN) + bq = self.bq + + # add the clause we got from mapper._get_clause to the cache + # key so that if a race causes multiple calls to _get_clause, + # we've cached on ours + bq = bq._clone() + bq._cache_key += (_get_clause, ) + + bq = bq.with_criteria(setup, tuple(elem is None for elem in ident)) + + params = dict([ + (_get_params[primary_key].key, id_val) + for id_val, primary_key in zip(ident, mapper.primary_key) + ]) + + result = list(bq.for_session(self.session).params(**params)) + l = len(result) + if l > 1: + raise orm_exc.MultipleResultsFound() + elif l: + return result[0] + else: + return None + + +def bake_lazy_loaders(): + """Enable the use of baked queries for all lazyloaders systemwide. + + This operation should be safe for all lazy loaders, and will reduce + Python overhead for these operations. + + """ + BakedLazyLoader._strategy_keys[:] = [] + + properties.RelationshipProperty.strategy_for( + lazy="select")(BakedLazyLoader) + properties.RelationshipProperty.strategy_for( + lazy=True)(BakedLazyLoader) + properties.RelationshipProperty.strategy_for( + lazy="baked_select")(BakedLazyLoader) + + strategies.LazyLoader._strategy_keys[:] = BakedLazyLoader._strategy_keys[:] + + +def unbake_lazy_loaders(): + """Disable the use of baked queries for all lazyloaders systemwide. + + This operation reverts the changes produced by :func:`.bake_lazy_loaders`. 
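+
+    A sketch of toggling the behavior around a block of work::
+
+        from sqlalchemy.ext import baked
+
+        baked.bake_lazy_loaders()    # lazy loads now use baked queries
+        # ... ORM operations ...
+        baked.unbake_lazy_loaders()  # restore the default LazyLoader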
+ + """ + strategies.LazyLoader._strategy_keys[:] = [] + BakedLazyLoader._strategy_keys[:] = [] + + properties.RelationshipProperty.strategy_for( + lazy="select")(strategies.LazyLoader) + properties.RelationshipProperty.strategy_for( + lazy=True)(strategies.LazyLoader) + properties.RelationshipProperty.strategy_for( + lazy="baked_select")(BakedLazyLoader) + assert strategies.LazyLoader._strategy_keys + + +@sqla_log.class_logger +@properties.RelationshipProperty.strategy_for(lazy="baked_select") +class BakedLazyLoader(strategies.LazyLoader): + + def _emit_lazyload(self, session, state, ident_key, passive): + q = BakedQuery( + self.mapper._compiled_cache, + lambda session: session.query(self.mapper)) + q.add_criteria( + lambda q: q._adapt_all_clauses()._with_invoke_all_eagers(False), + self.parent_property) + + if not self.parent_property.bake_queries: + q.spoil(full=True) + + if self.parent_property.secondary is not None: + q.add_criteria( + lambda q: + q.select_from(self.mapper, self.parent_property.secondary)) + + pending = not state.key + + # don't autoflush on pending + if pending or passive & attributes.NO_AUTOFLUSH: + q.add_criteria(lambda q: q.autoflush(False)) + + if state.load_options: + q.spoil() + args = state.load_path[self.parent_property] + q.add_criteria( + lambda q: + q._with_current_path(args), args) + q.add_criteria( + lambda q: q._conditional_options(*state.load_options)) + + if self.use_get: + return q(session)._load_on_ident( + session.query(self.mapper), ident_key) + + if self.parent_property.order_by: + q.add_criteria( + lambda q: + q.order_by(*util.to_list(self.parent_property.order_by))) + + for rev in self.parent_property._reverse_property: + # reverse props that are MANYTOONE are loading *this* + # object from get(), so don't need to eager out to those. + if rev.direction is interfaces.MANYTOONE and \ + rev._use_get and \ + not isinstance(rev.strategy, strategies.LazyLoader): + + q.add_criteria( + lambda q: + q.options( + strategy_options.Load.for_existing_path( + q._current_path[rev.parent] + ).baked_lazyload(rev.key) + ) + ) + + lazy_clause, params = self._generate_lazy_clause(state, passive) + + if pending: + if orm_util._none_set.intersection(params.values()): + return None + + q.add_criteria(lambda q: q.filter(lazy_clause)) + result = q(session).params(**params).all() + if self.uselist: + return result + else: + l = len(result) + if l: + if l > 1: + util.warn( + "Multiple rows returned with " + "uselist=False for lazily-loaded attribute '%s' " + % self.parent_property) + + return result[0] + else: + return None + + +@strategy_options.loader_option() +def baked_lazyload(loadopt, attr): + """Indicate that the given attribute should be loaded using "lazy" + loading with a "baked" query used in the load. 
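+
+    E.g., assuming a mapped ``User`` class with an ``addresses``
+    relationship (names hypothetical)::
+
+        from sqlalchemy.ext.baked import baked_lazyload
+
+        session.query(User).options(baked_lazyload(User.addresses))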
+ + """ + return loadopt.set_relationship_strategy(attr, {"lazy": "baked_select"}) + + +@baked_lazyload._add_unbound_fn +def baked_lazyload(*keys): + return strategy_options._UnboundLoad._from_keys( + strategy_options._UnboundLoad.baked_lazyload, keys, False, {}) + + +@baked_lazyload._add_unbound_all_fn +def baked_lazyload_all(*keys): + return strategy_options._UnboundLoad._from_keys( + strategy_options._UnboundLoad.baked_lazyload, keys, True, {}) + +baked_lazyload = baked_lazyload._unbound_fn +baked_lazyload_all = baked_lazyload_all._unbound_all_fn + +bakery = BakedQuery.bakery diff --git a/app/lib/sqlalchemy/ext/compiler.py b/app/lib/sqlalchemy/ext/compiler.py new file mode 100644 index 0000000..8b2bc95 --- /dev/null +++ b/app/lib/sqlalchemy/ext/compiler.py @@ -0,0 +1,474 @@ +# ext/compiler.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +r"""Provides an API for creation of custom ClauseElements and compilers. + +Synopsis +======== + +Usage involves the creation of one or more +:class:`~sqlalchemy.sql.expression.ClauseElement` subclasses and one or +more callables defining its compilation:: + + from sqlalchemy.ext.compiler import compiles + from sqlalchemy.sql.expression import ColumnClause + + class MyColumn(ColumnClause): + pass + + @compiles(MyColumn) + def compile_mycolumn(element, compiler, **kw): + return "[%s]" % element.name + +Above, ``MyColumn`` extends :class:`~sqlalchemy.sql.expression.ColumnClause`, +the base expression element for named column objects. The ``compiles`` +decorator registers itself with the ``MyColumn`` class so that it is invoked +when the object is compiled to a string:: + + from sqlalchemy import select + + s = select([MyColumn('x'), MyColumn('y')]) + print str(s) + +Produces:: + + SELECT [x], [y] + +Dialect-specific compilation rules +================================== + +Compilers can also be made dialect-specific. The appropriate compiler will be +invoked for the dialect in use:: + + from sqlalchemy.schema import DDLElement + + class AlterColumn(DDLElement): + + def __init__(self, column, cmd): + self.column = column + self.cmd = cmd + + @compiles(AlterColumn) + def visit_alter_column(element, compiler, **kw): + return "ALTER COLUMN %s ..." % element.column.name + + @compiles(AlterColumn, 'postgresql') + def visit_alter_column(element, compiler, **kw): + return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name, + element.column.name) + +The second ``visit_alter_column`` will be invoked when any ``postgresql`` +dialect is used. + +Compiling sub-elements of a custom expression construct +======================================================= + +The ``compiler`` argument is the +:class:`~sqlalchemy.engine.interfaces.Compiled` object in use. This object +can be inspected for any information about the in-progress compilation, +including ``compiler.dialect``, ``compiler.statement`` etc.
The +:class:`~sqlalchemy.sql.compiler.SQLCompiler` and +:class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()`` +method which can be used for compilation of embedded attributes:: + + from sqlalchemy.sql.expression import Executable, ClauseElement + + class InsertFromSelect(Executable, ClauseElement): + def __init__(self, table, select): + self.table = table + self.select = select + + @compiles(InsertFromSelect) + def visit_insert_from_select(element, compiler, **kw): + return "INSERT INTO %s (%s)" % ( + compiler.process(element.table, asfrom=True), + compiler.process(element.select) + ) + + insert = InsertFromSelect(t1, select([t1]).where(t1.c.x>5)) + print insert + +Produces:: + + "INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z + FROM mytable WHERE mytable.x > :x_1)" + +.. note:: + + The above ``InsertFromSelect`` construct is only an example, this actual + functionality is already available using the + :meth:`.Insert.from_select` method. + +.. note:: + + The above ``InsertFromSelect`` construct probably wants to have "autocommit" + enabled. See :ref:`enabling_compiled_autocommit` for this step. + +Cross Compiling between SQL and DDL compilers +--------------------------------------------- + +SQL and DDL constructs are each compiled using different base compilers - +``SQLCompiler`` and ``DDLCompiler``. A common need is to access the +compilation rules of SQL expressions from within a DDL expression. The +``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as +below where we generate a CHECK constraint that embeds a SQL expression:: + + @compiles(MyConstraint) + def compile_my_constraint(constraint, ddlcompiler, **kw): + return "CONSTRAINT %s CHECK (%s)" % ( + constraint.name, + ddlcompiler.sql_compiler.process( + constraint.expression, literal_binds=True) + ) + +Above, we add an additional flag to the process step as called by +:meth:`.SQLCompiler.process`, which is the ``literal_binds`` flag. This +indicates that any SQL expression which refers to a :class:`.BindParameter` +object or other "literal" object such as those which refer to strings or +integers should be rendered **in-place**, rather than being referred to as +a bound parameter; when emitting DDL, bound parameters are typically not +supported. + + +.. _enabling_compiled_autocommit: + +Enabling Autocommit on a Construct +================================== + +Recall from the section :ref:`autocommit` that the :class:`.Engine`, when +asked to execute a construct in the absence of a user-defined transaction, +detects if the given construct represents DML or DDL, that is, a data +modification or data definition statement, which requires (or may require, +in the case of DDL) that the transaction generated by the DBAPI be committed +(recall that DBAPI always has a transaction going on regardless of what +SQLAlchemy does). Checking for this is actually accomplished by checking for +the "autocommit" execution option on the construct. When building a +construct like an INSERT derivation, a new DDL type, or perhaps a stored +procedure that alters data, the "autocommit" option needs to be set in order +for the statement to function with "connectionless" execution +(as described in :ref:`dbengine_implicit`). 
+ +Currently a quick way to do this is to subclass :class:`.Executable`, then +add the "autocommit" flag to the ``_execution_options`` dictionary (note this +is a "frozen" dictionary which supplies a generative ``union()`` method):: + + from sqlalchemy.sql.expression import Executable, ClauseElement + + class MyInsertThing(Executable, ClauseElement): + _execution_options = \ + Executable._execution_options.union({'autocommit': True}) + +More succinctly, if the construct is truly similar to an INSERT, UPDATE, or +DELETE, :class:`.UpdateBase` can be used, which already is a subclass +of :class:`.Executable`, :class:`.ClauseElement` and includes the +``autocommit`` flag:: + + from sqlalchemy.sql.expression import UpdateBase + + class MyInsertThing(UpdateBase): + def __init__(self, ...): + ... + + + + +DDL elements that subclass :class:`.DDLElement` already have the +"autocommit" flag turned on. + + + + +Changing the default compilation of existing constructs +======================================================= + +The compiler extension applies just as well to the existing constructs. When +overriding the compilation of a built in SQL construct, the @compiles +decorator is invoked upon the appropriate class (be sure to use the class, +i.e. ``Insert`` or ``Select``, instead of the creation function such +as ``insert()`` or ``select()``). + +Within the new compilation function, to get at the "original" compilation +routine, use the appropriate visit_XXX method - this is +because compiler.process() will call upon the overriding routine and cause +an endless loop. For example, to add a "prefix" to all insert statements:: + + from sqlalchemy.sql.expression import Insert + + @compiles(Insert) + def prefix_inserts(insert, compiler, **kw): + return compiler.visit_insert(insert.prefix_with("some prefix"), **kw) + +The above compiler will prefix all INSERT statements with "some prefix" when +compiled. + +.. _type_compilation_extension: + +Changing Compilation of Types +============================= + +``compiler`` works for types, too, such as below where we implement the +MS-SQL specific 'max' keyword for ``String``/``VARCHAR``:: + + @compiles(String, 'mssql') + @compiles(VARCHAR, 'mssql') + def compile_varchar(element, compiler, **kw): + if element.length == 'max': + return "VARCHAR('max')" + else: + return compiler.visit_VARCHAR(element, **kw) + + foo = Table('foo', metadata, + Column('data', VARCHAR('max')) + ) + +Subclassing Guidelines +====================== + +A big part of using the compiler extension is subclassing SQLAlchemy +expression constructs. To make this easier, the expression and +schema packages feature a set of "bases" intended for common tasks. +A synopsis is as follows: + +* :class:`~sqlalchemy.sql.expression.ClauseElement` - This is the root + expression class. Any SQL expression can be derived from this base, and is + probably the best choice for longer constructs such as specialized INSERT + statements. + +* :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all + "column-like" elements. Anything that you'd place in the "columns" clause of + a SELECT statement (as well as order by and group by) can derive from this - + the object will automatically have Python "comparison" behavior. + + :class:`~sqlalchemy.sql.expression.ColumnElement` classes want to have a + ``type`` member which is the expression's return type.
This can be established + at the instance level in the constructor, or at the class level if it's + generally constant:: + + class timestamp(ColumnElement): + type = TIMESTAMP() + +* :class:`~sqlalchemy.sql.functions.FunctionElement` - This is a hybrid of a + ``ColumnElement`` and a "from clause" like object, and represents a SQL + function or stored procedure type of call. Since most databases support + statements along the line of "SELECT FROM <some function>" + ``FunctionElement`` adds in the ability to be used in the FROM clause of a + ``select()`` construct:: + + from sqlalchemy.sql.expression import FunctionElement + + class coalesce(FunctionElement): + name = 'coalesce' + + @compiles(coalesce) + def compile(element, compiler, **kw): + return "coalesce(%s)" % compiler.process(element.clauses) + + @compiles(coalesce, 'oracle') + def compile(element, compiler, **kw): + if len(element.clauses) > 2: + raise TypeError("coalesce only supports two arguments on Oracle") + return "nvl(%s)" % compiler.process(element.clauses) + +* :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions, + like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement`` + subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``. + ``DDLElement`` also features ``Table`` and ``MetaData`` event hooks via the + ``execute_at()`` method, allowing the construct to be invoked during CREATE + TABLE and DROP TABLE sequences. + +* :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which + should be used with any expression class that represents a "standalone" + SQL statement that can be passed directly to an ``execute()`` method. It + is already implicit within ``DDLElement`` and ``FunctionElement``. + +Further Examples +================ + +"UTC timestamp" function +------------------------- + +A function that works like "CURRENT_TIMESTAMP" except that it applies the +appropriate conversions so that the time is in UTC time. Timestamps are best +stored in relational databases as UTC, without time zones. UTC so that your +database doesn't think time has gone backwards in the hour when daylight +savings ends, without timezones because timezones are like character +encodings - they're best applied only at the endpoints of an application +(i.e. convert to UTC upon user input, re-apply desired timezone upon display). + +For PostgreSQL and Microsoft SQL Server:: + + from sqlalchemy.sql import expression + from sqlalchemy.ext.compiler import compiles + from sqlalchemy.types import DateTime + + class utcnow(expression.FunctionElement): + type = DateTime() + + @compiles(utcnow, 'postgresql') + def pg_utcnow(element, compiler, **kw): + return "TIMEZONE('utc', CURRENT_TIMESTAMP)" + + @compiles(utcnow, 'mssql') + def ms_utcnow(element, compiler, **kw): + return "GETUTCDATE()" + +Example usage:: + + from sqlalchemy import ( + Table, Column, Integer, String, DateTime, MetaData + ) + metadata = MetaData() + event = Table("event", metadata, + Column("id", Integer, primary_key=True), + Column("description", String(50), nullable=False), + Column("timestamp", DateTime, server_default=utcnow()) + ) + +"GREATEST" function +------------------- + +The "GREATEST" function is given any number of arguments and returns the one +that is of the highest value - it's equivalent to Python's ``max`` +function.
A SQL standard version versus a CASE based version which only +accommodates two arguments:: + + from sqlalchemy.sql import expression + from sqlalchemy.ext.compiler import compiles + from sqlalchemy.types import Numeric + + class greatest(expression.FunctionElement): + type = Numeric() + name = 'greatest' + + @compiles(greatest) + def default_greatest(element, compiler, **kw): + return compiler.visit_function(element) + + @compiles(greatest, 'sqlite') + @compiles(greatest, 'mssql') + @compiles(greatest, 'oracle') + def case_greatest(element, compiler, **kw): + arg1, arg2 = list(element.clauses) + return "CASE WHEN %s > %s THEN %s ELSE %s END" % ( + compiler.process(arg1), + compiler.process(arg2), + compiler.process(arg1), + compiler.process(arg2), + ) + +Example usage:: + + Session.query(Account).\ + filter( + greatest( + Account.checking_balance, + Account.savings_balance) > 10000 + ) + +"false" expression +------------------ + +Render a "false" constant expression, rendering as "0" on platforms that +don't have a "false" constant:: + + from sqlalchemy.sql import expression + from sqlalchemy.ext.compiler import compiles + + class sql_false(expression.ColumnElement): + pass + + @compiles(sql_false) + def default_false(element, compiler, **kw): + return "false" + + @compiles(sql_false, 'mssql') + @compiles(sql_false, 'mysql') + @compiles(sql_false, 'oracle') + def int_false(element, compiler, **kw): + return "0" + +Example usage:: + + from sqlalchemy import select, union_all + + exp = union_all( + select([users.c.name, sql_false().label("enrolled")]), + select([customers.c.name, customers.c.enrolled]) + ) + +""" +from .. import exc +from ..sql import visitors + + +def compiles(class_, *specs): + """Register a function as a compiler for a + given :class:`.ClauseElement` type.""" + + def decorate(fn): + # get an existing @compiles handler + existing = class_.__dict__.get('_compiler_dispatcher', None) + + # get the original handler. All ClauseElement classes have one + # of these, but some TypeEngine classes will not. + existing_dispatch = getattr(class_, '_compiler_dispatch', None) + + if not existing: + existing = _dispatcher() + + if existing_dispatch: + def _wrap_existing_dispatch(element, compiler, **kw): + try: + return existing_dispatch(element, compiler, **kw) + except exc.UnsupportedCompilationError: + raise exc.CompileError( + "%s construct has no default " + "compilation handler." % type(element)) + existing.specs['default'] = _wrap_existing_dispatch + + # TODO: why is the lambda needed ? + setattr(class_, '_compiler_dispatch', + lambda *arg, **kw: existing(*arg, **kw)) + setattr(class_, '_compiler_dispatcher', existing) + + if specs: + for s in specs: + existing.specs[s] = fn + + else: + existing.specs['default'] = fn + return fn + return decorate + + +def deregister(class_): + """Remove all custom compilers associated with a given + :class:`.ClauseElement` type.""" + + if hasattr(class_, '_compiler_dispatcher'): + # regenerate default _compiler_dispatch + visitors._generate_dispatch(class_) + # remove custom directive + del class_._compiler_dispatcher + + +class _dispatcher(object): + def __init__(self): + self.specs = {} + + def __call__(self, element, compiler, **kw): + # TODO: yes, this could also switch off of DBAPI in use. + fn = self.specs.get(compiler.dialect.name, None) + if not fn: + try: + fn = self.specs['default'] + except KeyError: + raise exc.CompileError( + "%s construct has no default " + "compilation handler." 
% type(element)) + + return fn(element, compiler, **kw) diff --git a/app/lib/sqlalchemy/ext/declarative/__init__.py b/app/lib/sqlalchemy/ext/declarative/__init__.py new file mode 100644 index 0000000..69c4f28 --- /dev/null +++ b/app/lib/sqlalchemy/ext/declarative/__init__.py @@ -0,0 +1,18 @@ +# ext/declarative/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from .api import declarative_base, synonym_for, comparable_using, \ + instrument_declarative, ConcreteBase, AbstractConcreteBase, \ + DeclarativeMeta, DeferredReflection, has_inherited_table,\ + declared_attr, as_declarative + + +__all__ = ['declarative_base', 'synonym_for', 'has_inherited_table', + 'comparable_using', 'instrument_declarative', 'declared_attr', + 'as_declarative', + 'ConcreteBase', 'AbstractConcreteBase', 'DeclarativeMeta', + 'DeferredReflection'] diff --git a/app/lib/sqlalchemy/ext/declarative/api.py b/app/lib/sqlalchemy/ext/declarative/api.py new file mode 100644 index 0000000..7c503d4 --- /dev/null +++ b/app/lib/sqlalchemy/ext/declarative/api.py @@ -0,0 +1,696 @@ +# ext/declarative/api.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +"""Public API functions and helpers for declarative.""" + + +from ...schema import Table, MetaData, Column +from ...orm import synonym as _orm_synonym, \ + comparable_property,\ + interfaces, properties, attributes +from ...orm.util import polymorphic_union +from ...orm.base import _mapper_or_none +from ...util import OrderedDict, hybridmethod, hybridproperty +from ... import util +from ... import exc +import weakref + +from .base import _as_declarative, \ + _declarative_constructor,\ + _DeferredMapperConfig, _add_attribute +from .clsregistry import _class_resolver + + +def instrument_declarative(cls, registry, metadata): + """Given a class, configure the class declaratively, + using the given registry, which can be any dictionary, and + MetaData object. + + """ + if '_decl_class_registry' in cls.__dict__: + raise exc.InvalidRequestError( + "Class %r already has been " + "instrumented declaratively" % cls) + cls._decl_class_registry = registry + cls.metadata = metadata + _as_declarative(cls, cls.__name__, cls.__dict__) + + +def has_inherited_table(cls): + """Given a class, return True if any of the classes it inherits from has a + mapped table, otherwise return False. + + This is used in declarative mixins to build attributes that behave + differently for the base class vs. a subclass in an inheritance + hierarchy. + + .. seealso:: + + :ref:`decl_mixin_inheritance` + + """ + for class_ in cls.__mro__[1:]: + if getattr(class_, '__table__', None) is not None: + return True + return False + + +class DeclarativeMeta(type): + def __init__(cls, classname, bases, dict_): + if '_decl_class_registry' not in cls.__dict__: + _as_declarative(cls, classname, cls.__dict__) + type.__init__(cls, classname, bases, dict_) + + def __setattr__(cls, key, value): + _add_attribute(cls, key, value) + + +def synonym_for(name, map_column=False): + """Decorator, make a Python @property a query synonym for a column. + + A decorator version of :func:`~sqlalchemy.orm.synonym`. 
The function being + decorated is the 'descriptor', otherwise passes its arguments through to + synonym():: + + @synonym_for('col') + @property + def prop(self): + return 'special sauce' + + The regular ``synonym()`` is also usable directly in a declarative setting + and may be convenient for read/write properties:: + + prop = synonym('col', descriptor=property(_read_prop, _write_prop)) + + """ + def decorate(fn): + return _orm_synonym(name, map_column=map_column, descriptor=fn) + return decorate + + +def comparable_using(comparator_factory): + """Decorator, allow a Python @property to be used in query criteria. + + This is a decorator front end to + :func:`~sqlalchemy.orm.comparable_property` that passes + through the comparator_factory and the function being decorated:: + + @comparable_using(MyComparatorType) + @property + def prop(self): + return 'special sauce' + + The regular ``comparable_property()`` is also usable directly in a + declarative setting and may be convenient for read/write properties:: + + prop = comparable_property(MyComparatorType) + + """ + def decorate(fn): + return comparable_property(comparator_factory, fn) + return decorate + + +class declared_attr(interfaces._MappedAttribute, property): + """Mark a class-level method as representing the definition of + a mapped property or special declarative member name. + + @declared_attr turns the attribute into a scalar-like + property that can be invoked from the uninstantiated class. + Declarative treats attributes specifically marked with + @declared_attr as returning a construct that is specific + to mapping or declarative table configuration. The name + of the attribute is that of what the non-dynamic version + of the attribute would be. + + @declared_attr is more often than not applicable to mixins, + to define relationships that are to be applied to different + implementors of the class:: + + class ProvidesUser(object): + "A mixin that adds a 'user' relationship to classes." + + @declared_attr + def user(self): + return relationship("User") + + It also can be applied to mapped classes, such as to provide + a "polymorphic" scheme for inheritance:: + + class Employee(Base): + id = Column(Integer, primary_key=True) + type = Column(String(50), nullable=False) + + @declared_attr + def __tablename__(cls): + return cls.__name__.lower() + + @declared_attr + def __mapper_args__(cls): + if cls.__name__ == 'Employee': + return { + "polymorphic_on":cls.type, + "polymorphic_identity":"Employee" + } + else: + return {"polymorphic_identity":cls.__name__} + + .. versionchanged:: 0.8 :class:`.declared_attr` can be used with + non-ORM or extension attributes, such as user-defined attributes + or :func:`.association_proxy` objects, which will be assigned + to the class at class construction time. 
+ + + """ + + def __init__(self, fget, cascading=False): + super(declared_attr, self).__init__(fget) + self.__doc__ = fget.__doc__ + self._cascading = cascading + + def __get__(desc, self, cls): + reg = cls.__dict__.get('_sa_declared_attr_reg', None) + if reg is None: + manager = attributes.manager_of_class(cls) + if manager is None: + util.warn( + "Unmanaged access of declarative attribute %s from " + "non-mapped class %s" % + (desc.fget.__name__, cls.__name__)) + return desc.fget(cls) + elif desc in reg: + return reg[desc] + else: + reg[desc] = obj = desc.fget(cls) + return obj + + @hybridmethod + def _stateful(cls, **kw): + return _stateful_declared_attr(**kw) + + @hybridproperty + def cascading(cls): + """Mark a :class:`.declared_attr` as cascading. + + This is a special-use modifier which indicates that a column + or MapperProperty-based declared attribute should be configured + distinctly per mapped subclass, within a mapped-inheritance scenario. + + Below, both MyClass and MySubClass will have a distinct + ``id`` Column object established:: + + class HasIdMixin(object): + @declared_attr.cascading + def id(cls): + if has_inherited_table(cls): + return Column(ForeignKey('myclass.id'), primary_key=True) + else: + return Column(Integer, primary_key=True) + + class MyClass(HasIdMixin, Base): + __tablename__ = 'myclass' + # ... + + class MySubClass(MyClass): + "" + # ... + + The behavior of the above configuration is that ``MySubClass`` + will refer to both its own ``id`` column as well as that of + ``MyClass`` underneath the attribute named ``id``. + + .. seealso:: + + :ref:`declarative_inheritance` + + :ref:`mixin_inheritance_columns` + + + """ + return cls._stateful(cascading=True) + + +class _stateful_declared_attr(declared_attr): + def __init__(self, **kw): + self.kw = kw + + def _stateful(self, **kw): + new_kw = self.kw.copy() + new_kw.update(kw) + return _stateful_declared_attr(**new_kw) + + def __call__(self, fn): + return declared_attr(fn, **self.kw) + + +def declarative_base(bind=None, metadata=None, mapper=None, cls=object, + name='Base', constructor=_declarative_constructor, + class_registry=None, + metaclass=DeclarativeMeta): + r"""Construct a base class for declarative class definitions. + + The new base class will be given a metaclass that produces + appropriate :class:`~sqlalchemy.schema.Table` objects and makes + the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the + information provided declaratively in the class and any subclasses + of the class. + + :param bind: An optional + :class:`~sqlalchemy.engine.Connectable`, will be assigned + the ``bind`` attribute on the :class:`~sqlalchemy.schema.MetaData` + instance. + + :param metadata: + An optional :class:`~sqlalchemy.schema.MetaData` instance. All + :class:`~sqlalchemy.schema.Table` objects implicitly declared by + subclasses of the base will share this MetaData. A MetaData instance + will be created if none is provided. The + :class:`~sqlalchemy.schema.MetaData` instance will be available via the + `metadata` attribute of the generated declarative base class. + + :param mapper: + An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will + be used to map subclasses to their Tables. + + :param cls: + Defaults to :class:`object`. A type to use as the base for the generated + declarative base class. May be a class or tuple of classes. + + :param name: + Defaults to ``Base``. The display name for the generated + class.
Customizing this is not required, but can improve clarity in + tracebacks and debugging. + + :param constructor: + Defaults to + :func:`~sqlalchemy.ext.declarative.base._declarative_constructor`, an + __init__ implementation that assigns \**kwargs for declared + fields and relationships to an instance. If ``None`` is supplied, + no __init__ will be provided and construction will fall back to + cls.__init__ by way of the normal Python semantics. + + :param class_registry: optional dictionary that will serve as the + registry of class names-> mapped classes when string names + are used to identify classes inside of :func:`.relationship` + and others. Allows two or more declarative base classes + to share the same registry of class names for simplified + inter-base relationships. + + :param metaclass: + Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__ + compatible callable to use as the meta type of the generated + declarative base class. + + .. versionchanged:: 1.1 if :paramref:`.declarative_base.cls` is a single class (rather + than a tuple), the constructed base class will inherit its docstring. + + .. seealso:: + + :func:`.as_declarative` + + """ + lcl_metadata = metadata or MetaData() + if bind: + lcl_metadata.bind = bind + + if class_registry is None: + class_registry = weakref.WeakValueDictionary() + + bases = not isinstance(cls, tuple) and (cls,) or cls + class_dict = dict(_decl_class_registry=class_registry, + metadata=lcl_metadata) + + if isinstance(cls, type): + class_dict['__doc__'] = cls.__doc__ + + if constructor: + class_dict['__init__'] = constructor + if mapper: + class_dict['__mapper_cls__'] = mapper + + return metaclass(name, bases, class_dict) + + +def as_declarative(**kw): + """ + Class decorator for :func:`.declarative_base`. + + Provides a syntactical shortcut to the ``cls`` argument + sent to :func:`.declarative_base`, allowing the base class + to be converted in-place to a "declarative" base:: + + from sqlalchemy.ext.declarative import as_declarative + + @as_declarative() + class Base(object): + @declared_attr + def __tablename__(cls): + return cls.__name__.lower() + id = Column(Integer, primary_key=True) + + class MyMappedClass(Base): + # ... + + All keyword arguments passed to :func:`.as_declarative` are passed + along to :func:`.declarative_base`. + + .. versionadded:: 0.8.3 + + .. seealso:: + + :func:`.declarative_base` + + """ + def decorate(cls): + kw['cls'] = cls + kw['name'] = cls.__name__ + return declarative_base(**kw) + + return decorate + + +class ConcreteBase(object): + """A helper class for 'concrete' declarative mappings. + + :class:`.ConcreteBase` will use the :func:`.polymorphic_union` + function automatically, against all tables mapped as a subclass + to this class. The function is called via the + ``__declare_last__()`` function, which is essentially + a hook for the :meth:`.after_configured` event. + + :class:`.ConcreteBase` produces a mapped + table for the class itself. Compare to :class:`.AbstractConcreteBase`, + which does not. 
+
+    Example::
+
+        from sqlalchemy.ext.declarative import ConcreteBase
+
+        class Employee(ConcreteBase, Base):
+            __tablename__ = 'employee'
+            employee_id = Column(Integer, primary_key=True)
+            name = Column(String(50))
+            __mapper_args__ = {
+                'polymorphic_identity':'employee',
+                'concrete':True}
+
+        class Manager(Employee):
+            __tablename__ = 'manager'
+            employee_id = Column(Integer, primary_key=True)
+            name = Column(String(50))
+            manager_data = Column(String(40))
+            __mapper_args__ = {
+                'polymorphic_identity':'manager',
+                'concrete':True}
+
+    .. seealso::
+
+        :class:`.AbstractConcreteBase`
+
+        :ref:`concrete_inheritance`
+
+        :ref:`inheritance_concrete_helpers`
+
+
+    """
+
+    @classmethod
+    def _create_polymorphic_union(cls, mappers):
+        return polymorphic_union(OrderedDict(
+            (mp.polymorphic_identity, mp.local_table)
+            for mp in mappers
+        ), 'type', 'pjoin')
+
+    @classmethod
+    def __declare_first__(cls):
+        m = cls.__mapper__
+        if m.with_polymorphic:
+            return
+
+        mappers = list(m.self_and_descendants)
+        pjoin = cls._create_polymorphic_union(mappers)
+        m._set_with_polymorphic(("*", pjoin))
+        m._set_polymorphic_on(pjoin.c.type)
+
+
+class AbstractConcreteBase(ConcreteBase):
+    """A helper class for 'concrete' declarative mappings.
+
+    :class:`.AbstractConcreteBase` will use the :func:`.polymorphic_union`
+    function automatically, against all tables mapped as a subclass
+    to this class.  The function is invoked via the
+    ``__declare_first__()`` method, which is essentially a hook
+    for the :meth:`.before_configured` event.
+
+    :class:`.AbstractConcreteBase` does produce a mapped class
+    for the base class; however, it is not persisted to any table.  It
+    is instead mapped directly to the "polymorphic" selectable
+    and is only used for selecting.  Compare to :class:`.ConcreteBase`,
+    which does create a persisted table for the base class.
+
+    Example::
+
+        from sqlalchemy.ext.declarative import AbstractConcreteBase
+
+        class Employee(AbstractConcreteBase, Base):
+            pass
+
+        class Manager(Employee):
+            __tablename__ = 'manager'
+            employee_id = Column(Integer, primary_key=True)
+            name = Column(String(50))
+            manager_data = Column(String(40))
+
+            __mapper_args__ = {
+                'polymorphic_identity':'manager',
+                'concrete':True}
+
+    The abstract base class is handled by declarative in a special way;
+    at class configuration time, it behaves like a declarative mixin
+    or an ``__abstract__`` base class.  Once classes are configured
+    and mappings are produced, it then gets mapped itself, but
+    after all of its descendants.  This system of mapping is unique
+    to :class:`.AbstractConcreteBase` and is not found elsewhere
+    in SQLAlchemy.
+
+    Using this approach, we can specify columns and properties
+    that will take effect on mapped subclasses, in the way that
+    we normally do as in :ref:`declarative_mixins`::
+
+        class Company(Base):
+            __tablename__ = 'company'
+            id = Column(Integer, primary_key=True)
+
+        class Employee(AbstractConcreteBase, Base):
+            employee_id = Column(Integer, primary_key=True)
+
+            @declared_attr
+            def company_id(cls):
+                return Column(ForeignKey('company.id'))
+
+            @declared_attr
+            def company(cls):
+                return relationship("Company")
+
+        class Manager(Employee):
+            __tablename__ = 'manager'
+
+            name = Column(String(50))
+            manager_data = Column(String(40))
+
+            __mapper_args__ = {
+                'polymorphic_identity':'manager',
+                'concrete':True}
+
+    When we make use of our mappings, however, both ``Manager`` and
+    ``Employee`` will have an independently usable ``.company`` attribute::
+
+        session.query(Employee).filter(Employee.company.has(id=5))
+
+    .. versionchanged:: 1.0.0 - The mechanics of :class:`.AbstractConcreteBase`
+       have been reworked to support relationships established directly
+       on the abstract base, without any special configurational steps.
+
+    .. seealso::
+
+        :class:`.ConcreteBase`
+
+        :ref:`concrete_inheritance`
+
+        :ref:`inheritance_concrete_helpers`
+
+    """
+
+    __no_table__ = True
+
+    @classmethod
+    def __declare_first__(cls):
+        cls._sa_decl_prepare_nocascade()
+
+    @classmethod
+    def _sa_decl_prepare_nocascade(cls):
+        if getattr(cls, '__mapper__', None):
+            return
+
+        to_map = _DeferredMapperConfig.config_for_cls(cls)
+
+        # can't rely on 'self_and_descendants' here
+        # since technically an immediate subclass
+        # might not be mapped, but a subclass
+        # may be.
+        mappers = []
+        stack = list(cls.__subclasses__())
+        while stack:
+            klass = stack.pop()
+            stack.extend(klass.__subclasses__())
+            mn = _mapper_or_none(klass)
+            if mn is not None:
+                mappers.append(mn)
+        pjoin = cls._create_polymorphic_union(mappers)
+
+        # For columns that were declared on the class, these
+        # are normally ignored with the "__no_table__" mapping,
+        # unless they have a different attribute key vs. col name
+        # and are in the properties argument.
+        # In that case, ensure we update the properties entry
+        # to the correct column from the pjoin target table.
+        declared_cols = set(to_map.declared_columns)
+        for k, v in list(to_map.properties.items()):
+            if v in declared_cols:
+                to_map.properties[k] = pjoin.c[v.key]
+
+        to_map.local_table = pjoin
+
+        m_args = to_map.mapper_args_fn or dict
+
+        def mapper_args():
+            args = m_args()
+            args['polymorphic_on'] = pjoin.c.type
+            return args
+        to_map.mapper_args_fn = mapper_args
+
+        m = to_map.map()
+
+        for scls in cls.__subclasses__():
+            sm = _mapper_or_none(scls)
+            if sm and sm.concrete and cls in scls.__bases__:
+                sm._set_concrete_base(m)
+
+
+class DeferredReflection(object):
+    """A helper class for construction of mappings based on
+    a deferred reflection step.
+
+    Normally, declarative can be used with reflection by
+    setting a :class:`.Table` object using autoload=True
+    as the ``__table__`` attribute on a declarative class.
+    The caveat is that the :class:`.Table` must be fully
+    reflected, or at the very least have a primary key column,
+    at the point at which a normal declarative mapping is
+    constructed, meaning the :class:`.Engine` must be available
+    at class declaration time.
+
+    The :class:`.DeferredReflection` mixin moves the construction
+    of mappers to be at a later point, after a specific
+    method is called which first reflects all :class:`.Table`
+    objects created so far.
Classes can define it as such::
+
+        from sqlalchemy.ext.declarative import declarative_base
+        from sqlalchemy.ext.declarative import DeferredReflection
+        Base = declarative_base()
+
+        class MyClass(DeferredReflection, Base):
+            __tablename__ = 'mytable'
+
+    Above, ``MyClass`` is not yet mapped.  After a series of
+    classes have been defined in the above fashion, all tables
+    can be reflected and mappings created using
+    :meth:`.prepare`::
+
+        engine = create_engine("someengine://...")
+        DeferredReflection.prepare(engine)
+
+    The :class:`.DeferredReflection` mixin can be applied to individual
+    classes, used as the base for the declarative base itself,
+    or used in a custom abstract class.  Using an abstract base
+    allows only a subset of classes to be prepared for a
+    particular prepare step, which is necessary for applications
+    that use more than one engine.  For example, if an application
+    has two engines, you might use two bases, and prepare each
+    separately, e.g.::
+
+        class ReflectedOne(DeferredReflection, Base):
+            __abstract__ = True
+
+        class ReflectedTwo(DeferredReflection, Base):
+            __abstract__ = True
+
+        class MyClass(ReflectedOne):
+            __tablename__ = 'mytable'
+
+        class MyOtherClass(ReflectedOne):
+            __tablename__ = 'myothertable'
+
+        class YetAnotherClass(ReflectedTwo):
+            __tablename__ = 'yetanothertable'
+
+        # ... etc.
+
+    Above, the class hierarchies for ``ReflectedOne`` and
+    ``ReflectedTwo`` can be configured separately::
+
+        ReflectedOne.prepare(engine_one)
+        ReflectedTwo.prepare(engine_two)
+
+    .. versionadded:: 0.8
+
+    """
+    @classmethod
+    def prepare(cls, engine):
+        """Reflect all :class:`.Table` objects for all current
+        :class:`.DeferredReflection` subclasses"""
+
+        to_map = _DeferredMapperConfig.classes_for_base(cls)
+        for thingy in to_map:
+            cls._sa_decl_prepare(thingy.local_table, engine)
+            thingy.map()
+            mapper = thingy.cls.__mapper__
+            metadata = mapper.class_.metadata
+            for rel in mapper._props.values():
+                if isinstance(rel, properties.RelationshipProperty) and \
+                        rel.secondary is not None:
+                    if isinstance(rel.secondary, Table):
+                        cls._reflect_table(rel.secondary, engine)
+                    elif isinstance(rel.secondary, _class_resolver):
+                        rel.secondary._resolvers += (
+                            cls._sa_deferred_table_resolver(engine, metadata),
+                        )
+
+    @classmethod
+    def _sa_deferred_table_resolver(cls, engine, metadata):
+        def _resolve(key):
+            t1 = Table(key, metadata)
+            cls._reflect_table(t1, engine)
+            return t1
+        return _resolve
+
+    @classmethod
+    def _sa_decl_prepare(cls, local_table, engine):
+        # autoload Table, which is already
+        # present in the metadata.  This
+        # will fill in db-loaded columns
+        # into the existing Table object.
+
+        if local_table is not None:
+            cls._reflect_table(local_table, engine)
+
+    @classmethod
+    def _reflect_table(cls, table, engine):
+        Table(table.name,
+              table.metadata,
+              extend_existing=True,
+              autoload_replace=False,
+              autoload=True,
+              autoload_with=engine,
+              schema=table.schema)
diff --git a/app/lib/sqlalchemy/ext/declarative/base.py b/app/lib/sqlalchemy/ext/declarative/base.py
new file mode 100644
index 0000000..3beee31
--- /dev/null
+++ b/app/lib/sqlalchemy/ext/declarative/base.py
@@ -0,0 +1,662 @@
+# ext/declarative/base.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+"""Internal implementation for declarative."""
+
+from ...schema import Table, Column
+from ...orm import mapper, class_mapper, synonym
+from ...orm.interfaces import MapperProperty
+from ...orm.properties import ColumnProperty, CompositeProperty
+from ...orm.attributes import QueryableAttribute
+from ...orm.base import _is_mapped_class
+from ... import util, exc
+from ...util import topological
+from ...sql import expression
+from ... import event
+from . import clsregistry
+import collections
+import weakref
+from sqlalchemy.orm import instrumentation
+
+declared_attr = declarative_props = None
+
+
+def _declared_mapping_info(cls):
+    # deferred mapping
+    if _DeferredMapperConfig.has_cls(cls):
+        return _DeferredMapperConfig.config_for_cls(cls)
+    # regular mapping
+    elif _is_mapped_class(cls):
+        return class_mapper(cls, configure=False)
+    else:
+        return None
+
+
+def _resolve_for_abstract(cls):
+    if cls is object:
+        return None
+
+    if _get_immediate_cls_attr(cls, '__abstract__', strict=True):
+        for sup in cls.__bases__:
+            sup = _resolve_for_abstract(sup)
+            if sup is not None:
+                return sup
+        else:
+            return None
+    else:
+        return cls
+
+
+def _get_immediate_cls_attr(cls, attrname, strict=False):
+    """return an attribute of the class that is either present directly
+    on the class, i.e. not on a superclass, or is from a superclass but
+    this superclass is a mixin, that is, not a descendant of
+    the declarative base.
+
+    This is used to detect attributes that indicate something about
+    a mapped class independently from any mapped classes that it may
+    inherit from.
+
+    """
+    if not issubclass(cls, object):
+        return None
+
+    for base in cls.__mro__:
+        _is_declarative_inherits = hasattr(base, '_decl_class_registry')
+        if attrname in base.__dict__ and (
+            base is cls or
+            ((base in cls.__bases__ if strict else True)
+                and not _is_declarative_inherits)
+        ):
+            return getattr(base, attrname)
+    else:
+        return None
+
+
+def _as_declarative(cls, classname, dict_):
+    global declared_attr, declarative_props
+    if declared_attr is None:
+        from .api import declared_attr
+        declarative_props = (declared_attr, util.classproperty)
+
+    if _get_immediate_cls_attr(cls, '__abstract__', strict=True):
+        return
+
+    _MapperConfig.setup_mapping(cls, classname, dict_)
+
+
+class _MapperConfig(object):
+
+    @classmethod
+    def setup_mapping(cls, cls_, classname, dict_):
+        defer_map = _get_immediate_cls_attr(
+            cls_, '_sa_decl_prepare_nocascade', strict=True) or \
+            hasattr(cls_, '_sa_decl_prepare')
+
+        if defer_map:
+            cfg_cls = _DeferredMapperConfig
+        else:
+            cfg_cls = _MapperConfig
+        cfg_cls(cls_, classname, dict_)
+
+    def __init__(self, cls_, classname, dict_):
+
+        self.cls = cls_
+
+        # dict_ will be a dictproxy, which we can't write to, and we need to!
+        self.dict_ = dict(dict_)
+        self.classname = classname
+        self.mapped_table = None
+        self.properties = util.OrderedDict()
+        self.declared_columns = set()
+        self.column_copies = {}
+        self._setup_declared_events()
+
+        # temporary registry.  While early 1.0 versions
+        # set up the ClassManager here, by API contract
+        # we can't do that until there's a mapper.
+        self.cls._sa_declared_attr_reg = {}
+
+        self._scan_attributes()
+
+        clsregistry.add_class(self.classname, self.cls)
+
+        self._extract_mappable_attributes()
+
+        self._extract_declared_columns()
+
+        self._setup_table()
+
+        self._setup_inheritance()
+
+        self._early_mapping()
+
+    def _early_mapping(self):
+        self.map()
+
+    def _setup_declared_events(self):
+        if _get_immediate_cls_attr(self.cls, '__declare_last__'):
+            @event.listens_for(mapper, "after_configured")
+            def after_configured():
+                self.cls.__declare_last__()
+
+        if _get_immediate_cls_attr(self.cls, '__declare_first__'):
+            @event.listens_for(mapper, "before_configured")
+            def before_configured():
+                self.cls.__declare_first__()
+
+    def _scan_attributes(self):
+        cls = self.cls
+        dict_ = self.dict_
+        column_copies = self.column_copies
+        mapper_args_fn = None
+        table_args = inherited_table_args = None
+        tablename = None
+
+        for base in cls.__mro__:
+            class_mapped = base is not cls and \
+                _declared_mapping_info(base) is not None and \
+                not _get_immediate_cls_attr(
+                    base, '_sa_decl_prepare_nocascade', strict=True)
+
+            if not class_mapped and base is not cls:
+                self._produce_column_copies(base)
+
+            for name, obj in vars(base).items():
+                if name == '__mapper_args__':
+                    if not mapper_args_fn and (
+                        not class_mapped or
+                        isinstance(obj, declarative_props)
+                    ):
+                        # don't even invoke __mapper_args__ until
+                        # after we've determined everything about the
+                        # mapped table.
+                        # make a copy of it so a class-level dictionary
+                        # is not overwritten when we update column-based
+                        # arguments.
+                        mapper_args_fn = lambda: dict(cls.__mapper_args__)
+                elif name == '__tablename__':
+                    if not tablename and (
+                        not class_mapped or
+                        isinstance(obj, declarative_props)
+                    ):
+                        tablename = cls.__tablename__
+                elif name == '__table_args__':
+                    if not table_args and (
+                        not class_mapped or
+                        isinstance(obj, declarative_props)
+                    ):
+                        table_args = cls.__table_args__
+                        if not isinstance(
+                                table_args, (tuple, dict, type(None))):
+                            raise exc.ArgumentError(
+                                "__table_args__ value must be a tuple, "
+                                "dict, or None")
+                        if base is not cls:
+                            inherited_table_args = True
+                elif class_mapped:
+                    if isinstance(obj, declarative_props):
+                        util.warn("Regular (i.e. not __special__) "
+                                  "attribute '%s.%s' uses @declared_attr, "
+                                  "but owning class %s is mapped - "
+                                  "not applying to subclass %s."
+                                  % (base.__name__, name, base, cls))
+                    continue
+                elif base is not cls:
+                    # we're a mixin, abstract base, or something that is
+                    # acting like that for now.
+                    if isinstance(obj, Column):
+                        # already copied columns to the mapped class.
+                        continue
+                    elif isinstance(obj, MapperProperty):
+                        raise exc.InvalidRequestError(
+                            "Mapper properties (i.e. deferred, "
+                            "column_property(), relationship(), etc.)
must " + "be declared as @declared_attr callables " + "on declarative mixin classes.") + elif isinstance(obj, declarative_props): + oldclassprop = isinstance(obj, util.classproperty) + if not oldclassprop and obj._cascading: + dict_[name] = column_copies[obj] = \ + ret = obj.__get__(obj, cls) + setattr(cls, name, ret) + else: + if oldclassprop: + util.warn_deprecated( + "Use of sqlalchemy.util.classproperty on " + "declarative classes is deprecated.") + dict_[name] = column_copies[obj] = \ + ret = getattr(cls, name) + if isinstance(ret, (Column, MapperProperty)) and \ + ret.doc is None: + ret.doc = obj.__doc__ + + if inherited_table_args and not tablename: + table_args = None + + self.table_args = table_args + self.tablename = tablename + self.mapper_args_fn = mapper_args_fn + + def _produce_column_copies(self, base): + cls = self.cls + dict_ = self.dict_ + column_copies = self.column_copies + # copy mixin columns to the mapped class + for name, obj in vars(base).items(): + if isinstance(obj, Column): + if getattr(cls, name) is not obj: + # if column has been overridden + # (like by the InstrumentedAttribute of the + # superclass), skip + continue + elif obj.foreign_keys: + raise exc.InvalidRequestError( + "Columns with foreign keys to other columns " + "must be declared as @declared_attr callables " + "on declarative mixin classes. ") + elif name not in dict_ and not ( + '__table__' in dict_ and + (obj.name or name) in dict_['__table__'].c + ): + column_copies[obj] = copy_ = obj.copy() + copy_._creation_order = obj._creation_order + setattr(cls, name, copy_) + dict_[name] = copy_ + + def _extract_mappable_attributes(self): + cls = self.cls + dict_ = self.dict_ + + our_stuff = self.properties + + for k in list(dict_): + + if k in ('__table__', '__tablename__', '__mapper_args__'): + continue + + value = dict_[k] + if isinstance(value, declarative_props): + value = getattr(cls, k) + + elif isinstance(value, QueryableAttribute) and \ + value.class_ is not cls and \ + value.key != k: + # detect a QueryableAttribute that's already mapped being + # assigned elsewhere in userland, turn into a synonym() + value = synonym(value.key) + setattr(cls, k, value) + + if (isinstance(value, tuple) and len(value) == 1 and + isinstance(value[0], (Column, MapperProperty))): + util.warn("Ignoring declarative-like tuple value of attribute " + "%s: possibly a copy-and-paste error with a comma " + "left at the end of the line?" % k) + continue + elif not isinstance(value, (Column, MapperProperty)): + # using @declared_attr for some object that + # isn't Column/MapperProperty; remove from the dict_ + # and place the evaluated value onto the class. + if not k.startswith('__'): + dict_.pop(k) + setattr(cls, k, value) + continue + # we expect to see the name 'metadata' in some valid cases; + # however at this point we see it's assigned to something trying + # to be mapped, so raise for that. + elif k == 'metadata': + raise exc.InvalidRequestError( + "Attribute name 'metadata' is reserved " + "for the MetaData instance when using a " + "declarative base class." 
+ ) + prop = clsregistry._deferred_relationship(cls, value) + our_stuff[k] = prop + + def _extract_declared_columns(self): + our_stuff = self.properties + + # set up attributes in the order they were created + our_stuff.sort(key=lambda key: our_stuff[key]._creation_order) + + # extract columns from the class dict + declared_columns = self.declared_columns + name_to_prop_key = collections.defaultdict(set) + for key, c in list(our_stuff.items()): + if isinstance(c, (ColumnProperty, CompositeProperty)): + for col in c.columns: + if isinstance(col, Column) and \ + col.table is None: + _undefer_column_name(key, col) + if not isinstance(c, CompositeProperty): + name_to_prop_key[col.name].add(key) + declared_columns.add(col) + elif isinstance(c, Column): + _undefer_column_name(key, c) + name_to_prop_key[c.name].add(key) + declared_columns.add(c) + # if the column is the same name as the key, + # remove it from the explicit properties dict. + # the normal rules for assigning column-based properties + # will take over, including precedence of columns + # in multi-column ColumnProperties. + if key == c.key: + del our_stuff[key] + + for name, keys in name_to_prop_key.items(): + if len(keys) > 1: + util.warn( + "On class %r, Column object %r named " + "directly multiple times, " + "only one will be used: %s. " + "Consider using orm.synonym instead" % + (self.classname, name, (", ".join(sorted(keys)))) + ) + + def _setup_table(self): + cls = self.cls + tablename = self.tablename + table_args = self.table_args + dict_ = self.dict_ + declared_columns = self.declared_columns + + declared_columns = self.declared_columns = sorted( + declared_columns, key=lambda c: c._creation_order) + table = None + + if hasattr(cls, '__table_cls__'): + table_cls = util.unbound_method_to_callable(cls.__table_cls__) + else: + table_cls = Table + + if '__table__' not in dict_: + if tablename is not None: + + args, table_kw = (), {} + if table_args: + if isinstance(table_args, dict): + table_kw = table_args + elif isinstance(table_args, tuple): + if isinstance(table_args[-1], dict): + args, table_kw = table_args[0:-1], table_args[-1] + else: + args = table_args + + autoload = dict_.get('__autoload__') + if autoload: + table_kw['autoload'] = True + + cls.__table__ = table = table_cls( + tablename, cls.metadata, + *(tuple(declared_columns) + tuple(args)), + **table_kw) + else: + table = cls.__table__ + if declared_columns: + for c in declared_columns: + if not table.c.contains_column(c): + raise exc.ArgumentError( + "Can't add additional column %r when " + "specifying __table__" % c.key + ) + self.local_table = table + + def _setup_inheritance(self): + table = self.local_table + cls = self.cls + table_args = self.table_args + declared_columns = self.declared_columns + for c in cls.__bases__: + c = _resolve_for_abstract(c) + if c is None: + continue + if _declared_mapping_info(c) is not None and \ + not _get_immediate_cls_attr( + c, '_sa_decl_prepare_nocascade', strict=True): + self.inherits = c + break + else: + self.inherits = None + + if table is None and self.inherits is None and \ + not _get_immediate_cls_attr(cls, '__no_table__'): + + raise exc.InvalidRequestError( + "Class %r does not have a __table__ or __tablename__ " + "specified and does not inherit from an existing " + "table-mapped class." 
% cls + ) + elif self.inherits: + inherited_mapper = _declared_mapping_info(self.inherits) + inherited_table = inherited_mapper.local_table + inherited_mapped_table = inherited_mapper.mapped_table + + if table is None: + # single table inheritance. + # ensure no table args + if table_args: + raise exc.ArgumentError( + "Can't place __table_args__ on an inherited class " + "with no table." + ) + # add any columns declared here to the inherited table. + for c in declared_columns: + if c.primary_key: + raise exc.ArgumentError( + "Can't place primary key columns on an inherited " + "class with no table." + ) + if c.name in inherited_table.c: + if inherited_table.c[c.name] is c: + continue + raise exc.ArgumentError( + "Column '%s' on class %s conflicts with " + "existing column '%s'" % + (c, cls, inherited_table.c[c.name]) + ) + inherited_table.append_column(c) + if inherited_mapped_table is not None and \ + inherited_mapped_table is not inherited_table: + inherited_mapped_table._refresh_for_new_column(c) + + def _prepare_mapper_arguments(self): + properties = self.properties + if self.mapper_args_fn: + mapper_args = self.mapper_args_fn() + else: + mapper_args = {} + + # make sure that column copies are used rather + # than the original columns from any mixins + for k in ('version_id_col', 'polymorphic_on',): + if k in mapper_args: + v = mapper_args[k] + mapper_args[k] = self.column_copies.get(v, v) + + assert 'inherits' not in mapper_args, \ + "Can't specify 'inherits' explicitly with declarative mappings" + + if self.inherits: + mapper_args['inherits'] = self.inherits + + if self.inherits and not mapper_args.get('concrete', False): + # single or joined inheritance + # exclude any cols on the inherited table which are + # not mapped on the parent class, to avoid + # mapping columns specific to sibling/nephew classes + inherited_mapper = _declared_mapping_info(self.inherits) + inherited_table = inherited_mapper.local_table + + if 'exclude_properties' not in mapper_args: + mapper_args['exclude_properties'] = exclude_properties = \ + set( + [c.key for c in inherited_table.c + if c not in inherited_mapper._columntoproperty] + ).union( + inherited_mapper.exclude_properties or () + ) + exclude_properties.difference_update( + [c.key for c in self.declared_columns]) + + # look through columns in the current mapper that + # are keyed to a propname different than the colname + # (if names were the same, we'd have popped it out above, + # in which case the mapper makes this combination). + # See if the superclass has a similar column property. + # If so, join them together. + for k, col in list(properties.items()): + if not isinstance(col, expression.ColumnElement): + continue + if k in inherited_mapper._props: + p = inherited_mapper._props[k] + if isinstance(p, ColumnProperty): + # note here we place the subclass column + # first. See [ticket:1892] for background. 
+ properties[k] = [col] + p.columns + result_mapper_args = mapper_args.copy() + result_mapper_args['properties'] = properties + self.mapper_args = result_mapper_args + + def map(self): + self._prepare_mapper_arguments() + if hasattr(self.cls, '__mapper_cls__'): + mapper_cls = util.unbound_method_to_callable( + self.cls.__mapper_cls__) + else: + mapper_cls = mapper + + self.cls.__mapper__ = mp_ = mapper_cls( + self.cls, + self.local_table, + **self.mapper_args + ) + del self.cls._sa_declared_attr_reg + return mp_ + + +class _DeferredMapperConfig(_MapperConfig): + _configs = util.OrderedDict() + + def _early_mapping(self): + pass + + @property + def cls(self): + return self._cls() + + @cls.setter + def cls(self, class_): + self._cls = weakref.ref(class_, self._remove_config_cls) + self._configs[self._cls] = self + + @classmethod + def _remove_config_cls(cls, ref): + cls._configs.pop(ref, None) + + @classmethod + def has_cls(cls, class_): + # 2.6 fails on weakref if class_ is an old style class + return isinstance(class_, type) and \ + weakref.ref(class_) in cls._configs + + @classmethod + def config_for_cls(cls, class_): + return cls._configs[weakref.ref(class_)] + + @classmethod + def classes_for_base(cls, base_cls, sort=True): + classes_for_base = [m for m in cls._configs.values() + if issubclass(m.cls, base_cls)] + if not sort: + return classes_for_base + + all_m_by_cls = dict( + (m.cls, m) + for m in classes_for_base + ) + + tuples = [] + for m_cls in all_m_by_cls: + tuples.extend( + (all_m_by_cls[base_cls], all_m_by_cls[m_cls]) + for base_cls in m_cls.__bases__ + if base_cls in all_m_by_cls + ) + return list( + topological.sort( + tuples, + classes_for_base + ) + ) + + def map(self): + self._configs.pop(self._cls, None) + return super(_DeferredMapperConfig, self).map() + + +def _add_attribute(cls, key, value): + """add an attribute to an existing declarative class. + + This runs through the logic to determine MapperProperty, + adds it to the Mapper, adds a column to the mapped Table, etc. + + """ + + if '__mapper__' in cls.__dict__: + if isinstance(value, Column): + _undefer_column_name(key, value) + cls.__table__.append_column(value) + cls.__mapper__.add_property(key, value) + elif isinstance(value, ColumnProperty): + for col in value.columns: + if isinstance(col, Column) and col.table is None: + _undefer_column_name(key, col) + cls.__table__.append_column(col) + cls.__mapper__.add_property(key, value) + elif isinstance(value, MapperProperty): + cls.__mapper__.add_property( + key, + clsregistry._deferred_relationship(cls, value) + ) + elif isinstance(value, QueryableAttribute) and value.key != key: + # detect a QueryableAttribute that's already mapped being + # assigned elsewhere in userland, turn into a synonym() + value = synonym(value.key) + cls.__mapper__.add_property( + key, + clsregistry._deferred_relationship(cls, value) + ) + else: + type.__setattr__(cls, key, value) + else: + type.__setattr__(cls, key, value) + + +def _declarative_constructor(self, **kwargs): + """A simple constructor that allows initialization from kwargs. + + Sets attributes on the constructed instance using the names and + values in ``kwargs``. + + Only keys that are present as + attributes of the instance's class are allowed. These could be, + for example, any mapped columns or relationships. 
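+
+    As a brief illustration (``User`` here stands in for any mapped
+    class; the class and its ``name``/``fullname`` columns are
+    hypothetical)::
+
+        u1 = User(name='ed', fullname='Ed Jones')
+
+        # a keyword with no matching class attribute raises TypeError:
+        # User(nickname='edster')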
+ """ + cls_ = type(self) + for k in kwargs: + if not hasattr(cls_, k): + raise TypeError( + "%r is an invalid keyword argument for %s" % + (k, cls_.__name__)) + setattr(self, k, kwargs[k]) +_declarative_constructor.__name__ = '__init__' + + +def _undefer_column_name(key, column): + if column.key is None: + column.key = key + if column.name is None: + column.name = key diff --git a/app/lib/sqlalchemy/ext/declarative/clsregistry.py b/app/lib/sqlalchemy/ext/declarative/clsregistry.py new file mode 100644 index 0000000..b2c5bc5 --- /dev/null +++ b/app/lib/sqlalchemy/ext/declarative/clsregistry.py @@ -0,0 +1,328 @@ +# ext/declarative/clsregistry.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +"""Routines to handle the string class registry used by declarative. + +This system allows specification of classes and expressions used in +:func:`.relationship` using strings. + +""" +from ...orm.properties import ColumnProperty, RelationshipProperty, \ + SynonymProperty +from ...schema import _get_table_key +from ...orm import class_mapper, interfaces +from ... import util +from ... import inspection +from ... import exc +import weakref + +# strong references to registries which we place in +# the _decl_class_registry, which is usually weak referencing. +# the internal registries here link to classes with weakrefs and remove +# themselves when all references to contained classes are removed. +_registries = set() + + +def add_class(classname, cls): + """Add a class to the _decl_class_registry associated with the + given declarative class. + + """ + if classname in cls._decl_class_registry: + # class already exists. + existing = cls._decl_class_registry[classname] + if not isinstance(existing, _MultipleClassMarker): + existing = \ + cls._decl_class_registry[classname] = \ + _MultipleClassMarker([cls, existing]) + else: + cls._decl_class_registry[classname] = cls + + try: + root_module = cls._decl_class_registry['_sa_module_registry'] + except KeyError: + cls._decl_class_registry['_sa_module_registry'] = \ + root_module = _ModuleMarker('_sa_module_registry', None) + + tokens = cls.__module__.split(".") + + # build up a tree like this: + # modulename: myapp.snacks.nuts + # + # myapp->snack->nuts->(classes) + # snack->nuts->(classes) + # nuts->(classes) + # + # this allows partial token paths to be used. + while tokens: + token = tokens.pop(0) + module = root_module.get_module(token) + for token in tokens: + module = module.get_module(token) + module.add_class(classname, cls) + + +class _MultipleClassMarker(object): + """refers to multiple classes of the same name + within _decl_class_registry. + + """ + + __slots__ = 'on_remove', 'contents', '__weakref__' + + def __init__(self, classes, on_remove=None): + self.on_remove = on_remove + self.contents = set([ + weakref.ref(item, self._remove_item) for item in classes]) + _registries.add(self) + + def __iter__(self): + return (ref() for ref in self.contents) + + def attempt_get(self, path, key): + if len(self.contents) > 1: + raise exc.InvalidRequestError( + "Multiple classes found for path \"%s\" " + "in the registry of this declarative " + "base. Please use a fully module-qualified path." 
% + (".".join(path + [key])) + ) + else: + ref = list(self.contents)[0] + cls = ref() + if cls is None: + raise NameError(key) + return cls + + def _remove_item(self, ref): + self.contents.remove(ref) + if not self.contents: + _registries.discard(self) + if self.on_remove: + self.on_remove() + + def add_item(self, item): + # protect against class registration race condition against + # asynchronous garbage collection calling _remove_item, + # [ticket:3208] + modules = set([ + cls.__module__ for cls in + [ref() for ref in self.contents] if cls is not None]) + if item.__module__ in modules: + util.warn( + "This declarative base already contains a class with the " + "same class name and module name as %s.%s, and will " + "be replaced in the string-lookup table." % ( + item.__module__, + item.__name__ + ) + ) + self.contents.add(weakref.ref(item, self._remove_item)) + + +class _ModuleMarker(object): + """"refers to a module name within + _decl_class_registry. + + """ + + __slots__ = 'parent', 'name', 'contents', 'mod_ns', 'path', '__weakref__' + + def __init__(self, name, parent): + self.parent = parent + self.name = name + self.contents = {} + self.mod_ns = _ModNS(self) + if self.parent: + self.path = self.parent.path + [self.name] + else: + self.path = [] + _registries.add(self) + + def __contains__(self, name): + return name in self.contents + + def __getitem__(self, name): + return self.contents[name] + + def _remove_item(self, name): + self.contents.pop(name, None) + if not self.contents and self.parent is not None: + self.parent._remove_item(self.name) + _registries.discard(self) + + def resolve_attr(self, key): + return getattr(self.mod_ns, key) + + def get_module(self, name): + if name not in self.contents: + marker = _ModuleMarker(name, self) + self.contents[name] = marker + else: + marker = self.contents[name] + return marker + + def add_class(self, name, cls): + if name in self.contents: + existing = self.contents[name] + existing.add_item(cls) + else: + existing = self.contents[name] = \ + _MultipleClassMarker([cls], + on_remove=lambda: self._remove_item(name)) + + +class _ModNS(object): + __slots__ = '__parent', + + def __init__(self, parent): + self.__parent = parent + + def __getattr__(self, key): + try: + value = self.__parent.contents[key] + except KeyError: + pass + else: + if value is not None: + if isinstance(value, _ModuleMarker): + return value.mod_ns + else: + assert isinstance(value, _MultipleClassMarker) + return value.attempt_get(self.__parent.path, key) + raise AttributeError("Module %r has no mapped classes " + "registered under the name %r" % ( + self.__parent.name, key)) + + +class _GetColumns(object): + __slots__ = 'cls', + + def __init__(self, cls): + self.cls = cls + + def __getattr__(self, key): + mp = class_mapper(self.cls, configure=False) + if mp: + if key not in mp.all_orm_descriptors: + raise exc.InvalidRequestError( + "Class %r does not have a mapped column named %r" + % (self.cls, key)) + + desc = mp.all_orm_descriptors[key] + if desc.extension_type is interfaces.NOT_EXTENSION: + prop = desc.property + if isinstance(prop, SynonymProperty): + key = prop.name + elif not isinstance(prop, ColumnProperty): + raise exc.InvalidRequestError( + "Property %r is not an instance of" + " ColumnProperty (i.e. does not correspond" + " directly to a Column)." 
% key) + return getattr(self.cls, key) + +inspection._inspects(_GetColumns)( + lambda target: inspection.inspect(target.cls)) + + +class _GetTable(object): + __slots__ = 'key', 'metadata' + + def __init__(self, key, metadata): + self.key = key + self.metadata = metadata + + def __getattr__(self, key): + return self.metadata.tables[ + _get_table_key(key, self.key) + ] + + +def _determine_container(key, value): + if isinstance(value, _MultipleClassMarker): + value = value.attempt_get([], key) + return _GetColumns(value) + + +class _class_resolver(object): + def __init__(self, cls, prop, fallback, arg): + self.cls = cls + self.prop = prop + self.arg = self._declarative_arg = arg + self.fallback = fallback + self._dict = util.PopulateDict(self._access_cls) + self._resolvers = () + + def _access_cls(self, key): + cls = self.cls + if key in cls._decl_class_registry: + return _determine_container(key, cls._decl_class_registry[key]) + elif key in cls.metadata.tables: + return cls.metadata.tables[key] + elif key in cls.metadata._schemas: + return _GetTable(key, cls.metadata) + elif '_sa_module_registry' in cls._decl_class_registry and \ + key in cls._decl_class_registry['_sa_module_registry']: + registry = cls._decl_class_registry['_sa_module_registry'] + return registry.resolve_attr(key) + elif self._resolvers: + for resolv in self._resolvers: + value = resolv(key) + if value is not None: + return value + + return self.fallback[key] + + def __call__(self): + try: + x = eval(self.arg, globals(), self._dict) + + if isinstance(x, _GetColumns): + return x.cls + else: + return x + except NameError as n: + raise exc.InvalidRequestError( + "When initializing mapper %s, expression %r failed to " + "locate a name (%r). If this is a class name, consider " + "adding this relationship() to the %r class after " + "both dependent classes have been defined." % + (self.prop.parent, self.arg, n.args[0], self.cls) + ) + + +def _resolver(cls, prop): + import sqlalchemy + from sqlalchemy.orm import foreign, remote + + fallback = sqlalchemy.__dict__.copy() + fallback.update({'foreign': foreign, 'remote': remote}) + + def resolve_arg(arg): + return _class_resolver(cls, prop, fallback, arg) + return resolve_arg + + +def _deferred_relationship(cls, prop): + + if isinstance(prop, RelationshipProperty): + resolve_arg = _resolver(cls, prop) + + for attr in ('argument', 'order_by', 'primaryjoin', 'secondaryjoin', + 'secondary', '_user_defined_foreign_keys', 'remote_side'): + v = getattr(prop, attr) + if isinstance(v, util.string_types): + setattr(prop, attr, resolve_arg(v)) + + if prop.backref and isinstance(prop.backref, tuple): + key, kwargs = prop.backref + for attr in ('primaryjoin', 'secondaryjoin', 'secondary', + 'foreign_keys', 'remote_side', 'order_by'): + if attr in kwargs and isinstance(kwargs[attr], + util.string_types): + kwargs[attr] = resolve_arg(kwargs[attr]) + + return prop diff --git a/app/lib/sqlalchemy/ext/horizontal_shard.py b/app/lib/sqlalchemy/ext/horizontal_shard.py new file mode 100644 index 0000000..d20fbd4 --- /dev/null +++ b/app/lib/sqlalchemy/ext/horizontal_shard.py @@ -0,0 +1,131 @@ +# ext/horizontal_shard.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Horizontal sharding support. + +Defines a rudimental 'horizontal sharding' system which allows a Session to +distribute queries and persistence operations across multiple databases. 
+ +For a usage example, see the :ref:`examples_sharding` example included in +the source distribution. + +""" + +from .. import util +from ..orm.session import Session +from ..orm.query import Query + +__all__ = ['ShardedSession', 'ShardedQuery'] + + +class ShardedQuery(Query): + def __init__(self, *args, **kwargs): + super(ShardedQuery, self).__init__(*args, **kwargs) + self.id_chooser = self.session.id_chooser + self.query_chooser = self.session.query_chooser + self._shard_id = None + + def set_shard(self, shard_id): + """return a new query, limited to a single shard ID. + + all subsequent operations with the returned query will + be against the single shard regardless of other state. + """ + + q = self._clone() + q._shard_id = shard_id + return q + + def _execute_and_instances(self, context): + def iter_for_shard(shard_id): + context.attributes['shard_id'] = shard_id + result = self._connection_from_session( + mapper=self._mapper_zero(), + shard_id=shard_id).execute( + context.statement, + self._params) + return self.instances(result, context) + + if self._shard_id is not None: + return iter_for_shard(self._shard_id) + else: + partial = [] + for shard_id in self.query_chooser(self): + partial.extend(iter_for_shard(shard_id)) + + # if some kind of in memory 'sorting' + # were done, this is where it would happen + return iter(partial) + + def get(self, ident, **kwargs): + if self._shard_id is not None: + return super(ShardedQuery, self).get(ident) + else: + ident = util.to_list(ident) + for shard_id in self.id_chooser(self, ident): + o = self.set_shard(shard_id).get(ident, **kwargs) + if o is not None: + return o + else: + return None + + +class ShardedSession(Session): + def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None, + query_cls=ShardedQuery, **kwargs): + """Construct a ShardedSession. + + :param shard_chooser: A callable which, passed a Mapper, a mapped + instance, and possibly a SQL clause, returns a shard ID. This id + may be based off of the attributes present within the object, or on + some round-robin scheme. If the scheme is based on a selection, it + should set whatever state on the instance to mark it in the future as + participating in that shard. + + :param id_chooser: A callable, passed a query and a tuple of identity + values, which should return a list of shard ids where the ID might + reside. The databases will be queried in the order of this listing. + + :param query_chooser: For a given Query, returns the list of shard_ids + where the query should be issued. Results from all shards returned + will be combined together into a single listing. + + :param shards: A dictionary of string shard names + to :class:`~sqlalchemy.engine.Engine` objects. 
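+
+        E.g., a minimal sketch of the three callables for a
+        hypothetical two-shard setup; the shard names, engines, and the
+        ``region`` attribute are illustrative assumptions only::
+
+            def shard_chooser(mapper, instance, clause=None):
+                # choose based on state present on the instance
+                return 'east' if instance.region == 'east' else 'west'
+
+            def id_chooser(query, ident):
+                # an identity alone doesn't locate a shard; search both
+                return ['east', 'west']
+
+            def query_chooser(query):
+                # no criteria inspected in this sketch; query all shards
+                return ['east', 'west']
+
+            session = ShardedSession(
+                shard_chooser=shard_chooser,
+                id_chooser=id_chooser,
+                query_chooser=query_chooser,
+                shards={'east': engine_east, 'west': engine_west})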
+ + """ + super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs) + self.shard_chooser = shard_chooser + self.id_chooser = id_chooser + self.query_chooser = query_chooser + self.__binds = {} + self.connection_callable = self.connection + if shards is not None: + for k in shards: + self.bind_shard(k, shards[k]) + + def connection(self, mapper=None, instance=None, shard_id=None, **kwargs): + if shard_id is None: + shard_id = self.shard_chooser(mapper, instance) + + if self.transaction is not None: + return self.transaction.connection(mapper, shard_id=shard_id) + else: + return self.get_bind( + mapper, + shard_id=shard_id, + instance=instance + ).contextual_connect(**kwargs) + + def get_bind(self, mapper, shard_id=None, + instance=None, clause=None, **kw): + if shard_id is None: + shard_id = self.shard_chooser(mapper, instance, clause=clause) + return self.__binds[shard_id] + + def bind_shard(self, shard_id, bind): + self.__binds[shard_id] = bind diff --git a/app/lib/sqlalchemy/ext/hybrid.py b/app/lib/sqlalchemy/ext/hybrid.py new file mode 100644 index 0000000..509dd56 --- /dev/null +++ b/app/lib/sqlalchemy/ext/hybrid.py @@ -0,0 +1,841 @@ +# ext/hybrid.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +r"""Define attributes on ORM-mapped classes that have "hybrid" behavior. + +"hybrid" means the attribute has distinct behaviors defined at the +class level and at the instance level. + +The :mod:`~sqlalchemy.ext.hybrid` extension provides a special form of +method decorator, is around 50 lines of code and has almost no +dependencies on the rest of SQLAlchemy. It can, in theory, work with +any descriptor-based expression system. + +Consider a mapping ``Interval``, representing integer ``start`` and ``end`` +values. We can define higher level functions on mapped classes that produce +SQL expressions at the class level, and Python expression evaluation at the +instance level. Below, each function decorated with :class:`.hybrid_method` or +:class:`.hybrid_property` may receive ``self`` as an instance of the class, or +as the class itself:: + + from sqlalchemy import Column, Integer + from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy.orm import Session, aliased + from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method + + Base = declarative_base() + + class Interval(Base): + __tablename__ = 'interval' + + id = Column(Integer, primary_key=True) + start = Column(Integer, nullable=False) + end = Column(Integer, nullable=False) + + def __init__(self, start, end): + self.start = start + self.end = end + + @hybrid_property + def length(self): + return self.end - self.start + + @hybrid_method + def contains(self, point): + return (self.start <= point) & (point <= self.end) + + @hybrid_method + def intersects(self, other): + return self.contains(other.start) | self.contains(other.end) + +Above, the ``length`` property returns the difference between the +``end`` and ``start`` attributes. 
With an instance of ``Interval``, +this subtraction occurs in Python, using normal Python descriptor +mechanics:: + + >>> i1 = Interval(5, 10) + >>> i1.length + 5 + +When dealing with the ``Interval`` class itself, the :class:`.hybrid_property` +descriptor evaluates the function body given the ``Interval`` class as +the argument, which when evaluated with SQLAlchemy expression mechanics +returns a new SQL expression:: + + >>> print Interval.length + interval."end" - interval.start + + >>> print Session().query(Interval).filter(Interval.length > 10) + SELECT interval.id AS interval_id, interval.start AS interval_start, + interval."end" AS interval_end + FROM interval + WHERE interval."end" - interval.start > :param_1 + +ORM methods such as :meth:`~.Query.filter_by` generally use ``getattr()`` to +locate attributes, so can also be used with hybrid attributes:: + + >>> print Session().query(Interval).filter_by(length=5) + SELECT interval.id AS interval_id, interval.start AS interval_start, + interval."end" AS interval_end + FROM interval + WHERE interval."end" - interval.start = :param_1 + +The ``Interval`` class example also illustrates two methods, +``contains()`` and ``intersects()``, decorated with +:class:`.hybrid_method`. This decorator applies the same idea to +methods that :class:`.hybrid_property` applies to attributes. The +methods return boolean values, and take advantage of the Python ``|`` +and ``&`` bitwise operators to produce equivalent instance-level and +SQL expression-level boolean behavior:: + + >>> i1.contains(6) + True + >>> i1.contains(15) + False + >>> i1.intersects(Interval(7, 18)) + True + >>> i1.intersects(Interval(25, 29)) + False + + >>> print Session().query(Interval).filter(Interval.contains(15)) + SELECT interval.id AS interval_id, interval.start AS interval_start, + interval."end" AS interval_end + FROM interval + WHERE interval.start <= :start_1 AND interval."end" > :end_1 + + >>> ia = aliased(Interval) + >>> print Session().query(Interval, ia).filter(Interval.intersects(ia)) + SELECT interval.id AS interval_id, interval.start AS interval_start, + interval."end" AS interval_end, interval_1.id AS interval_1_id, + interval_1.start AS interval_1_start, interval_1."end" AS interval_1_end + FROM interval, interval AS interval_1 + WHERE interval.start <= interval_1.start + AND interval."end" > interval_1.start + OR interval.start <= interval_1."end" + AND interval."end" > interval_1."end" + +Defining Expression Behavior Distinct from Attribute Behavior +-------------------------------------------------------------- + +Our usage of the ``&`` and ``|`` bitwise operators above was +fortunate, considering our functions operated on two boolean values to +return a new one. In many cases, the construction of an in-Python +function and a SQLAlchemy SQL expression have enough differences that +two separate Python expressions should be defined. The +:mod:`~sqlalchemy.ext.hybrid` decorators define the +:meth:`.hybrid_property.expression` modifier for this purpose. As an +example we'll define the radius of the interval, which requires the +usage of the absolute value function:: + + from sqlalchemy import func + + class Interval(object): + # ... 
+
+        @hybrid_property
+        def radius(self):
+            return abs(self.length) / 2
+
+        @radius.expression
+        def radius(cls):
+            return func.abs(cls.length) / 2
+
+Above, the Python function ``abs()`` is used for instance-level
+operations, while the SQL function ``ABS()`` is used via the :data:`.func`
+object for class-level expressions::
+
+    >>> i1.radius
+    2
+
+    >>> print Session().query(Interval).filter(Interval.radius > 5)
+    SELECT interval.id AS interval_id, interval.start AS interval_start,
+    interval."end" AS interval_end
+    FROM interval
+    WHERE abs(interval."end" - interval.start) / :abs_1 > :param_1
+
+Defining Setters
+----------------
+
+Hybrid properties can also define setter methods.  If we wanted
+``length`` above, when set, to modify the endpoint value::
+
+    class Interval(object):
+        # ...
+
+        @hybrid_property
+        def length(self):
+            return self.end - self.start
+
+        @length.setter
+        def length(self, value):
+            self.end = self.start + value
+
+The ``length(self, value)`` method is now called upon set::
+
+    >>> i1 = Interval(5, 10)
+    >>> i1.length
+    5
+    >>> i1.length = 12
+    >>> i1.end
+    17
+
+Working with Relationships
+--------------------------
+
+There's no essential difference when creating hybrids that work with
+related objects as opposed to column-based data.  The need for distinct
+expressions tends to be greater.  The two variants we'll illustrate
+are the "join-dependent" hybrid, and the "correlated subquery" hybrid.
+
+Join-Dependent Relationship Hybrid
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Consider the following declarative
+mapping which relates a ``User`` to a ``SavingsAccount``::
+
+    from sqlalchemy import Column, Integer, ForeignKey, Numeric, String
+    from sqlalchemy.orm import relationship
+    from sqlalchemy.ext.declarative import declarative_base
+    from sqlalchemy.ext.hybrid import hybrid_property
+
+    Base = declarative_base()
+
+    class SavingsAccount(Base):
+        __tablename__ = 'account'
+        id = Column(Integer, primary_key=True)
+        user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
+        balance = Column(Numeric(15, 5))
+
+    class User(Base):
+        __tablename__ = 'user'
+        id = Column(Integer, primary_key=True)
+        name = Column(String(100), nullable=False)
+
+        accounts = relationship("SavingsAccount", backref="owner")
+
+        @hybrid_property
+        def balance(self):
+            if self.accounts:
+                return self.accounts[0].balance
+            else:
+                return None
+
+        @balance.setter
+        def balance(self, value):
+            if not self.accounts:
+                account = SavingsAccount(owner=self)
+            else:
+                account = self.accounts[0]
+            account.balance = value
+
+        @balance.expression
+        def balance(cls):
+            return SavingsAccount.balance
+
+The above hybrid property ``balance`` works with the first
+``SavingsAccount`` entry in the list of accounts for this user.  The
+in-Python getter/setter methods can treat ``accounts`` as a Python
+list available on ``self``.
+
+However, at the expression level, it's expected that the ``User`` class will
+be used in an appropriate context such that a join to
+``SavingsAccount`` will be present::
+
+    >>> print Session().query(User, User.balance).\
+    ...
join(User.accounts).filter(User.balance > 5000) + SELECT "user".id AS user_id, "user".name AS user_name, + account.balance AS account_balance + FROM "user" JOIN account ON "user".id = account.user_id + WHERE account.balance > :balance_1 + +Note however, that while the instance level accessors need to worry +about whether ``self.accounts`` is even present, this issue expresses +itself differently at the SQL expression level, where we basically +would use an outer join:: + + >>> from sqlalchemy import or_ + >>> print (Session().query(User, User.balance).outerjoin(User.accounts). + ... filter(or_(User.balance < 5000, User.balance == None))) + SELECT "user".id AS user_id, "user".name AS user_name, + account.balance AS account_balance + FROM "user" LEFT OUTER JOIN account ON "user".id = account.user_id + WHERE account.balance < :balance_1 OR account.balance IS NULL + +Correlated Subquery Relationship Hybrid +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We can, of course, forego being dependent on the enclosing query's usage +of joins in favor of the correlated subquery, which can portably be packed +into a single column expression. A correlated subquery is more portable, but +often performs more poorly at the SQL level. Using the same technique +illustrated at :ref:`mapper_column_property_sql_expressions`, +we can adjust our ``SavingsAccount`` example to aggregate the balances for +*all* accounts, and use a correlated subquery for the column expression:: + + from sqlalchemy import Column, Integer, ForeignKey, Numeric, String + from sqlalchemy.orm import relationship + from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy.ext.hybrid import hybrid_property + from sqlalchemy import select, func + + Base = declarative_base() + + class SavingsAccount(Base): + __tablename__ = 'account' + id = Column(Integer, primary_key=True) + user_id = Column(Integer, ForeignKey('user.id'), nullable=False) + balance = Column(Numeric(15, 5)) + + class User(Base): + __tablename__ = 'user' + id = Column(Integer, primary_key=True) + name = Column(String(100), nullable=False) + + accounts = relationship("SavingsAccount", backref="owner") + + @hybrid_property + def balance(self): + return sum(acc.balance for acc in self.accounts) + + @balance.expression + def balance(cls): + return select([func.sum(SavingsAccount.balance)]).\ + where(SavingsAccount.user_id==cls.id).\ + label('total_balance') + +The above recipe will give us the ``balance`` column which renders +a correlated SELECT:: + + >>> print s.query(User).filter(User.balance > 400) + SELECT "user".id AS user_id, "user".name AS user_name + FROM "user" + WHERE (SELECT sum(account.balance) AS sum_1 + FROM account + WHERE account.user_id = "user".id) > :param_1 + +.. _hybrid_custom_comparators: + +Building Custom Comparators +--------------------------- + +The hybrid property also includes a helper that allows construction of +custom comparators. A comparator object allows one to customize the +behavior of each SQLAlchemy expression operator individually. They +are useful when creating custom types that have some highly +idiosyncratic behavior on the SQL side. 
+ +The example class below allows case-insensitive comparisons on the attribute +named ``word_insensitive``:: + + from sqlalchemy.ext.hybrid import Comparator, hybrid_property + from sqlalchemy import func, Column, Integer, String + from sqlalchemy.orm import Session + from sqlalchemy.ext.declarative import declarative_base + + Base = declarative_base() + + class CaseInsensitiveComparator(Comparator): + def __eq__(self, other): + return func.lower(self.__clause_element__()) == func.lower(other) + + class SearchWord(Base): + __tablename__ = 'searchword' + id = Column(Integer, primary_key=True) + word = Column(String(255), nullable=False) + + @hybrid_property + def word_insensitive(self): + return self.word.lower() + + @word_insensitive.comparator + def word_insensitive(cls): + return CaseInsensitiveComparator(cls.word) + +Above, SQL expressions against ``word_insensitive`` will apply the ``LOWER()`` +SQL function to both sides:: + + >>> print Session().query(SearchWord).filter_by(word_insensitive="Trucks") + SELECT searchword.id AS searchword_id, searchword.word AS searchword_word + FROM searchword + WHERE lower(searchword.word) = lower(:lower_1) + +The ``CaseInsensitiveComparator`` above implements part of the +:class:`.ColumnOperators` interface. A "coercion" operation like +lowercasing can be applied to all comparison operations (i.e. ``eq``, +``lt``, ``gt``, etc.) using :meth:`.Operators.operate`:: + + class CaseInsensitiveComparator(Comparator): + def operate(self, op, other): + return op(func.lower(self.__clause_element__()), func.lower(other)) + +Hybrid Value Objects +-------------------- + +Note in our previous example, if we were to compare the +``word_insensitive`` attribute of a ``SearchWord`` instance to a plain +Python string, the plain Python string would not be coerced to lower +case - the ``CaseInsensitiveComparator`` we built, being returned by +``@word_insensitive.comparator``, only applies to the SQL side. + +A more comprehensive form of the custom comparator is to construct a +*Hybrid Value Object*. This technique applies the target value or +expression to a value object which is then returned by the accessor in +all cases. The value object allows control of all operations upon +the value as well as how compared values are treated, both on the SQL +expression side as well as the Python value side. Replacing the +previous ``CaseInsensitiveComparator`` class with a new +``CaseInsensitiveWord`` class:: + + class CaseInsensitiveWord(Comparator): + "Hybrid value representing a lower case representation of a word." + + def __init__(self, word): + if isinstance(word, basestring): + self.word = word.lower() + elif isinstance(word, CaseInsensitiveWord): + self.word = word.word + else: + self.word = func.lower(word) + + def operate(self, op, other): + if not isinstance(other, CaseInsensitiveWord): + other = CaseInsensitiveWord(other) + return op(self.word, other.word) + + def __clause_element__(self): + return self.word + + def __str__(self): + return self.word + + key = 'word' + "Label to apply to Query tuple results" + +Above, the ``CaseInsensitiveWord`` object represents ``self.word``, +which may be a SQL function, or may be a Python native. By +overriding ``operate()`` and ``__clause_element__()`` to work in terms +of ``self.word``, all comparison operations will work against the +"converted" form of ``word``, whether it be SQL side or Python side. 
+Our ``SearchWord`` class can now deliver the ``CaseInsensitiveWord`` +object unconditionally from a single hybrid call:: + + class SearchWord(Base): + __tablename__ = 'searchword' + id = Column(Integer, primary_key=True) + word = Column(String(255), nullable=False) + + @hybrid_property + def word_insensitive(self): + return CaseInsensitiveWord(self.word) + +The ``word_insensitive`` attribute now has case-insensitive comparison +behavior universally, including SQL expression vs. Python expression +(note the Python value is converted to lower case on the Python side +here):: + + >>> print Session().query(SearchWord).filter_by(word_insensitive="Trucks") + SELECT searchword.id AS searchword_id, searchword.word AS searchword_word + FROM searchword + WHERE lower(searchword.word) = :lower_1 + +SQL expression versus SQL expression:: + + >>> sw1 = aliased(SearchWord) + >>> sw2 = aliased(SearchWord) + >>> print Session().query( + ... sw1.word_insensitive, + ... sw2.word_insensitive).\ + ... filter( + ... sw1.word_insensitive > sw2.word_insensitive + ... ) + SELECT lower(searchword_1.word) AS lower_1, + lower(searchword_2.word) AS lower_2 + FROM searchword AS searchword_1, searchword AS searchword_2 + WHERE lower(searchword_1.word) > lower(searchword_2.word) + +Python only expression:: + + >>> ws1 = SearchWord(word="SomeWord") + >>> ws1.word_insensitive == "sOmEwOrD" + True + >>> ws1.word_insensitive == "XOmEwOrX" + False + >>> print ws1.word_insensitive + someword + +The Hybrid Value pattern is very useful for any kind of value that may +have multiple representations, such as timestamps, time deltas, units +of measurement, currencies and encrypted passwords. + +.. seealso:: + + `Hybrids and Value Agnostic Types + `_ + - on the techspot.zzzeek.org blog + + `Value Agnostic Types, Part II + `_ - + on the techspot.zzzeek.org blog + +.. _hybrid_transformers: + +Building Transformers +---------------------- + +A *transformer* is an object which can receive a :class:`.Query` +object and return a new one. The :class:`.Query` object includes a +method :meth:`.with_transformation` that returns a new :class:`.Query` +transformed by the given function. + +We can combine this with the :class:`.Comparator` class to produce one type +of recipe which can both set up the FROM clause of a query as well as assign +filtering criterion. + +Consider a mapped class ``Node``, which assembles using adjacency list +into a hierarchical tree pattern:: + + from sqlalchemy import Column, Integer, ForeignKey + from sqlalchemy.orm import relationship + from sqlalchemy.ext.declarative import declarative_base + Base = declarative_base() + + class Node(Base): + __tablename__ = 'node' + id = Column(Integer, primary_key=True) + parent_id = Column(Integer, ForeignKey('node.id')) + parent = relationship("Node", remote_side=id) + +Suppose we wanted to add an accessor ``grandparent``. This would +return the ``parent`` of ``Node.parent``. When we have an instance of +``Node``, this is simple:: + + from sqlalchemy.ext.hybrid import hybrid_property + + class Node(Base): + # ... + + @hybrid_property + def grandparent(self): + return self.parent.parent + +For the expression, things are not so clear. We'd need to construct +a :class:`.Query` where we :meth:`~.Query.join` twice along +``Node.parent`` to get to the ``grandparent``. 
We can instead return
+a transforming callable that we'll combine with the
+:class:`.Comparator` class to receive any :class:`.Query` object, and
+return a new one that's joined to the ``Node.parent`` attribute and
+filtered based on the given criterion::
+
+    from sqlalchemy.ext.hybrid import Comparator
+
+    class GrandparentTransformer(Comparator):
+        def operate(self, op, other):
+            def transform(q):
+                cls = self.__clause_element__()
+                parent_alias = aliased(cls)
+                return q.join(parent_alias, cls.parent).\
+                    filter(op(parent_alias.parent, other))
+            return transform
+
+    Base = declarative_base()
+
+    class Node(Base):
+        __tablename__ = 'node'
+        id = Column(Integer, primary_key=True)
+        parent_id = Column(Integer, ForeignKey('node.id'))
+        parent = relationship("Node", remote_side=id)
+
+        @hybrid_property
+        def grandparent(self):
+            return self.parent.parent
+
+        @grandparent.comparator
+        def grandparent(cls):
+            return GrandparentTransformer(cls)
+
+The ``GrandparentTransformer`` overrides the core
+:meth:`.Operators.operate` method at the base of the
+:class:`.Comparator` hierarchy to return a query-transforming
+callable, which then runs the given comparison operation in a
+particular context.  For example, in the case above, the ``operate``
+method is called, given the :attr:`.Operators.eq` callable as well as
+the right side of the comparison ``Node(id=5)``.  A function
+``transform`` is then returned which will transform a :class:`.Query`
+first to join to ``Node.parent``, then to compare ``parent_alias``
+using :attr:`.Operators.eq` against the left and right sides, passing
+into :class:`.Query.filter`:
+
+.. sourcecode:: pycon+sql
+
+    >>> from sqlalchemy.orm import Session
+    >>> session = Session()
+    {sql}>>> session.query(Node).\
+    ...        with_transformation(Node.grandparent==Node(id=5)).\
+    ...        all()
+    SELECT node.id AS node_id, node.parent_id AS node_parent_id
+    FROM node JOIN node AS node_1 ON node_1.id = node.parent_id
+    WHERE :param_1 = node_1.parent_id
+    {stop}
+
+We can modify the pattern to be more verbose but flexible by separating
+the "join" step from the "filter" step.  The tricky part here is ensuring
+that successive instances of ``GrandparentTransformer`` use the same
+:class:`.AliasedClass` object against ``Node``.  Below we use a simple
+memoizing approach that associates a ``GrandparentTransformer``
+with each class::
+
+    class Node(Base):
+
+        # ...
+
+        @grandparent.comparator
+        def grandparent(cls):
+            # memoize a GrandparentTransformer
+            # per class
+            if '_gp' not in cls.__dict__:
+                cls._gp = GrandparentTransformer(cls)
+            return cls._gp
+
+    class GrandparentTransformer(Comparator):
+
+        def __init__(self, cls):
+            self.parent_alias = aliased(cls)
+
+        @property
+        def join(self):
+            def go(q):
+                return q.join(self.parent_alias, Node.parent)
+            return go
+
+        def operate(self, op, other):
+            return op(self.parent_alias.parent, other)
+
+.. sourcecode:: pycon+sql
+
+    {sql}>>> session.query(Node).\
+    ...        with_transformation(Node.grandparent.join).\
+    ...        filter(Node.grandparent==Node(id=5))
+    SELECT node.id AS node_id, node.parent_id AS node_parent_id
+    FROM node JOIN node AS node_1 ON node_1.id = node.parent_id
+    WHERE :param_1 = node_1.parent_id
+    {stop}
+
+The "transformer" pattern is an experimental pattern that starts
+to make use of some functional programming paradigms.
+While it's only recommended for advanced and/or patient developers,
+there are probably a whole lot of amazing things it can be used for.
+
+"""
+from .. import util
+from ..orm import attributes, interfaces
+
+HYBRID_METHOD = util.symbol('HYBRID_METHOD')
+"""Symbol indicating an :class:`InspectionAttr` that's
+   of type :class:`.hybrid_method`.
+
+   Is assigned to the :attr:`.InspectionAttr.extension_type`
+   attribute.
+
+   .. seealso::
+
+       :attr:`.Mapper.all_orm_attributes`
+
+"""
+
+HYBRID_PROPERTY = util.symbol('HYBRID_PROPERTY')
+"""Symbol indicating an :class:`InspectionAttr` that's
+   of type :class:`.hybrid_property`.
+
+   Is assigned to the :attr:`.InspectionAttr.extension_type`
+   attribute.
+
+   .. seealso::
+
+       :attr:`.Mapper.all_orm_attributes`
+
+"""
+
+
+class hybrid_method(interfaces.InspectionAttrInfo):
+    """A decorator which allows definition of a Python object method with both
+    instance-level and class-level behavior.
+
+    """
+
+    is_attribute = True
+    extension_type = HYBRID_METHOD
+
+    def __init__(self, func, expr=None):
+        """Create a new :class:`.hybrid_method`.
+
+        Usage is typically via decorator::
+
+            from sqlalchemy.ext.hybrid import hybrid_method
+
+            class SomeClass(object):
+                @hybrid_method
+                def value(self, x, y):
+                    return self._value + x + y
+
+                @value.expression
+                def value(self, x, y):
+                    return func.some_function(self._value, x, y)
+
+        """
+        self.func = func
+        self.expression(expr or func)
+
+    def __get__(self, instance, owner):
+        if instance is None:
+            return self.expr.__get__(owner, owner.__class__)
+        else:
+            return self.func.__get__(instance, owner)
+
+    def expression(self, expr):
+        """Provide a modifying decorator that defines a
+        SQL-expression producing method."""
+
+        self.expr = expr
+        if not self.expr.__doc__:
+            self.expr.__doc__ = self.func.__doc__
+        return self
+
+
+class hybrid_property(interfaces.InspectionAttrInfo):
+    """A decorator which allows definition of a Python descriptor with both
+    instance-level and class-level behavior.
+
+    """
+
+    is_attribute = True
+    extension_type = HYBRID_PROPERTY
+
+    def __init__(self, fget, fset=None, fdel=None, expr=None):
+        """Create a new :class:`.hybrid_property`.
+
+        Usage is typically via decorator::
+
+            from sqlalchemy.ext.hybrid import hybrid_property
+
+            class SomeClass(object):
+                @hybrid_property
+                def value(self):
+                    return self._value
+
+                @value.setter
+                def value(self, value):
+                    self._value = value
+
+        """
+        self.fget = fget
+        self.fset = fset
+        self.fdel = fdel
+        self.expression(expr or fget)
+        util.update_wrapper(self, fget)
+
+    def __get__(self, instance, owner):
+        if instance is None:
+            return self.expr(owner)
+        else:
+            return self.fget(instance)
+
+    def __set__(self, instance, value):
+        if self.fset is None:
+            raise AttributeError("can't set attribute")
+        self.fset(instance, value)
+
+    def __delete__(self, instance):
+        if self.fdel is None:
+            raise AttributeError("can't delete attribute")
+        self.fdel(instance)
+
+    def setter(self, fset):
+        """Provide a modifying decorator that defines a value-setter method."""
+
+        self.fset = fset
+        return self
+
+    def deleter(self, fdel):
+        """Provide a modifying decorator that defines a
+        value-deletion method."""
+
+        self.fdel = fdel
+        return self
+
+    def expression(self, expr):
+        """Provide a modifying decorator that defines a SQL-expression
+        producing method."""
+
+        def _expr(cls):
+            return ExprComparator(expr(cls), self)
+        util.update_wrapper(_expr, expr)
+
+        self.expr = _expr
+        return self.comparator(_expr)
+
+    def comparator(self, comparator):
+        """Provide a modifying decorator that defines a custom
+        comparator producing method.
+ + The return value of the decorated method should be an instance of + :class:`~.hybrid.Comparator`. + + """ + + proxy_attr = attributes.\ + create_proxied_attribute(self) + + def expr(owner): + return proxy_attr( + owner, self.__name__, self, comparator(owner), + doc=comparator.__doc__ or self.__doc__) + self.expr = expr + return self + + +class Comparator(interfaces.PropComparator): + """A helper class that allows easy construction of custom + :class:`~.orm.interfaces.PropComparator` + classes for usage with hybrids.""" + + property = None + + def __init__(self, expression): + self.expression = expression + + def __clause_element__(self): + expr = self.expression + if hasattr(expr, '__clause_element__'): + expr = expr.__clause_element__() + return expr + + def adapt_to_entity(self, adapt_to_entity): + # interesting.... + return self + + +class ExprComparator(Comparator): + def __init__(self, expression, hybrid): + self.expression = expression + self.hybrid = hybrid + + def __getattr__(self, key): + return getattr(self.expression, key) + + @property + def info(self): + return self.hybrid.info + + @property + def property(self): + return self.expression.property + + def operate(self, op, *other, **kwargs): + return op(self.expression, *other, **kwargs) + + def reverse_operate(self, op, other, **kwargs): + return op(other, self.expression, **kwargs) diff --git a/app/lib/sqlalchemy/ext/indexable.py b/app/lib/sqlalchemy/ext/indexable.py new file mode 100644 index 0000000..b1ce129 --- /dev/null +++ b/app/lib/sqlalchemy/ext/indexable.py @@ -0,0 +1,349 @@ +# ext/index.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Define attributes on ORM-mapped classes that have "index" attributes for +columns with :class:`~.types.Indexable` types. + +"index" means the attribute is associated with an element of an +:class:`~.types.Indexable` column with the predefined index to access it. +The :class:`~.types.Indexable` types include types such as +:class:`~.types.ARRAY`, :class:`~.types.JSON` and +:class:`~.postgresql.HSTORE`. + + + +The :mod:`~sqlalchemy.ext.indexable` extension provides +:class:`~.schema.Column`-like interface for any element of an +:class:`~.types.Indexable` typed column. In simple cases, it can be +treated as a :class:`~.schema.Column` - mapped attribute. + + +.. versionadded:: 1.1 + +Synopsis +======== + +Given ``Person`` as a model with a primary key and JSON data field. +While this field may have any number of elements encoded within it, +we would like to refer to the element called ``name`` individually +as a dedicated attribute which behaves like a standalone column:: + + from sqlalchemy import Column, JSON, Integer + from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy.ext.indexable import index_property + + Base = declarative_base() + + class Person(Base): + __tablename__ = 'person' + + id = Column(Integer, primary_key=True) + data = Column(JSON) + + name = index_property('data', 'name') + + +Above, the ``name`` attribute now behaves like a mapped column. 
We
+can compose a new ``Person`` and set the value of ``name``::
+
+    >>> person = Person(name='Alchemist')
+
+The value is now accessible::
+
+    >>> person.name
+    'Alchemist'
+
+Behind the scenes, the JSON field was initialized to a new blank dictionary
+and the field was set::
+
+    >>> person.data
+    {'name': 'Alchemist'}
+
+The field is mutable in place::
+
+    >>> person.name = 'Renamed'
+    >>> person.name
+    'Renamed'
+    >>> person.data
+    {'name': 'Renamed'}
+
+When using :class:`.index_property`, the change that we make to the indexable
+structure is also automatically tracked as history; we no longer need
+to use :class:`~.mutable.MutableDict` in order to track this change
+for the unit of work.
+
+Deletions work normally as well::
+
+    >>> del person.name
+    >>> person.data
+    {}
+
+Above, deletion of ``person.name`` deletes the value from the dictionary,
+but not the dictionary itself.
+
+A missing key will produce ``AttributeError``::
+
+    >>> person = Person()
+    >>> person.name
+    ...
+    AttributeError: 'name'
+
+Unless you set a default value::
+
+    >>> class Person(Base):
+    ...     __tablename__ = 'person'
+    ...
+    ...     id = Column(Integer, primary_key=True)
+    ...     data = Column(JSON)
+    ...
+    ...     name = index_property('data', 'name', default=None)  # See default
+
+    >>> person = Person()
+    >>> print(person.name)
+    None
+
+The attributes are also accessible at the class level.
+Below, we illustrate ``Person.name`` used to generate
+an indexed SQL criterion::
+
+    >>> from sqlalchemy.orm import Session
+    >>> session = Session()
+    >>> query = session.query(Person).filter(Person.name == 'Alchemist')
+
+The above query is equivalent to::
+
+    >>> query = session.query(Person).filter(Person.data['name'] == 'Alchemist')
+
+Multiple :class:`.index_property` objects can be chained to produce
+multiple levels of indexing::
+
+    from sqlalchemy import Column, JSON, Integer
+    from sqlalchemy.ext.declarative import declarative_base
+    from sqlalchemy.ext.indexable import index_property
+
+    Base = declarative_base()
+
+    class Person(Base):
+        __tablename__ = 'person'
+
+        id = Column(Integer, primary_key=True)
+        data = Column(JSON)
+
+        birthday = index_property('data', 'birthday')
+        year = index_property('birthday', 'year')
+        month = index_property('birthday', 'month')
+        day = index_property('birthday', 'day')
+
+Given the above mapping, a query such as::
+
+    q = session.query(Person).filter(Person.year == '1980')
+
+will render on a PostgreSQL backend as::
+
+    SELECT person.id, person.data
+    FROM person
+    WHERE person.data -> %(data_1)s -> %(param_1)s = %(param_2)s
+
+Default Values
+==============
+
+:class:`.index_property` includes special behaviors for when the indexed
+data structure does not exist, and a set operation is called:
+
+* For an :class:`.index_property` that is given an integer index value,
+  the default data structure will be a Python list of ``None`` values,
+  at least as long as the index value; the value is then set at its
+  place in the list.  This means for an index value of zero, the list
+  will be initialized to ``[None]`` before setting the given value,
+  and for an index value of five, the list will be initialized to
+  ``[None, None, None, None, None, None]`` before setting the element
+  at index five to the given value.  Note that an existing list is
+  **not** extended in place to receive a value.
+
+* For an :class:`.index_property` that is given any other kind of index
+  value (e.g. strings usually), a Python dictionary is used as the
+  default data structure.
+
+* The default data structure can be set to any Python callable using the
+  :paramref:`.index_property.datatype` parameter, overriding the previous
+  rules.
+
+
+Subclassing
+===========
+
+:class:`.index_property` can be subclassed, in particular for the common
+use case of providing coercion of values or SQL expressions as they are
+accessed.  Below is a common recipe for use with a PostgreSQL JSON type,
+where we want to also include automatic casting plus ``astext()``::
+
+    class pg_json_property(index_property):
+        def __init__(self, attr_name, index, cast_type):
+            super(pg_json_property, self).__init__(attr_name, index)
+            self.cast_type = cast_type
+
+        def expr(self, model):
+            expr = super(pg_json_property, self).expr(model)
+            return expr.astext.cast(self.cast_type)
+
+The above subclass can be used with the PostgreSQL-specific
+version of :class:`.postgresql.JSON`::
+
+    from sqlalchemy import Column, Integer
+    from sqlalchemy.ext.declarative import declarative_base
+    from sqlalchemy.dialects.postgresql import JSON
+
+    Base = declarative_base()
+
+    class Person(Base):
+        __tablename__ = 'person'
+
+        id = Column(Integer, primary_key=True)
+        data = Column(JSON)
+
+        age = pg_json_property('data', 'age', Integer)
+
+The ``age`` attribute at the instance level works as before; however
+when rendering SQL, PostgreSQL's ``->>`` operator will be used
+for indexed access, instead of the usual index operator of ``->``::
+
+    >>> query = session.query(Person).filter(Person.age < 20)
+
+The above query will render::
+
+    SELECT person.id, person.data
+    FROM person
+    WHERE CAST(person.data ->> %(data_1)s AS INTEGER) < %(param_1)s
+
+"""
+from __future__ import absolute_import
+
+from sqlalchemy import inspect
+from ..orm.attributes import flag_modified
+from ..ext.hybrid import hybrid_property
+
+
+__all__ = ['index_property']
+
+
+class index_property(hybrid_property):  # noqa
+    """A property generator. The generated property describes an object
+    attribute that corresponds to an :class:`~.types.Indexable`
+    column.
+
+    .. versionadded:: 1.1
+
+    .. seealso::
+
+        :mod:`sqlalchemy.ext.indexable`
+
+    """
+
+    _NO_DEFAULT_ARGUMENT = object()
+
+    def __init__(
+            self, attr_name, index, default=_NO_DEFAULT_ARGUMENT,
+            datatype=None, mutable=True, onebased=True):
+        """Create a new :class:`.index_property`.
+
+        :param attr_name:
+            An attribute name of an `Indexable` typed column, or other
+            attribute that returns an indexable structure.
+        :param index:
+            The index to be used for getting and setting this value.  This
+            should be the Python-side index value for integers.
+        :param default:
+            A value which will be returned instead of `AttributeError`
+            when there is not a value at the given index.
+        :param datatype: default datatype to use when the field is empty.
+            By default, this is derived from the type of index used; a
+            Python list for an integer index, or a Python dictionary for
+            any other style of index.  For a list, the list will be
+            initialized to a list of None values that is at least
+            ``index`` elements long.
+        :param mutable: if False, writes and deletes to the attribute will
+            be disallowed.
+        :param onebased: assume the SQL representation of this value is
+            one-based; that is, the first index in SQL is 1, not zero.
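+
+        For example (a minimal sketch; the ``Person`` mapping with an
+        ARRAY-typed ``data`` column is an assumption of this
+        illustration), a Python-side index of zero with the default
+        ``onebased=True`` renders as the one-based SQL index
+        ``data[1]``::
+
+            from sqlalchemy import Column, Integer, String
+            from sqlalchemy.dialects.postgresql import ARRAY
+            from sqlalchemy.ext.declarative import declarative_base
+            from sqlalchemy.ext.indexable import index_property
+
+            Base = declarative_base()
+
+            class Person(Base):
+                __tablename__ = 'person'
+
+                id = Column(Integer, primary_key=True)
+                data = Column(ARRAY(String))
+
+                # Python-side reads use data[0]; the SQL expression
+                # renders the one-based index data[1]
+                first = index_property('data', 0)
+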
+        """
+
+        if mutable:
+            super(index_property, self).__init__(
+                self.fget, self.fset, self.fdel, self.expr
+            )
+        else:
+            super(index_property, self).__init__(
+                self.fget, None, None, self.expr
+            )
+        self.attr_name = attr_name
+        self.index = index
+        self.default = default
+        is_numeric = isinstance(index, int)
+        onebased = is_numeric and onebased
+
+        if datatype is not None:
+            self.datatype = datatype
+        else:
+            if is_numeric:
+                self.datatype = lambda: [None for x in range(index + 1)]
+            else:
+                self.datatype = dict
+        self.onebased = onebased
+
+    def _fget_default(self):
+        if self.default == self._NO_DEFAULT_ARGUMENT:
+            raise AttributeError(self.attr_name)
+        else:
+            return self.default
+
+    def fget(self, instance):
+        attr_name = self.attr_name
+        column_value = getattr(instance, attr_name)
+        if column_value is None:
+            return self._fget_default()
+        try:
+            value = column_value[self.index]
+        except (KeyError, IndexError):
+            return self._fget_default()
+        else:
+            return value
+
+    def fset(self, instance, value):
+        attr_name = self.attr_name
+        column_value = getattr(instance, attr_name, None)
+        if column_value is None:
+            column_value = self.datatype()
+            setattr(instance, attr_name, column_value)
+        column_value[self.index] = value
+        setattr(instance, attr_name, column_value)
+        if attr_name in inspect(instance).mapper.attrs:
+            flag_modified(instance, attr_name)
+
+    def fdel(self, instance):
+        attr_name = self.attr_name
+        column_value = getattr(instance, attr_name)
+        if column_value is None:
+            raise AttributeError(self.attr_name)
+        try:
+            del column_value[self.index]
+        except KeyError:
+            raise AttributeError(self.attr_name)
+        else:
+            setattr(instance, attr_name, column_value)
+            flag_modified(instance, attr_name)
+
+    def expr(self, model):
+        column = getattr(model, self.attr_name)
+        index = self.index
+        if self.onebased:
+            index += 1
+        return column[index]
diff --git a/app/lib/sqlalchemy/ext/instrumentation.py b/app/lib/sqlalchemy/ext/instrumentation.py
new file mode 100644
index 0000000..30a0ab7
--- /dev/null
+++ b/app/lib/sqlalchemy/ext/instrumentation.py
@@ -0,0 +1,414 @@
+"""Extensible class instrumentation.
+
+The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate
+systems of class instrumentation within the ORM.  Class instrumentation
+refers to how the ORM places attributes on the class which maintain
+data and track changes to that data, as well as event hooks installed
+on the class.
+
+.. note::
+    The extension package is provided for the benefit of integration
+    with other object management packages, which already perform
+    their own instrumentation.  It is not intended for general use.
+
+For examples of how the instrumentation extension is used,
+see the example :ref:`examples_instrumentation`.
+
+.. versionchanged:: 0.8
+   The :mod:`sqlalchemy.orm.instrumentation` was split out so
+   that all functionality having to do with non-standard
+   instrumentation was moved out to :mod:`sqlalchemy.ext.instrumentation`.
+   When imported, the module installs itself within
+   :mod:`sqlalchemy.orm.instrumentation` so that it
+   takes effect, including recognition of
+   ``__sa_instrumentation_manager__`` on mapped classes, as
+   well as :data:`.instrumentation_finders`
+   being used to determine class instrumentation resolution.
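+
+As a brief sketch (``MyClassManager`` and ``MyMappedClass`` are
+hypothetical names used only for illustration), a mapped class elects
+custom instrumentation by assigning an :class:`.InstrumentationManager`
+subclass to the ``__sa_instrumentation_manager__`` attribute::
+
+    from sqlalchemy.ext.instrumentation import InstrumentationManager
+
+    class MyClassManager(InstrumentationManager):
+        def instrument_attribute(self, class_, key, inst):
+            # called once for each ORM-instrumented attribute
+            print("instrumenting %s.%s" % (class_.__name__, key))
+
+    class MyMappedClass(object):
+        __sa_instrumentation_manager__ = MyClassManager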
+ +""" +from ..orm import instrumentation as orm_instrumentation +from ..orm.instrumentation import ( + ClassManager, InstrumentationFactory, _default_state_getter, + _default_dict_getter, _default_manager_getter +) +from ..orm import attributes, collections, base as orm_base +from .. import util +from ..orm import exc as orm_exc +import weakref + +INSTRUMENTATION_MANAGER = '__sa_instrumentation_manager__' +"""Attribute, elects custom instrumentation when present on a mapped class. + +Allows a class to specify a slightly or wildly different technique for +tracking changes made to mapped attributes and collections. + +Only one instrumentation implementation is allowed in a given object +inheritance hierarchy. + +The value of this attribute must be a callable and will be passed a class +object. The callable must return one of: + + - An instance of an InstrumentationManager or subclass + - An object implementing all or some of InstrumentationManager (TODO) + - A dictionary of callables, implementing all or some of the above (TODO) + - An instance of a ClassManager or subclass + +This attribute is consulted by SQLAlchemy instrumentation +resolution, once the :mod:`sqlalchemy.ext.instrumentation` module +has been imported. If custom finders are installed in the global +instrumentation_finders list, they may or may not choose to honor this +attribute. + +""" + + +def find_native_user_instrumentation_hook(cls): + """Find user-specified instrumentation management for a class.""" + return getattr(cls, INSTRUMENTATION_MANAGER, None) + +instrumentation_finders = [find_native_user_instrumentation_hook] +"""An extensible sequence of callables which return instrumentation +implementations + +When a class is registered, each callable will be passed a class object. +If None is returned, the +next finder in the sequence is consulted. Otherwise the return must be an +instrumentation factory that follows the same guidelines as +sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER. + +By default, the only finder is find_native_user_instrumentation_hook, which +searches for INSTRUMENTATION_MANAGER. If all finders return None, standard +ClassManager instrumentation is used. + +""" + + +class ExtendedInstrumentationRegistry(InstrumentationFactory): + """Extends :class:`.InstrumentationFactory` with additional + bookkeeping, to accommodate multiple types of + class managers. + + """ + _manager_finders = weakref.WeakKeyDictionary() + _state_finders = weakref.WeakKeyDictionary() + _dict_finders = weakref.WeakKeyDictionary() + _extended = False + + def _locate_extended_factory(self, class_): + for finder in instrumentation_finders: + factory = finder(class_) + if factory is not None: + manager = self._extended_class_manager(class_, factory) + return manager, factory + else: + return None, None + + def _check_conflicts(self, class_, factory): + existing_factories = self._collect_management_factories_for(class_).\ + difference([factory]) + if existing_factories: + raise TypeError( + "multiple instrumentation implementations specified " + "in %s inheritance hierarchy: %r" % ( + class_.__name__, list(existing_factories))) + + def _extended_class_manager(self, class_, factory): + manager = factory(class_) + if not isinstance(manager, ClassManager): + manager = _ClassInstrumentationAdapter(class_, manager) + + if factory != ClassManager and not self._extended: + # somebody invoked a custom ClassManager. + # reinstall global "getter" functions with the more + # expensive ones. 
+ self._extended = True + _install_instrumented_lookups() + + self._manager_finders[class_] = manager.manager_getter() + self._state_finders[class_] = manager.state_getter() + self._dict_finders[class_] = manager.dict_getter() + return manager + + def _collect_management_factories_for(self, cls): + """Return a collection of factories in play or specified for a + hierarchy. + + Traverses the entire inheritance graph of a cls and returns a + collection of instrumentation factories for those classes. Factories + are extracted from active ClassManagers, if available, otherwise + instrumentation_finders is consulted. + + """ + hierarchy = util.class_hierarchy(cls) + factories = set() + for member in hierarchy: + manager = self.manager_of_class(member) + if manager is not None: + factories.add(manager.factory) + else: + for finder in instrumentation_finders: + factory = finder(member) + if factory is not None: + break + else: + factory = None + factories.add(factory) + factories.discard(None) + return factories + + def unregister(self, class_): + if class_ in self._manager_finders: + del self._manager_finders[class_] + del self._state_finders[class_] + del self._dict_finders[class_] + super(ExtendedInstrumentationRegistry, self).unregister(class_) + + def manager_of_class(self, cls): + if cls is None: + return None + try: + finder = self._manager_finders.get(cls, _default_manager_getter) + except TypeError: + # due to weakref lookup on invalid object + return None + else: + return finder(cls) + + def state_of(self, instance): + if instance is None: + raise AttributeError("None has no persistent state.") + return self._state_finders.get( + instance.__class__, _default_state_getter)(instance) + + def dict_of(self, instance): + if instance is None: + raise AttributeError("None has no persistent state.") + return self._dict_finders.get( + instance.__class__, _default_dict_getter)(instance) + + +orm_instrumentation._instrumentation_factory = \ + _instrumentation_factory = ExtendedInstrumentationRegistry() +orm_instrumentation.instrumentation_finders = instrumentation_finders + + +class InstrumentationManager(object): + """User-defined class instrumentation extension. + + :class:`.InstrumentationManager` can be subclassed in order + to change + how class instrumentation proceeds. This class exists for + the purposes of integration with other object management + frameworks which would like to entirely modify the + instrumentation methodology of the ORM, and is not intended + for regular usage. For interception of class instrumentation + events, see :class:`.InstrumentationEvents`. + + The API for this class should be considered as semi-stable, + and may change slightly with new releases. + + .. versionchanged:: 0.8 + :class:`.InstrumentationManager` was moved from + :mod:`sqlalchemy.orm.instrumentation` to + :mod:`sqlalchemy.ext.instrumentation`. + + """ + + # r4361 added a mandatory (cls) constructor to this interface. + # given that, perhaps class_ should be dropped from all of these + # signatures. 
+ + def __init__(self, class_): + pass + + def manage(self, class_, manager): + setattr(class_, '_default_class_manager', manager) + + def dispose(self, class_, manager): + delattr(class_, '_default_class_manager') + + def manager_getter(self, class_): + def get(cls): + return cls._default_class_manager + return get + + def instrument_attribute(self, class_, key, inst): + pass + + def post_configure_attribute(self, class_, key, inst): + pass + + def install_descriptor(self, class_, key, inst): + setattr(class_, key, inst) + + def uninstall_descriptor(self, class_, key): + delattr(class_, key) + + def install_member(self, class_, key, implementation): + setattr(class_, key, implementation) + + def uninstall_member(self, class_, key): + delattr(class_, key) + + def instrument_collection_class(self, class_, key, collection_class): + return collections.prepare_instrumentation(collection_class) + + def get_instance_dict(self, class_, instance): + return instance.__dict__ + + def initialize_instance_dict(self, class_, instance): + pass + + def install_state(self, class_, instance, state): + setattr(instance, '_default_state', state) + + def remove_state(self, class_, instance): + delattr(instance, '_default_state') + + def state_getter(self, class_): + return lambda instance: getattr(instance, '_default_state') + + def dict_getter(self, class_): + return lambda inst: self.get_instance_dict(class_, inst) + + +class _ClassInstrumentationAdapter(ClassManager): + """Adapts a user-defined InstrumentationManager to a ClassManager.""" + + def __init__(self, class_, override): + self._adapted = override + self._get_state = self._adapted.state_getter(class_) + self._get_dict = self._adapted.dict_getter(class_) + + ClassManager.__init__(self, class_) + + def manage(self): + self._adapted.manage(self.class_, self) + + def dispose(self): + self._adapted.dispose(self.class_) + + def manager_getter(self): + return self._adapted.manager_getter(self.class_) + + def instrument_attribute(self, key, inst, propagated=False): + ClassManager.instrument_attribute(self, key, inst, propagated) + if not propagated: + self._adapted.instrument_attribute(self.class_, key, inst) + + def post_configure_attribute(self, key): + super(_ClassInstrumentationAdapter, self).post_configure_attribute(key) + self._adapted.post_configure_attribute(self.class_, key, self[key]) + + def install_descriptor(self, key, inst): + self._adapted.install_descriptor(self.class_, key, inst) + + def uninstall_descriptor(self, key): + self._adapted.uninstall_descriptor(self.class_, key) + + def install_member(self, key, implementation): + self._adapted.install_member(self.class_, key, implementation) + + def uninstall_member(self, key): + self._adapted.uninstall_member(self.class_, key) + + def instrument_collection_class(self, key, collection_class): + return self._adapted.instrument_collection_class( + self.class_, key, collection_class) + + def initialize_collection(self, key, state, factory): + delegate = getattr(self._adapted, 'initialize_collection', None) + if delegate: + return delegate(key, state, factory) + else: + return ClassManager.initialize_collection(self, key, + state, factory) + + def new_instance(self, state=None): + instance = self.class_.__new__(self.class_) + self.setup_instance(instance, state) + return instance + + def _new_state_if_none(self, instance): + """Install a default InstanceState if none is present. + + A private convenience method used by the __init__ decorator. 
+ """ + if self.has_state(instance): + return False + else: + return self.setup_instance(instance) + + def setup_instance(self, instance, state=None): + self._adapted.initialize_instance_dict(self.class_, instance) + + if state is None: + state = self._state_constructor(instance, self) + + # the given instance is assumed to have no state + self._adapted.install_state(self.class_, instance, state) + return state + + def teardown_instance(self, instance): + self._adapted.remove_state(self.class_, instance) + + def has_state(self, instance): + try: + self._get_state(instance) + except orm_exc.NO_STATE: + return False + else: + return True + + def state_getter(self): + return self._get_state + + def dict_getter(self): + return self._get_dict + + +def _install_instrumented_lookups(): + """Replace global class/object management functions + with ExtendedInstrumentationRegistry implementations, which + allow multiple types of class managers to be present, + at the cost of performance. + + This function is called only by ExtendedInstrumentationRegistry + and unit tests specific to this behavior. + + The _reinstall_default_lookups() function can be called + after this one to re-establish the default functions. + + """ + _install_lookups( + dict( + instance_state=_instrumentation_factory.state_of, + instance_dict=_instrumentation_factory.dict_of, + manager_of_class=_instrumentation_factory.manager_of_class + ) + ) + + +def _reinstall_default_lookups(): + """Restore simplified lookups.""" + _install_lookups( + dict( + instance_state=_default_state_getter, + instance_dict=_default_dict_getter, + manager_of_class=_default_manager_getter + ) + ) + _instrumentation_factory._extended = False + + +def _install_lookups(lookups): + global instance_state, instance_dict, manager_of_class + instance_state = lookups['instance_state'] + instance_dict = lookups['instance_dict'] + manager_of_class = lookups['manager_of_class'] + orm_base.instance_state = attributes.instance_state = \ + orm_instrumentation.instance_state = instance_state + orm_base.instance_dict = attributes.instance_dict = \ + orm_instrumentation.instance_dict = instance_dict + orm_base.manager_of_class = attributes.manager_of_class = \ + orm_instrumentation.manager_of_class = manager_of_class diff --git a/app/lib/sqlalchemy/ext/mutable.py b/app/lib/sqlalchemy/ext/mutable.py new file mode 100644 index 0000000..53a4971 --- /dev/null +++ b/app/lib/sqlalchemy/ext/mutable.py @@ -0,0 +1,904 @@ +# ext/mutable.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +r"""Provide support for tracking of in-place changes to scalar values, +which are propagated into ORM change events on owning parent objects. + +.. versionadded:: 0.7 :mod:`sqlalchemy.ext.mutable` replaces SQLAlchemy's + legacy approach to in-place mutations of scalar values; see + :ref:`07_migration_mutation_extension`. + +.. _mutable_scalars: + +Establishing Mutability on Scalar Column Values +=============================================== + +A typical example of a "mutable" structure is a Python dictionary. 
+Following the example introduced in :ref:`types_toplevel`, we
+begin with a custom type that marshals Python dictionaries into
+JSON strings before being persisted::
+
+    from sqlalchemy.types import TypeDecorator, VARCHAR
+    import json
+
+    class JSONEncodedDict(TypeDecorator):
+        "Represents an immutable structure as a json-encoded string."
+
+        impl = VARCHAR
+
+        def process_bind_param(self, value, dialect):
+            if value is not None:
+                value = json.dumps(value)
+            return value
+
+        def process_result_value(self, value, dialect):
+            if value is not None:
+                value = json.loads(value)
+            return value
+
+The usage of ``json`` is only for the purposes of example. The
+:mod:`sqlalchemy.ext.mutable` extension can be used
+with any type whose target Python type may be mutable, including
+:class:`.PickleType`, :class:`.postgresql.ARRAY`, etc.
+
+When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself
+tracks all parents which reference it.  Below, we illustrate a simple
+version of the :class:`.MutableDict` dictionary object, which applies
+the :class:`.Mutable` mixin to a plain Python dictionary::
+
+    from sqlalchemy.ext.mutable import Mutable
+
+    class MutableDict(Mutable, dict):
+        @classmethod
+        def coerce(cls, key, value):
+            "Convert plain dictionaries to MutableDict."
+
+            if not isinstance(value, MutableDict):
+                if isinstance(value, dict):
+                    return MutableDict(value)
+
+                # this call will raise ValueError
+                return Mutable.coerce(key, value)
+            else:
+                return value
+
+        def __setitem__(self, key, value):
+            "Detect dictionary set events and emit change events."
+
+            dict.__setitem__(self, key, value)
+            self.changed()
+
+        def __delitem__(self, key):
+            "Detect dictionary del events and emit change events."
+
+            dict.__delitem__(self, key)
+            self.changed()
+
+The above dictionary class takes the approach of subclassing the Python
+built-in ``dict`` to produce a dict
+subclass which routes all mutation events through ``__setitem__``.  There are
+variants on this approach, such as subclassing ``UserDict.UserDict`` or
+``collections.MutableMapping``; the part that's important to this example is
+that the :meth:`.Mutable.changed` method is called whenever an in-place
+change to the datastructure takes place.
+
+We also redefine the :meth:`.Mutable.coerce` method which will be used to
+convert any values that are not instances of ``MutableDict``, such
+as the plain dictionaries returned by the ``json`` module, into the
+appropriate type.  Defining this method is optional; we could just as well
+have created our ``JSONEncodedDict`` such that it always returns an instance
+of ``MutableDict``, and additionally ensured that all calling code
+uses ``MutableDict`` explicitly.  When :meth:`.Mutable.coerce` is not
+overridden, any values applied to a parent object which are not instances
+of the mutable type will raise a ``ValueError``.
+
+Our new ``MutableDict`` type offers a class method
+:meth:`~.Mutable.as_mutable` which we can use within column metadata
+to associate with types.  This method grabs the given type object or
+class and associates a listener that will detect all future mappings
+of this type, applying event listening instrumentation to the mapped
+attribute.  For example, with classical table metadata::
+
+    from sqlalchemy import Table, Column, Integer
+
+    my_data = Table('my_data', metadata,
+        Column('id', Integer, primary_key=True),
+        Column('data', MutableDict.as_mutable(JSONEncodedDict))
+    )
+
+Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict``
+(if the type object was not an instance already), which will intercept any
+attributes which are mapped against this type.  Below we establish a simple
+mapping against the ``my_data`` table::
+
+    from sqlalchemy.orm import mapper
+
+    class MyDataClass(object):
+        pass
+
+    # associates mutation listeners with MyDataClass.data
+    mapper(MyDataClass, my_data)
+
+The ``MyDataClass.data`` member will now be notified of in-place changes
+to its value.
+
+There's no difference in usage when using declarative::
+
+    from sqlalchemy.ext.declarative import declarative_base
+
+    Base = declarative_base()
+
+    class MyDataClass(Base):
+        __tablename__ = 'my_data'
+        id = Column(Integer, primary_key=True)
+        data = Column(MutableDict.as_mutable(JSONEncodedDict))
+
+Any in-place changes to the ``MyDataClass.data`` member
+will flag the attribute as "dirty" on the parent object::
+
+    >>> from sqlalchemy.orm import Session
+
+    >>> sess = Session()
+    >>> m1 = MyDataClass(data={'value1':'foo'})
+    >>> sess.add(m1)
+    >>> sess.commit()
+
+    >>> m1.data['value1'] = 'bar'
+    >>> m1 in sess.dirty
+    True
+
+The ``MutableDict`` can be associated with all future instances
+of ``JSONEncodedDict`` in one step, using
+:meth:`~.Mutable.associate_with`.  This is similar to
+:meth:`~.Mutable.as_mutable` except it will intercept all occurrences
+of ``MutableDict`` in all mappings unconditionally, without
+the need to declare it individually::
+
+    MutableDict.associate_with(JSONEncodedDict)
+
+    class MyDataClass(Base):
+        __tablename__ = 'my_data'
+        id = Column(Integer, primary_key=True)
+        data = Column(JSONEncodedDict)
+
+
+Supporting Pickling
+--------------------
+
+The :mod:`sqlalchemy.ext.mutable` extension relies upon the
+placement of a ``weakref.WeakKeyDictionary`` upon the value object, which
+stores a mapping of parent mapped objects keyed to the attribute name under
+which they are associated with this value. ``WeakKeyDictionary`` objects are
+not picklable, because they contain weakrefs and function
+callbacks.  In our case, this is a good thing, since if this dictionary were
+picklable, it could lead to an excessively large pickle size for our value
+objects that are pickled by themselves outside of the context of the parent.
+The developer responsibility here is only to provide a ``__getstate__`` method
+that excludes the :meth:`~MutableBase._parents` collection from the pickle
+stream::
+
+    class MyMutableType(Mutable):
+        def __getstate__(self):
+            d = self.__dict__.copy()
+            d.pop('_parents', None)
+            return d
+
+With our dictionary example, we need to return the contents of the dict itself
+(and also restore them on __setstate__)::
+
+    class MutableDict(Mutable, dict):
+        # ....
+
+        def __getstate__(self):
+            return dict(self)
+
+        def __setstate__(self, state):
+            self.update(state)
+
+In the case that our mutable value object is pickled as it is attached to one
+or more parent objects that are also part of the pickle, the :class:`.Mutable`
+mixin will re-establish the :attr:`.Mutable._parents` collection on each value
+object as the owning parents themselves are unpickled.
+
+.. _mutable_composites:
+
+Establishing Mutability on Composites
+=====================================
+
+Composites are a special ORM feature which allow a single scalar attribute to
+be assigned an object value which represents information "composed" from one
+or more columns from the underlying mapped table. The usual example is that of
+a geometric "point", and is introduced in :ref:`mapper_composite`.
+
+.. versionchanged:: 0.7
+    The internals of :func:`.orm.composite` have been
+    greatly simplified and in-place mutation detection is no longer enabled by
+    default; instead, the user-defined value must detect changes on its own and
+    propagate them to all owning parents. The :mod:`sqlalchemy.ext.mutable`
+    extension provides the helper class :class:`.MutableComposite`, which is a
+    slight variant on the :class:`.Mutable` class.
+
+As is the case with :class:`.Mutable`, the user-defined composite class
+subclasses :class:`.MutableComposite` as a mixin, and detects and delivers
+change events to its parents via the :meth:`.MutableComposite.changed` method.
+In the case of a composite class, the detection is usually via the usage of
+Python descriptors (i.e. ``@property``), or alternatively via the special
+Python method ``__setattr__()``. Below we expand upon the ``Point`` class
+introduced in :ref:`mapper_composite` to subclass :class:`.MutableComposite`
+and to also route attribute set events via ``__setattr__`` to the
+:meth:`.MutableComposite.changed` method::
+
+    from sqlalchemy.ext.mutable import MutableComposite
+
+    class Point(MutableComposite):
+        def __init__(self, x, y):
+            self.x = x
+            self.y = y
+
+        def __setattr__(self, key, value):
+            "Intercept set events"
+
+            # set the attribute
+            object.__setattr__(self, key, value)
+
+            # alert all parents to the change
+            self.changed()
+
+        def __composite_values__(self):
+            return self.x, self.y
+
+        def __eq__(self, other):
+            return isinstance(other, Point) and \
+                other.x == self.x and \
+                other.y == self.y
+
+        def __ne__(self, other):
+            return not self.__eq__(other)
+
+The :class:`.MutableComposite` class uses a Python metaclass to automatically
+establish listeners for any usage of :func:`.orm.composite` that specifies our
+``Point`` type. Below, when ``Point`` is mapped to the ``Vertex`` class,
+listeners are established which will route change events from ``Point``
+objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes::
+
+    from sqlalchemy.orm import composite, mapper
+    from sqlalchemy import Table, Column
+
+    vertices = Table('vertices', metadata,
+        Column('id', Integer, primary_key=True),
+        Column('x1', Integer),
+        Column('y1', Integer),
+        Column('x2', Integer),
+        Column('y2', Integer),
+        )
+
+    class Vertex(object):
+        pass
+
+    mapper(Vertex, vertices, properties={
+        'start': composite(Point, vertices.c.x1, vertices.c.y1),
+        'end': composite(Point, vertices.c.x2, vertices.c.y2)
+    })
+
+Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members
+will flag the attribute as "dirty" on the parent object::
+
+    >>> from sqlalchemy.orm import Session
+
+    >>> sess = Session()
+    >>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15))
+    >>> sess.add(v1)
+    >>> sess.commit()
+
+    >>> v1.end.x = 8
+    >>> v1 in sess.dirty
+    True
+
+Coercing Mutable Composites
+---------------------------
+
+The :meth:`.MutableBase.coerce` method is also supported on composite types.
+In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce`
+method is only called for attribute set operations, not load operations.
+
+Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent
+to using a :func:`.validates` validation routine for all attributes which
+make use of the custom composite type::
+
+    class Point(MutableComposite):
+        # other Point methods
+        # ...
+
+        @classmethod
+        def coerce(cls, key, value):
+            if isinstance(value, tuple):
+                value = Point(*value)
+            elif not isinstance(value, Point):
+                raise ValueError("tuple or Point expected")
+            return value
+
+.. versionadded:: 0.7.10,0.8.0b2
+    Support for the :meth:`.MutableBase.coerce` method in conjunction with
+    objects of type :class:`.MutableComposite`.
+
+Supporting Pickling
+--------------------
+
+As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper
+class uses a ``weakref.WeakKeyDictionary`` available via the
+:meth:`MutableBase._parents` attribute which isn't picklable. If we need to
+pickle instances of ``Point`` or its owning class ``Vertex``, we at least need
+to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary.
+Below we define both a ``__getstate__`` and a ``__setstate__`` that package up
+the minimal form of our ``Point`` class::
+
+    class Point(MutableComposite):
+        # ...
+
+        def __getstate__(self):
+            return self.x, self.y
+
+        def __setstate__(self, state):
+            self.x, self.y = state
+
+As with :class:`.Mutable`, the :class:`.MutableComposite` augments the
+pickling process of the parent's object-relational state so that the
+:meth:`MutableBase._parents` collection is restored to all ``Point`` objects.
+
+"""
+from ..orm.attributes import flag_modified
+from .. import event, types
+from ..orm import mapper, object_mapper, Mapper
+from ..util import memoized_property
+from ..sql.base import SchemaEventTarget
+import weakref
+
+
+class MutableBase(object):
+    """Common base class to :class:`.Mutable`
+    and :class:`.MutableComposite`.
+
+    """
+
+    @memoized_property
+    def _parents(self):
+        """Dictionary of parent object->attribute name on the parent.
+
+        This attribute is a so-called "memoized" property.  It initializes
+        itself with a new ``weakref.WeakKeyDictionary`` the first time
+        it is accessed, returning the same object upon subsequent access.
+
+        """
+
+        return weakref.WeakKeyDictionary()
+
+    @classmethod
+    def coerce(cls, key, value):
+        """Given a value, coerce it into the target type.
+
+        Can be overridden by custom subclasses to coerce incoming
+        data into a particular type.
+
+        By default, raises ``ValueError``.
+
+        This method is called in different scenarios depending on whether
+        the parent class is of type :class:`.Mutable` or of type
+        :class:`.MutableComposite`.  In the case of the former, it is called
+        for both attribute-set operations as well as during ORM loading
+        operations.  For the latter, it is only called during attribute-set
+        operations; the mechanics of the :func:`.composite` construct
+        handle coercion during load operations.
+
+        :param key: string name of the ORM-mapped attribute being set.
+        :param value: the incoming value.
+        :return: the method should return the coerced value, or raise
+         ``ValueError`` if the coercion cannot be completed.
+
+        """
+        if value is None:
+            return None
+        msg = "Attribute '%s' does not accept objects of type %s"
+        raise ValueError(msg % (key, type(value)))
+
+    @classmethod
+    def _get_listen_keys(cls, attribute):
+        """Given a descriptor attribute, return a ``set()`` of the attribute
+        keys which indicate a change in the state of this attribute.
+ + This is normally just ``set([attribute.key])``, but can be overridden + to provide for additional keys. E.g. a :class:`.MutableComposite` + augments this set with the attribute keys associated with the columns + that comprise the composite value. + + This collection is consulted in the case of intercepting the + :meth:`.InstanceEvents.refresh` and + :meth:`.InstanceEvents.refresh_flush` events, which pass along a list + of attribute names that have been refreshed; the list is compared + against this set to determine if action needs to be taken. + + .. versionadded:: 1.0.5 + + """ + return set([attribute.key]) + + @classmethod + def _listen_on_attribute(cls, attribute, coerce, parent_cls): + """Establish this type as a mutation listener for the given + mapped descriptor. + + """ + key = attribute.key + if parent_cls is not attribute.class_: + return + + # rely on "propagate" here + parent_cls = attribute.class_ + + listen_keys = cls._get_listen_keys(attribute) + + def load(state, *args): + """Listen for objects loaded or refreshed. + + Wrap the target data member's value with + ``Mutable``. + + """ + val = state.dict.get(key, None) + if val is not None: + if coerce: + val = cls.coerce(key, val) + state.dict[key] = val + val._parents[state.obj()] = key + + def load_attrs(state, ctx, attrs): + if not attrs or listen_keys.intersection(attrs): + load(state) + + def set(target, value, oldvalue, initiator): + """Listen for set/replace events on the target + data member. + + Establish a weak reference to the parent object + on the incoming value, remove it for the one + outgoing. + + """ + if value is oldvalue: + return value + + if not isinstance(value, cls): + value = cls.coerce(key, value) + if value is not None: + value._parents[target.obj()] = key + if isinstance(oldvalue, cls): + oldvalue._parents.pop(target.obj(), None) + return value + + def pickle(state, state_dict): + val = state.dict.get(key, None) + if val is not None: + if 'ext.mutable.values' not in state_dict: + state_dict['ext.mutable.values'] = [] + state_dict['ext.mutable.values'].append(val) + + def unpickle(state, state_dict): + if 'ext.mutable.values' in state_dict: + for val in state_dict['ext.mutable.values']: + val._parents[state.obj()] = key + + event.listen(parent_cls, 'load', load, + raw=True, propagate=True) + event.listen(parent_cls, 'refresh', load_attrs, + raw=True, propagate=True) + event.listen(parent_cls, 'refresh_flush', load_attrs, + raw=True, propagate=True) + event.listen(attribute, 'set', set, + raw=True, retval=True, propagate=True) + event.listen(parent_cls, 'pickle', pickle, + raw=True, propagate=True) + event.listen(parent_cls, 'unpickle', unpickle, + raw=True, propagate=True) + + +class Mutable(MutableBase): + """Mixin that defines transparent propagation of change + events to a parent object. + + See the example in :ref:`mutable_scalars` for usage information. + + """ + + def changed(self): + """Subclasses should call this method whenever change events occur.""" + + for parent, key in self._parents.items(): + flag_modified(parent, key) + + @classmethod + def associate_with_attribute(cls, attribute): + """Establish this type as a mutation listener for the given + mapped descriptor. + + """ + cls._listen_on_attribute(attribute, True, attribute.class_) + + @classmethod + def associate_with(cls, sqltype): + """Associate this wrapper with all future mapped columns + of the given type. + + This is a convenience method that calls + ``associate_with_attribute`` automatically. + + .. 
warning:: + + The listeners established by this method are *global* + to all mappers, and are *not* garbage collected. Only use + :meth:`.associate_with` for types that are permanent to an + application, not with ad-hoc types else this will cause unbounded + growth in memory usage. + + """ + + def listen_for_type(mapper, class_): + for prop in mapper.column_attrs: + if isinstance(prop.columns[0].type, sqltype): + cls.associate_with_attribute(getattr(class_, prop.key)) + + event.listen(mapper, 'mapper_configured', listen_for_type) + + @classmethod + def as_mutable(cls, sqltype): + """Associate a SQL type with this mutable Python type. + + This establishes listeners that will detect ORM mappings against + the given type, adding mutation event trackers to those mappings. + + The type is returned, unconditionally as an instance, so that + :meth:`.as_mutable` can be used inline:: + + Table('mytable', metadata, + Column('id', Integer, primary_key=True), + Column('data', MyMutableType.as_mutable(PickleType)) + ) + + Note that the returned type is always an instance, even if a class + is given, and that only columns which are declared specifically with + that type instance receive additional instrumentation. + + To associate a particular mutable type with all occurrences of a + particular type, use the :meth:`.Mutable.associate_with` classmethod + of the particular :class:`.Mutable` subclass to establish a global + association. + + .. warning:: + + The listeners established by this method are *global* + to all mappers, and are *not* garbage collected. Only use + :meth:`.as_mutable` for types that are permanent to an application, + not with ad-hoc types else this will cause unbounded growth + in memory usage. + + """ + sqltype = types.to_instance(sqltype) + + # a SchemaType will be copied when the Column is copied, + # and we'll lose our ability to link that type back to the original. + # so track our original type w/ columns + if isinstance(sqltype, SchemaEventTarget): + @event.listens_for(sqltype, "before_parent_attach") + def _add_column_memo(sqltyp, parent): + parent.info['_ext_mutable_orig_type'] = sqltyp + schema_event_check = True + else: + schema_event_check = False + + def listen_for_type(mapper, class_): + for prop in mapper.column_attrs: + if ( + schema_event_check and + hasattr(prop.expression, 'info') and + prop.expression.info.get('_ext_mutable_orig_type') + is sqltype + ) or ( + prop.columns[0].type is sqltype + ): + cls.associate_with_attribute(getattr(class_, prop.key)) + + event.listen(mapper, 'mapper_configured', listen_for_type) + + return sqltype + + +class MutableComposite(MutableBase): + """Mixin that defines transparent propagation of change + events on a SQLAlchemy "composite" object to its + owning parent or parents. + + See the example in :ref:`mutable_composites` for usage information. 
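+
+    As a minimal sketch (condensed from the ``Point`` example in
+    :ref:`mutable_composites`), a subclass routes change detection
+    through :meth:`.MutableComposite.changed`::
+
+        class Point(MutableComposite):
+            def __setattr__(self, key, value):
+                # set the attribute, then alert all owning parents
+                object.__setattr__(self, key, value)
+                self.changed()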
+ + """ + + @classmethod + def _get_listen_keys(cls, attribute): + return set([attribute.key]).union(attribute.property._attribute_keys) + + def changed(self): + """Subclasses should call this method whenever change events occur.""" + + for parent, key in self._parents.items(): + + prop = object_mapper(parent).get_property(key) + for value, attr_name in zip( + self.__composite_values__(), + prop._attribute_keys): + setattr(parent, attr_name, value) + + +def _setup_composite_listener(): + def _listen_for_type(mapper, class_): + for prop in mapper.iterate_properties: + if (hasattr(prop, 'composite_class') and + isinstance(prop.composite_class, type) and + issubclass(prop.composite_class, MutableComposite)): + prop.composite_class._listen_on_attribute( + getattr(class_, prop.key), False, class_) + if not event.contains(Mapper, "mapper_configured", _listen_for_type): + event.listen(Mapper, 'mapper_configured', _listen_for_type) +_setup_composite_listener() + + +class MutableDict(Mutable, dict): + """A dictionary type that implements :class:`.Mutable`. + + The :class:`.MutableDict` object implements a dictionary that will + emit change events to the underlying mapping when the contents of + the dictionary are altered, including when values are added or removed. + + Note that :class:`.MutableDict` does **not** apply mutable tracking to the + *values themselves* inside the dictionary. Therefore it is not a sufficient + solution for the use case of tracking deep changes to a *recursive* + dictionary structure, such as a JSON structure. To support this use case, + build a subclass of :class:`.MutableDict` that provides appropriate + coersion to the values placed in the dictionary so that they too are + "mutable", and emit events up to their parent structure. + + .. versionadded:: 0.8 + + .. seealso:: + + :class:`.MutableList` + + :class:`.MutableSet` + + """ + + def __setitem__(self, key, value): + """Detect dictionary set events and emit change events.""" + dict.__setitem__(self, key, value) + self.changed() + + def setdefault(self, key, value): + result = dict.setdefault(self, key, value) + self.changed() + return result + + def __delitem__(self, key): + """Detect dictionary del events and emit change events.""" + dict.__delitem__(self, key) + self.changed() + + def update(self, *a, **kw): + dict.update(self, *a, **kw) + self.changed() + + def pop(self, *arg): + result = dict.pop(self, *arg) + self.changed() + return result + + def popitem(self): + result = dict.popitem(self) + self.changed() + return result + + def clear(self): + dict.clear(self) + self.changed() + + @classmethod + def coerce(cls, key, value): + """Convert plain dictionary to instance of this class.""" + if not isinstance(value, cls): + if isinstance(value, dict): + return cls(value) + return Mutable.coerce(key, value) + else: + return value + + def __getstate__(self): + return dict(self) + + def __setstate__(self, state): + self.update(state) + + +class MutableList(Mutable, list): + """A list type that implements :class:`.Mutable`. + + The :class:`.MutableList` object implements a list that will + emit change events to the underlying mapping when the contents of + the list are altered, including when values are added or removed. + + Note that :class:`.MutableList` does **not** apply mutable tracking to the + *values themselves* inside the list. Therefore it is not a sufficient + solution for the use case of tracking deep changes to a *recursive* + mutable structure, such as a JSON structure. 
+
+
+class MutableList(Mutable, list):
+    """A list type that implements :class:`.Mutable`.
+
+    The :class:`.MutableList` object implements a list that will
+    emit change events to the underlying mapping when the contents of
+    the list are altered, including when values are added or removed.
+
+    Note that :class:`.MutableList` does **not** apply mutable tracking to
+    the *values themselves* inside the list. Therefore it is not a
+    sufficient solution for the use case of tracking deep changes to a
+    *recursive* mutable structure, such as a JSON structure. To support
+    this use case, build a subclass of :class:`.MutableList` that provides
+    appropriate coercion to the values placed in the list so that they too
+    are "mutable", and emit events up to their parent structure.
+
+    .. versionadded:: 1.1
+
+    .. seealso::
+
+        :class:`.MutableDict`
+
+        :class:`.MutableSet`
+
+    """
+
+    def __setitem__(self, index, value):
+        """Detect list set events and emit change events."""
+        list.__setitem__(self, index, value)
+        self.changed()
+
+    def __setslice__(self, start, end, value):
+        """Detect list set events and emit change events."""
+        list.__setslice__(self, start, end, value)
+        self.changed()
+
+    def __delitem__(self, index):
+        """Detect list del events and emit change events."""
+        list.__delitem__(self, index)
+        self.changed()
+
+    def __delslice__(self, start, end):
+        """Detect list del events and emit change events."""
+        list.__delslice__(self, start, end)
+        self.changed()
+
+    def pop(self, *arg):
+        result = list.pop(self, *arg)
+        self.changed()
+        return result
+
+    def append(self, x):
+        list.append(self, x)
+        self.changed()
+
+    def extend(self, x):
+        list.extend(self, x)
+        self.changed()
+
+    def insert(self, i, x):
+        list.insert(self, i, x)
+        self.changed()
+
+    def remove(self, i):
+        list.remove(self, i)
+        self.changed()
+
+    def clear(self):
+        list.clear(self)
+        self.changed()
+
+    def sort(self):
+        list.sort(self)
+        self.changed()
+
+    def reverse(self):
+        list.reverse(self)
+        self.changed()
+
+    @classmethod
+    def coerce(cls, index, value):
+        """Convert plain list to instance of this class."""
+        if not isinstance(value, cls):
+            if isinstance(value, list):
+                return cls(value)
+            return Mutable.coerce(index, value)
+        else:
+            return value
+
+    def __getstate__(self):
+        return list(self)
+
+    def __setstate__(self, state):
+        self[:] = state
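+
+# Note (illustrative sketch): as the docstring above states, MutableList
+# tracks only direct mutations of the list itself. Assuming a mapped
+# column such as ``data = Column(MutableList.as_mutable(PickleType))``:
+#
+#     obj.data.append({'x': 1})    # tracked -- changed() is invoked
+#     obj.data[0]['x'] = 2         # NOT tracked -- this mutates a plain
+#                                  # dict nested inside the list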
+
+
+class MutableSet(Mutable, set):
+    """A set type that implements :class:`.Mutable`.
+
+    The :class:`.MutableSet` object implements a set that will
+    emit change events to the underlying mapping when the contents of
+    the set are altered, including when values are added or removed.
+
+    Note that :class:`.MutableSet` does **not** apply mutable tracking to
+    the *values themselves* inside the set. Therefore it is not a
+    sufficient solution for the use case of tracking deep changes to a
+    *recursive* mutable structure. To support this use case, build a
+    subclass of :class:`.MutableSet` that provides appropriate coercion
+    to the values placed in the set so that they too are "mutable", and
+    emit events up to their parent structure.
+
+    .. versionadded:: 1.1
+
+    .. seealso::
+
+        :class:`.MutableDict`
+
+        :class:`.MutableList`
+
+    """
+
+    def update(self, *arg):
+        set.update(self, *arg)
+        self.changed()
+
+    def intersection_update(self, *arg):
+        set.intersection_update(self, *arg)
+        self.changed()
+
+    def difference_update(self, *arg):
+        set.difference_update(self, *arg)
+        self.changed()
+
+    def symmetric_difference_update(self, *arg):
+        set.symmetric_difference_update(self, *arg)
+        self.changed()
+
+    def add(self, elem):
+        set.add(self, elem)
+        self.changed()
+
+    def remove(self, elem):
+        set.remove(self, elem)
+        self.changed()
+
+    def discard(self, elem):
+        set.discard(self, elem)
+        self.changed()
+
+    def pop(self, *arg):
+        result = set.pop(self, *arg)
+        self.changed()
+        return result
+
+    def clear(self):
+        set.clear(self)
+        self.changed()
+
+    @classmethod
+    def coerce(cls, index, value):
+        """Convert plain set to instance of this class."""
+        if not isinstance(value, cls):
+            if isinstance(value, set):
+                return cls(value)
+            return Mutable.coerce(index, value)
+        else:
+            return value
+
+    def __getstate__(self):
+        return set(self)
+
+    def __setstate__(self, state):
+        self.update(state)
+
+    def __reduce_ex__(self, proto):
+        return (self.__class__, (list(self), ))
diff --git a/app/lib/sqlalchemy/ext/orderinglist.py b/app/lib/sqlalchemy/ext/orderinglist.py
new file mode 100644
index 0000000..6b22aa6
--- /dev/null
+++ b/app/lib/sqlalchemy/ext/orderinglist.py
@@ -0,0 +1,380 @@
+# ext/orderinglist.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php

+"""A custom list that manages index/position information for contained
+elements.
+
+:author: Jason Kirtland
+
+``orderinglist`` is a helper for mutable ordered relationships. It will
+intercept list operations performed on a :func:`.relationship`-managed
+collection and automatically synchronize changes in list position onto a
+target scalar attribute.
+
+Example: A ``slide`` table, where each row refers to zero or more entries
+in a related ``bullet`` table. The bullets within a slide are
+displayed in order based on the value of the ``position`` column in the
+``bullet`` table. As entries are reordered in memory, the value of the
+``position`` attribute should be updated to reflect the new sort order::
+
+
+    Base = declarative_base()
+
+    class Slide(Base):
+        __tablename__ = 'slide'
+
+        id = Column(Integer, primary_key=True)
+        name = Column(String)
+
+        bullets = relationship("Bullet", order_by="Bullet.position")
+
+    class Bullet(Base):
+        __tablename__ = 'bullet'
+        id = Column(Integer, primary_key=True)
+        slide_id = Column(Integer, ForeignKey('slide.id'))
+        position = Column(Integer)
+        text = Column(String)
+
+The standard relationship mapping will produce a list-like attribute on each
+``Slide`` containing all related ``Bullet`` objects, but coping with changes
+in ordering is not handled automatically. When appending a ``Bullet`` into
+``Slide.bullets``, the ``Bullet.position`` attribute will remain unset until
+manually assigned. When the ``Bullet`` is inserted into the middle of the
+list, the following ``Bullet`` objects will also need to be renumbered.
+
+The :class:`.OrderingList` object automates this task, managing the
+``position`` attribute on all ``Bullet`` objects in the collection.
+It is constructed using the :func:`.ordering_list` factory::
+
+    from sqlalchemy.ext.orderinglist import ordering_list
+
+    Base = declarative_base()
+
+    class Slide(Base):
+        __tablename__ = 'slide'
+
+        id = Column(Integer, primary_key=True)
+        name = Column(String)
+
+        bullets = relationship("Bullet", order_by="Bullet.position",
+                               collection_class=ordering_list('position'))
+
+    class Bullet(Base):
+        __tablename__ = 'bullet'
+        id = Column(Integer, primary_key=True)
+        slide_id = Column(Integer, ForeignKey('slide.id'))
+        position = Column(Integer)
+        text = Column(String)
+
+With the above mapping the ``Bullet.position`` attribute is managed::
+
+    s = Slide()
+    s.bullets.append(Bullet())
+    s.bullets.append(Bullet())
+    s.bullets[1].position
+    >>> 1
+    s.bullets.insert(1, Bullet())
+    s.bullets[2].position
+    >>> 2
+
+The :class:`.OrderingList` construct only works with **changes** to a
+collection, and not the initial load from the database, and requires that
+the list be sorted when loaded. Therefore, be sure to specify ``order_by``
+on the :func:`.relationship` against the target ordering attribute, so that
+the ordering is correct when first loaded.
+
+.. warning::
+
+  :class:`.OrderingList` only provides limited functionality when a primary
+  key column or unique column is the target of the sort. Operations
+  that are unsupported or are problematic include:
+
+    * two entries must trade values. This is not supported directly in the
+      case of a primary key or unique constraint because it means at least
+      one row would need to be temporarily removed first, or changed to
+      a third, neutral value while the switch occurs.
+
+    * an entry must be deleted in order to make room for a new entry.
+      SQLAlchemy's unit of work performs all INSERTs before DELETEs within
+      a single flush. In the case of a primary key, it will trade
+      an INSERT/DELETE of the same primary key for an UPDATE statement in
+      order to lessen the impact of this limitation, however this does not
+      take place for a UNIQUE column.
+      A future feature will allow the "DELETE before INSERT" behavior to be
+      possible, alleviating this limitation, though this feature will
+      require explicit configuration at the mapper level for sets of
+      columns that are to be handled in this way.
+
+:func:`.ordering_list` takes the name of the related object's ordering
+attribute as an argument. By default, the zero-based integer index of the
+object's position in the :func:`.ordering_list` is synchronized with the
+ordering attribute: index 0 will get position 0, index 1 position 1, etc.
+To start numbering at 1 or some other integer, provide ``count_from=1``.
+
+
+"""
+from ..orm.collections import collection, collection_adapter
+from .. import util
+
+__all__ = ['ordering_list']
+
+
+def ordering_list(attr, count_from=None, **kw):
+    """Prepares an :class:`OrderingList` factory for use in mapper definitions.
+
+    Returns an object suitable for use as an argument to a Mapper
+    relationship's ``collection_class`` option. e.g.::
+
+        from sqlalchemy.ext.orderinglist import ordering_list
+
+        class Slide(Base):
+            __tablename__ = 'slide'
+
+            id = Column(Integer, primary_key=True)
+            name = Column(String)
+
+            bullets = relationship("Bullet", order_by="Bullet.position",
+                                   collection_class=ordering_list('position'))
+
+    :param attr:
+      Name of the mapped attribute to use for storage and retrieval of
+      ordering information
+
+    :param count_from:
+      Set up an integer-based ordering, starting at ``count_from``.
For + example, ``ordering_list('pos', count_from=1)`` would create a 1-based + list in SQL, storing the value in the 'pos' column. Ignored if + ``ordering_func`` is supplied. + + Additional arguments are passed to the :class:`.OrderingList` constructor. + + """ + + kw = _unsugar_count_from(count_from=count_from, **kw) + return lambda: OrderingList(attr, **kw) + + +# Ordering utility functions + + +def count_from_0(index, collection): + """Numbering function: consecutive integers starting at 0.""" + + return index + + +def count_from_1(index, collection): + """Numbering function: consecutive integers starting at 1.""" + + return index + 1 + + +def count_from_n_factory(start): + """Numbering function: consecutive integers starting at arbitrary start.""" + + def f(index, collection): + return index + start + try: + f.__name__ = 'count_from_%i' % start + except TypeError: + pass + return f + + +def _unsugar_count_from(**kw): + """Builds counting functions from keyword arguments. + + Keyword argument filter, prepares a simple ``ordering_func`` from a + ``count_from`` argument, otherwise passes ``ordering_func`` on unchanged. + """ + + count_from = kw.pop('count_from', None) + if kw.get('ordering_func', None) is None and count_from is not None: + if count_from == 0: + kw['ordering_func'] = count_from_0 + elif count_from == 1: + kw['ordering_func'] = count_from_1 + else: + kw['ordering_func'] = count_from_n_factory(count_from) + return kw + + +class OrderingList(list): + """A custom list that manages position information for its children. + + The :class:`.OrderingList` object is normally set up using the + :func:`.ordering_list` factory function, used in conjunction with + the :func:`.relationship` function. + + """ + + def __init__(self, ordering_attr=None, ordering_func=None, + reorder_on_append=False): + """A custom list that manages position information for its children. + + ``OrderingList`` is a ``collection_class`` list implementation that + syncs position in a Python list with a position attribute on the + mapped objects. + + This implementation relies on the list starting in the proper order, + so be **sure** to put an ``order_by`` on your relationship. + + :param ordering_attr: + Name of the attribute that stores the object's order in the + relationship. + + :param ordering_func: Optional. A function that maps the position in + the Python list to a value to store in the + ``ordering_attr``. Values returned are usually (but need not be!) + integers. + + An ``ordering_func`` is called with two positional parameters: the + index of the element in the list, and the list itself. + + If omitted, Python list indexes are used for the attribute values. + Two basic pre-built numbering functions are provided in this module: + ``count_from_0`` and ``count_from_1``. For more exotic examples + like stepped numbering, alphabetical and Fibonacci numbering, see + the unit tests. + + :param reorder_on_append: + Default False. When appending an object with an existing (non-None) + ordering value, that value will be left untouched unless + ``reorder_on_append`` is true. This is an optimization to avoid a + variety of dangerous unexpected database writes. + + SQLAlchemy will add instances to the list via append() when your + object loads. If for some reason the result set from the database + skips a step in the ordering (say, row '1' is missing but you get + '2', '3', and '4'), reorder_on_append=True would immediately + renumber the items to '1', '2', '3'. 
If you have multiple sessions + making changes, any of whom happen to load this collection even in + passing, all of the sessions would try to "clean up" the numbering + in their commits, possibly causing all but one to fail with a + concurrent modification error. + + Recommend leaving this with the default of False, and just call + ``reorder()`` if you're doing ``append()`` operations with + previously ordered instances or when doing some housekeeping after + manual sql operations. + + """ + self.ordering_attr = ordering_attr + if ordering_func is None: + ordering_func = count_from_0 + self.ordering_func = ordering_func + self.reorder_on_append = reorder_on_append + + # More complex serialization schemes (multi column, e.g.) are possible by + # subclassing and reimplementing these two methods. + def _get_order_value(self, entity): + return getattr(entity, self.ordering_attr) + + def _set_order_value(self, entity, value): + setattr(entity, self.ordering_attr, value) + + def reorder(self): + """Synchronize ordering for the entire collection. + + Sweeps through the list and ensures that each object has accurate + ordering information set. + + """ + for index, entity in enumerate(self): + self._order_entity(index, entity, True) + + # As of 0.5, _reorder is no longer semi-private + _reorder = reorder + + def _order_entity(self, index, entity, reorder=True): + have = self._get_order_value(entity) + + # Don't disturb existing ordering if reorder is False + if have is not None and not reorder: + return + + should_be = self.ordering_func(index, self) + if have != should_be: + self._set_order_value(entity, should_be) + + def append(self, entity): + super(OrderingList, self).append(entity) + self._order_entity(len(self) - 1, entity, self.reorder_on_append) + + def _raw_append(self, entity): + """Append without any ordering behavior.""" + + super(OrderingList, self).append(entity) + _raw_append = collection.adds(1)(_raw_append) + + def insert(self, index, entity): + super(OrderingList, self).insert(index, entity) + self._reorder() + + def remove(self, entity): + super(OrderingList, self).remove(entity) + + adapter = collection_adapter(self) + if adapter and adapter._referenced_by_owner: + self._reorder() + + def pop(self, index=-1): + entity = super(OrderingList, self).pop(index) + self._reorder() + return entity + + def __setitem__(self, index, entity): + if isinstance(index, slice): + step = index.step or 1 + start = index.start or 0 + if start < 0: + start += len(self) + stop = index.stop or len(self) + if stop < 0: + stop += len(self) + + for i in range(start, stop, step): + self.__setitem__(i, entity[i]) + else: + self._order_entity(index, entity, True) + super(OrderingList, self).__setitem__(index, entity) + + def __delitem__(self, index): + super(OrderingList, self).__delitem__(index) + self._reorder() + + def __setslice__(self, start, end, values): + super(OrderingList, self).__setslice__(start, end, values) + self._reorder() + + def __delslice__(self, start, end): + super(OrderingList, self).__delslice__(start, end) + self._reorder() + + def __reduce__(self): + return _reconstitute, (self.__class__, self.__dict__, list(self)) + + for func_name, func in list(locals().items()): + if (util.callable(func) and func.__name__ == func_name and + not func.__doc__ and hasattr(list, func_name)): + func.__doc__ = getattr(list, func_name).__doc__ + del func_name, func + + +def _reconstitute(cls, dict_, items): + """ Reconstitute an :class:`.OrderingList`. 
+
+    This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used
+    for unpickling :class:`.OrderingList` objects.
+
+    """
+    obj = cls.__new__(cls)
+    obj.__dict__.update(dict_)
+    list.extend(obj, items)
+    return obj
diff --git a/app/lib/sqlalchemy/ext/serializer.py b/app/lib/sqlalchemy/ext/serializer.py
new file mode 100644
index 0000000..2fbc62e
--- /dev/null
+++ b/app/lib/sqlalchemy/ext/serializer.py
@@ -0,0 +1,159 @@
+# ext/serializer.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php

+"""Serializer/Deserializer objects for usage with SQLAlchemy query structures,
+allowing "contextual" deserialization.
+
+Any SQLAlchemy query structure, either based on sqlalchemy.sql.*
+or sqlalchemy.orm.* can be used. The mappers, Tables, Columns, Session
+etc. which are referenced by the structure are not persisted in serialized
+form, but are instead re-associated with the query structure
+when it is deserialized.
+
+Usage is nearly the same as that of the standard Python pickle module::
+
+    from sqlalchemy.ext.serializer import loads, dumps
+    metadata = MetaData(bind=some_engine)
+    Session = scoped_session(sessionmaker())
+
+    # ... define mappers
+
+    query = Session.query(MyClass).
+        filter(MyClass.somedata=='foo').order_by(MyClass.sortkey)
+
+    # pickle the query
+    serialized = dumps(query)
+
+    # unpickle. Pass in metadata + scoped_session
+    query2 = loads(serialized, metadata, Session)
+
+    print query2.all()
+
+Similar restrictions as when using raw pickle apply; mapped classes must
+themselves be pickleable, meaning they are importable from a module-level
+namespace.
+
+The serializer module is only appropriate for query structures. It is not
+needed for:
+
+* instances of user-defined classes. These contain no references to engines,
+  sessions or expression constructs in the typical case and can be
+  serialized directly.
+
+* Table metadata that is to be loaded entirely from the serialized structure
+  (i.e. is not already declared in the application). Regular
+  pickle.loads()/dumps() can be used to fully dump any ``MetaData`` object,
+  typically one which was reflected from an existing database at some
+  previous point in time. The serializer module is specifically for the
+  opposite case, where the Table metadata is already present in memory.
+
+"""
+
+from ..orm import class_mapper
+from ..orm.session import Session
+from ..orm.mapper import Mapper
+from ..orm.interfaces import MapperProperty
+from ..orm.attributes import QueryableAttribute
+from ..
import Table, Column +from ..engine import Engine +from ..util import pickle, byte_buffer, b64encode, b64decode, text_type +import re + + +__all__ = ['Serializer', 'Deserializer', 'dumps', 'loads'] + + +def Serializer(*args, **kw): + pickler = pickle.Pickler(*args, **kw) + + def persistent_id(obj): + # print "serializing:", repr(obj) + if isinstance(obj, QueryableAttribute): + cls = obj.impl.class_ + key = obj.impl.key + id = "attribute:" + key + ":" + b64encode(pickle.dumps(cls)) + elif isinstance(obj, Mapper) and not obj.non_primary: + id = "mapper:" + b64encode(pickle.dumps(obj.class_)) + elif isinstance(obj, MapperProperty) and not obj.parent.non_primary: + id = "mapperprop:" + b64encode(pickle.dumps(obj.parent.class_)) + \ + ":" + obj.key + elif isinstance(obj, Table): + id = "table:" + text_type(obj.key) + elif isinstance(obj, Column) and isinstance(obj.table, Table): + id = "column:" + \ + text_type(obj.table.key) + ":" + text_type(obj.key) + elif isinstance(obj, Session): + id = "session:" + elif isinstance(obj, Engine): + id = "engine:" + else: + return None + return id + + pickler.persistent_id = persistent_id + return pickler + +our_ids = re.compile( + r'(mapperprop|mapper|table|column|session|attribute|engine):(.*)') + + +def Deserializer(file, metadata=None, scoped_session=None, engine=None): + unpickler = pickle.Unpickler(file) + + def get_engine(): + if engine: + return engine + elif scoped_session and scoped_session().bind: + return scoped_session().bind + elif metadata and metadata.bind: + return metadata.bind + else: + return None + + def persistent_load(id): + m = our_ids.match(text_type(id)) + if not m: + return None + else: + type_, args = m.group(1, 2) + if type_ == 'attribute': + key, clsarg = args.split(":") + cls = pickle.loads(b64decode(clsarg)) + return getattr(cls, key) + elif type_ == "mapper": + cls = pickle.loads(b64decode(args)) + return class_mapper(cls) + elif type_ == "mapperprop": + mapper, keyname = args.split(':') + cls = pickle.loads(b64decode(mapper)) + return class_mapper(cls).attrs[keyname] + elif type_ == "table": + return metadata.tables[args] + elif type_ == "column": + table, colname = args.split(':') + return metadata.tables[table].c[colname] + elif type_ == "session": + return scoped_session() + elif type_ == "engine": + return get_engine() + else: + raise Exception("Unknown token: %s" % type_) + unpickler.persistent_load = persistent_load + return unpickler + + +def dumps(obj, protocol=0): + buf = byte_buffer() + pickler = Serializer(buf, protocol) + pickler.dump(obj) + return buf.getvalue() + + +def loads(data, metadata=None, scoped_session=None, engine=None): + buf = byte_buffer(data) + unpickler = Deserializer(buf, metadata, scoped_session, engine) + return unpickler.load() diff --git a/app/lib/sqlalchemy/inspection.py b/app/lib/sqlalchemy/inspection.py new file mode 100644 index 0000000..4f6a5a0 --- /dev/null +++ b/app/lib/sqlalchemy/inspection.py @@ -0,0 +1,93 @@ +# sqlalchemy/inspect.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""The inspection module provides the :func:`.inspect` function, +which delivers runtime information about a wide variety +of SQLAlchemy objects, both within the Core as well as the +ORM. + +The :func:`.inspect` function is the entry point to SQLAlchemy's +public API for viewing the configuration and construction +of in-memory objects. 
Depending on the type of object +passed to :func:`.inspect`, the return value will either be +a related object which provides a known interface, or in many +cases it will return the object itself. + +The rationale for :func:`.inspect` is twofold. One is that +it replaces the need to be aware of a large variety of "information +getting" functions in SQLAlchemy, such as :meth:`.Inspector.from_engine`, +:func:`.orm.attributes.instance_state`, :func:`.orm.class_mapper`, +and others. The other is that the return value of :func:`.inspect` +is guaranteed to obey a documented API, thus allowing third party +tools which build on top of SQLAlchemy configurations to be constructed +in a forwards-compatible way. + +.. versionadded:: 0.8 The :func:`.inspect` system is introduced + as of version 0.8. + +""" + +from . import util, exc +_registrars = util.defaultdict(list) + + +def inspect(subject, raiseerr=True): + """Produce an inspection object for the given target. + + The returned value in some cases may be the + same object as the one given, such as if a + :class:`.Mapper` object is passed. In other + cases, it will be an instance of the registered + inspection type for the given object, such as + if an :class:`.engine.Engine` is passed, an + :class:`.Inspector` object is returned. + + :param subject: the subject to be inspected. + :param raiseerr: When ``True``, if the given subject + does not + correspond to a known SQLAlchemy inspected type, + :class:`sqlalchemy.exc.NoInspectionAvailable` + is raised. If ``False``, ``None`` is returned. + + """ + type_ = type(subject) + for cls in type_.__mro__: + if cls in _registrars: + reg = _registrars[cls] + if reg is True: + return subject + ret = reg(subject) + if ret is not None: + break + else: + reg = ret = None + + if raiseerr and ( + reg is None or ret is None + ): + raise exc.NoInspectionAvailable( + "No inspection system is " + "available for object of type %s" % + type_) + return ret + + +def _inspects(*types): + def decorate(fn_or_cls): + for type_ in types: + if type_ in _registrars: + raise AssertionError( + "Type %s is already " + "registered" % type_) + _registrars[type_] = fn_or_cls + return fn_or_cls + return decorate + + +def _self_inspects(cls): + _inspects(cls)(True) + return cls diff --git a/app/lib/sqlalchemy/interfaces.py b/app/lib/sqlalchemy/interfaces.py new file mode 100644 index 0000000..33f3cf1 --- /dev/null +++ b/app/lib/sqlalchemy/interfaces.py @@ -0,0 +1,312 @@ +# sqlalchemy/interfaces.py +# Copyright (C) 2007-2017 the SQLAlchemy authors and contributors +# +# Copyright (C) 2007 Jason Kirtland jek@discorporate.us +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Deprecated core event interfaces. + +This module is **deprecated** and is superseded by the +event system. + +""" + +from . import event, util + + +class PoolListener(object): + """Hooks into the lifecycle of connections in a :class:`.Pool`. + + .. note:: + + :class:`.PoolListener` is deprecated. Please + refer to :class:`.PoolEvents`. + + Usage:: + + class MyListener(PoolListener): + def connect(self, dbapi_con, con_record): + '''perform connect operations''' + # etc. 
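+            # (a listener may implement any subset of connect,
+            # first_connect, checkout and checkin; per the notes below,
+            # only the hooks actually defined are registered and invoked)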
+ + # create a new pool with a listener + p = QueuePool(..., listeners=[MyListener()]) + + # add a listener after the fact + p.add_listener(MyListener()) + + # usage with create_engine() + e = create_engine("url://", listeners=[MyListener()]) + + All of the standard connection :class:`~sqlalchemy.pool.Pool` types can + accept event listeners for key connection lifecycle events: + creation, pool check-out and check-in. There are no events fired + when a connection closes. + + For any given DB-API connection, there will be one ``connect`` + event, `n` number of ``checkout`` events, and either `n` or `n - 1` + ``checkin`` events. (If a ``Connection`` is detached from its + pool via the ``detach()`` method, it won't be checked back in.) + + These are low-level events for low-level objects: raw Python + DB-API connections, without the conveniences of the SQLAlchemy + ``Connection`` wrapper, ``Dialect`` services or ``ClauseElement`` + execution. If you execute SQL through the connection, explicitly + closing all cursors and other resources is recommended. + + Events also receive a ``_ConnectionRecord``, a long-lived internal + ``Pool`` object that basically represents a "slot" in the + connection pool. ``_ConnectionRecord`` objects have one public + attribute of note: ``info``, a dictionary whose contents are + scoped to the lifetime of the DB-API connection managed by the + record. You can use this shared storage area however you like. + + There is no need to subclass ``PoolListener`` to handle events. + Any class that implements one or more of these methods can be used + as a pool listener. The ``Pool`` will inspect the methods + provided by a listener object and add the listener to one or more + internal event queues based on its capabilities. In terms of + efficiency and function call overhead, you're much better off only + providing implementations for the hooks you'll be using. + + """ + + @classmethod + def _adapt_listener(cls, self, listener): + """Adapt a :class:`.PoolListener` to individual + :class:`event.Dispatch` events. + + """ + + listener = util.as_interface(listener, + methods=('connect', 'first_connect', + 'checkout', 'checkin')) + if hasattr(listener, 'connect'): + event.listen(self, 'connect', listener.connect) + if hasattr(listener, 'first_connect'): + event.listen(self, 'first_connect', listener.first_connect) + if hasattr(listener, 'checkout'): + event.listen(self, 'checkout', listener.checkout) + if hasattr(listener, 'checkin'): + event.listen(self, 'checkin', listener.checkin) + + def connect(self, dbapi_con, con_record): + """Called once for each new DB-API connection or Pool's ``creator()``. + + dbapi_con + A newly connected raw DB-API connection (not a SQLAlchemy + ``Connection`` wrapper). + + con_record + The ``_ConnectionRecord`` that persistently manages the connection + + """ + + def first_connect(self, dbapi_con, con_record): + """Called exactly once for the first DB-API connection. + + dbapi_con + A newly connected raw DB-API connection (not a SQLAlchemy + ``Connection`` wrapper). + + con_record + The ``_ConnectionRecord`` that persistently manages the connection + + """ + + def checkout(self, dbapi_con, con_record, con_proxy): + """Called when a connection is retrieved from the Pool. + + dbapi_con + A raw DB-API connection + + con_record + The ``_ConnectionRecord`` that persistently manages the connection + + con_proxy + The ``_ConnectionFairy`` which manages the connection for the span of + the current checkout. 
+ + If you raise an ``exc.DisconnectionError``, the current + connection will be disposed and a fresh connection retrieved. + Processing of all checkout listeners will abort and restart + using the new connection. + """ + + def checkin(self, dbapi_con, con_record): + """Called when a connection returns to the pool. + + Note that the connection may be closed, and may be None if the + connection has been invalidated. ``checkin`` will not be called + for detached connections. (They do not return to the pool.) + + dbapi_con + A raw DB-API connection + + con_record + The ``_ConnectionRecord`` that persistently manages the connection + + """ + + +class ConnectionProxy(object): + """Allows interception of statement execution by Connections. + + .. note:: + + :class:`.ConnectionProxy` is deprecated. Please + refer to :class:`.ConnectionEvents`. + + Either or both of the ``execute()`` and ``cursor_execute()`` + may be implemented to intercept compiled statement and + cursor level executions, e.g.:: + + class MyProxy(ConnectionProxy): + def execute(self, conn, execute, clauseelement, + *multiparams, **params): + print "compiled statement:", clauseelement + return execute(clauseelement, *multiparams, **params) + + def cursor_execute(self, execute, cursor, statement, + parameters, context, executemany): + print "raw statement:", statement + return execute(cursor, statement, parameters, context) + + The ``execute`` argument is a function that will fulfill the default + execution behavior for the operation. The signature illustrated + in the example should be used. + + The proxy is installed into an :class:`~sqlalchemy.engine.Engine` via + the ``proxy`` argument:: + + e = create_engine('someurl://', proxy=MyProxy()) + + """ + + @classmethod + def _adapt_listener(cls, self, listener): + + def adapt_execute(conn, clauseelement, multiparams, params): + + def execute_wrapper(clauseelement, *multiparams, **params): + return clauseelement, multiparams, params + + return listener.execute(conn, execute_wrapper, + clauseelement, *multiparams, + **params) + + event.listen(self, 'before_execute', adapt_execute) + + def adapt_cursor_execute(conn, cursor, statement, + parameters, context, executemany): + + def execute_wrapper( + cursor, + statement, + parameters, + context, + ): + return statement, parameters + + return listener.cursor_execute( + execute_wrapper, + cursor, + statement, + parameters, + context, + executemany, + ) + + event.listen(self, 'before_cursor_execute', adapt_cursor_execute) + + def do_nothing_callback(*arg, **kw): + pass + + def adapt_listener(fn): + + def go(conn, *arg, **kw): + fn(conn, do_nothing_callback, *arg, **kw) + + return util.update_wrapper(go, fn) + + event.listen(self, 'begin', adapt_listener(listener.begin)) + event.listen(self, 'rollback', + adapt_listener(listener.rollback)) + event.listen(self, 'commit', adapt_listener(listener.commit)) + event.listen(self, 'savepoint', + adapt_listener(listener.savepoint)) + event.listen(self, 'rollback_savepoint', + adapt_listener(listener.rollback_savepoint)) + event.listen(self, 'release_savepoint', + adapt_listener(listener.release_savepoint)) + event.listen(self, 'begin_twophase', + adapt_listener(listener.begin_twophase)) + event.listen(self, 'prepare_twophase', + adapt_listener(listener.prepare_twophase)) + event.listen(self, 'rollback_twophase', + adapt_listener(listener.rollback_twophase)) + event.listen(self, 'commit_twophase', + adapt_listener(listener.commit_twophase)) + + def execute(self, conn, execute, clauseelement, 
*multiparams, **params): + """Intercept high level execute() events.""" + + return execute(clauseelement, *multiparams, **params) + + def cursor_execute(self, execute, cursor, statement, parameters, + context, executemany): + """Intercept low-level cursor execute() events.""" + + return execute(cursor, statement, parameters, context) + + def begin(self, conn, begin): + """Intercept begin() events.""" + + return begin() + + def rollback(self, conn, rollback): + """Intercept rollback() events.""" + + return rollback() + + def commit(self, conn, commit): + """Intercept commit() events.""" + + return commit() + + def savepoint(self, conn, savepoint, name=None): + """Intercept savepoint() events.""" + + return savepoint(name=name) + + def rollback_savepoint(self, conn, rollback_savepoint, name, context): + """Intercept rollback_savepoint() events.""" + + return rollback_savepoint(name, context) + + def release_savepoint(self, conn, release_savepoint, name, context): + """Intercept release_savepoint() events.""" + + return release_savepoint(name, context) + + def begin_twophase(self, conn, begin_twophase, xid): + """Intercept begin_twophase() events.""" + + return begin_twophase(xid) + + def prepare_twophase(self, conn, prepare_twophase, xid): + """Intercept prepare_twophase() events.""" + + return prepare_twophase(xid) + + def rollback_twophase(self, conn, rollback_twophase, xid, is_prepared): + """Intercept rollback_twophase() events.""" + + return rollback_twophase(xid, is_prepared) + + def commit_twophase(self, conn, commit_twophase, xid, is_prepared): + """Intercept commit_twophase() events.""" + + return commit_twophase(xid, is_prepared) diff --git a/app/lib/sqlalchemy/log.py b/app/lib/sqlalchemy/log.py new file mode 100644 index 0000000..279538a --- /dev/null +++ b/app/lib/sqlalchemy/log.py @@ -0,0 +1,217 @@ +# sqlalchemy/log.py +# Copyright (C) 2006-2017 the SQLAlchemy authors and contributors +# +# Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Logging control and utilities. + +Control of logging for SA can be performed from the regular python logging +module. The regular dotted module namespace is used, starting at +'sqlalchemy'. For class-level logging, the class name is appended. + +The "echo" keyword parameter, available on SQLA :class:`.Engine` +and :class:`.Pool` objects, corresponds to a logger specific to that +instance only. + +""" + +import logging +import sys + +# set initial level to WARN. This so that +# log statements don't occur in the absence of explicit +# logging being enabled for 'sqlalchemy'. +rootlogger = logging.getLogger('sqlalchemy') +if rootlogger.level == logging.NOTSET: + rootlogger.setLevel(logging.WARN) + + +def _add_default_handler(logger): + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(logging.Formatter( + '%(asctime)s %(levelname)s %(name)s %(message)s')) + logger.addHandler(handler) + + +_logged_classes = set() + + +def class_logger(cls): + logger = logging.getLogger(cls.__module__ + "." 
+ cls.__name__) + cls._should_log_debug = lambda self: logger.isEnabledFor(logging.DEBUG) + cls._should_log_info = lambda self: logger.isEnabledFor(logging.INFO) + cls.logger = logger + _logged_classes.add(cls) + return cls + + +class Identified(object): + logging_name = None + + def _should_log_debug(self): + return self.logger.isEnabledFor(logging.DEBUG) + + def _should_log_info(self): + return self.logger.isEnabledFor(logging.INFO) + + +class InstanceLogger(object): + """A logger adapter (wrapper) for :class:`.Identified` subclasses. + + This allows multiple instances (e.g. Engine or Pool instances) + to share a logger, but have its verbosity controlled on a + per-instance basis. + + The basic functionality is to return a logging level + which is based on an instance's echo setting. + + Default implementation is: + + 'debug' -> logging.DEBUG + True -> logging.INFO + False -> Effective level of underlying logger + (logging.WARNING by default) + None -> same as False + """ + + # Map echo settings to logger levels + _echo_map = { + None: logging.NOTSET, + False: logging.NOTSET, + True: logging.INFO, + 'debug': logging.DEBUG, + } + + def __init__(self, echo, name): + self.echo = echo + self.logger = logging.getLogger(name) + + # if echo flag is enabled and no handlers, + # add a handler to the list + if self._echo_map[echo] <= logging.INFO \ + and not self.logger.handlers: + _add_default_handler(self.logger) + + # + # Boilerplate convenience methods + # + def debug(self, msg, *args, **kwargs): + """Delegate a debug call to the underlying logger.""" + + self.log(logging.DEBUG, msg, *args, **kwargs) + + def info(self, msg, *args, **kwargs): + """Delegate an info call to the underlying logger.""" + + self.log(logging.INFO, msg, *args, **kwargs) + + def warning(self, msg, *args, **kwargs): + """Delegate a warning call to the underlying logger.""" + + self.log(logging.WARNING, msg, *args, **kwargs) + + warn = warning + + def error(self, msg, *args, **kwargs): + """ + Delegate an error call to the underlying logger. + """ + self.log(logging.ERROR, msg, *args, **kwargs) + + def exception(self, msg, *args, **kwargs): + """Delegate an exception call to the underlying logger.""" + + kwargs["exc_info"] = 1 + self.log(logging.ERROR, msg, *args, **kwargs) + + def critical(self, msg, *args, **kwargs): + """Delegate a critical call to the underlying logger.""" + + self.log(logging.CRITICAL, msg, *args, **kwargs) + + def log(self, level, msg, *args, **kwargs): + """Delegate a log call to the underlying logger. + + The level here is determined by the echo + flag as well as that of the underlying logger, and + logger._log() is called directly. + + """ + + # inline the logic from isEnabledFor(), + # getEffectiveLevel(), to avoid overhead. 
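+        # (the order is: first honor the global logging.disable()
+        # threshold, then map the instance's echo flag to a level, and
+        # finally fall back to the wrapped logger's effective level
+        # when echo is unset)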
+ + if self.logger.manager.disable >= level: + return + + selected_level = self._echo_map[self.echo] + if selected_level == logging.NOTSET: + selected_level = self.logger.getEffectiveLevel() + + if level >= selected_level: + self.logger._log(level, msg, args, **kwargs) + + def isEnabledFor(self, level): + """Is this logger enabled for level 'level'?""" + + if self.logger.manager.disable >= level: + return False + return level >= self.getEffectiveLevel() + + def getEffectiveLevel(self): + """What's the effective level for this logger?""" + + level = self._echo_map[self.echo] + if level == logging.NOTSET: + level = self.logger.getEffectiveLevel() + return level + + +def instance_logger(instance, echoflag=None): + """create a logger for an instance that implements :class:`.Identified`.""" + + if instance.logging_name: + name = "%s.%s.%s" % (instance.__class__.__module__, + instance.__class__.__name__, + instance.logging_name) + else: + name = "%s.%s" % (instance.__class__.__module__, + instance.__class__.__name__) + + instance._echo = echoflag + + if echoflag in (False, None): + # if no echo setting or False, return a Logger directly, + # avoiding overhead of filtering + logger = logging.getLogger(name) + else: + # if a specified echo flag, return an EchoLogger, + # which checks the flag, overrides normal log + # levels by calling logger._log() + logger = InstanceLogger(echoflag, name) + + instance.logger = logger + + +class echo_property(object): + __doc__ = """\ + When ``True``, enable log output for this element. + + This has the effect of setting the Python logging level for the namespace + of this element's class and object reference. A value of boolean ``True`` + indicates that the loglevel ``logging.INFO`` will be set for the logger, + whereas the string value ``debug`` will set the loglevel to + ``logging.DEBUG``. + """ + + def __get__(self, instance, owner): + if instance is None: + return self + else: + return instance._echo + + def __set__(self, instance, value): + instance_logger(instance, echoflag=value) diff --git a/app/lib/sqlalchemy/orm/__init__.py b/app/lib/sqlalchemy/orm/__init__.py new file mode 100644 index 0000000..4491735 --- /dev/null +++ b/app/lib/sqlalchemy/orm/__init__.py @@ -0,0 +1,276 @@ +# orm/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +Functional constructs for ORM configuration. + +See the SQLAlchemy object relational tutorial and mapper configuration +documentation for an overview of how this module is used. + +""" + +from . import exc +from .mapper import ( + Mapper, + _mapper_registry, + class_mapper, + configure_mappers, + reconstructor, + validates +) +from .interfaces import ( + EXT_CONTINUE, + EXT_STOP, + PropComparator, +) +from .deprecated_interfaces import ( + MapperExtension, + SessionExtension, + AttributeExtension, +) +from .util import ( + aliased, + join, + object_mapper, + outerjoin, + polymorphic_union, + was_deleted, + with_parent, + with_polymorphic, +) +from .properties import ColumnProperty +from .relationships import RelationshipProperty +from .descriptor_props import ( + ComparableProperty, + CompositeProperty, + SynonymProperty, +) +from .relationships import ( + foreign, + remote, +) +from .session import ( + Session, + object_session, + sessionmaker, + make_transient, + make_transient_to_detached +) +from .scoping import ( + scoped_session +) +from . 
import mapper as mapperlib +from .query import AliasOption, Query, Bundle +from ..util.langhelpers import public_factory +from .. import util as _sa_util +from . import strategies as _strategies + + +def create_session(bind=None, **kwargs): + r"""Create a new :class:`.Session` + with no automation enabled by default. + + This function is used primarily for testing. The usual + route to :class:`.Session` creation is via its constructor + or the :func:`.sessionmaker` function. + + :param bind: optional, a single Connectable to use for all + database access in the created + :class:`~sqlalchemy.orm.session.Session`. + + :param \*\*kwargs: optional, passed through to the + :class:`.Session` constructor. + + :returns: an :class:`~sqlalchemy.orm.session.Session` instance + + The defaults of create_session() are the opposite of that of + :func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are + False, ``autocommit`` is True. In this sense the session acts + more like the "classic" SQLAlchemy 0.3 session with these. + + Usage:: + + >>> from sqlalchemy.orm import create_session + >>> session = create_session() + + It is recommended to use :func:`sessionmaker` instead of + create_session(). + + """ + kwargs.setdefault('autoflush', False) + kwargs.setdefault('autocommit', True) + kwargs.setdefault('expire_on_commit', False) + return Session(bind=bind, **kwargs) + +relationship = public_factory(RelationshipProperty, ".orm.relationship") + + +def relation(*arg, **kw): + """A synonym for :func:`relationship`.""" + + return relationship(*arg, **kw) + + +def dynamic_loader(argument, **kw): + """Construct a dynamically-loading mapper property. + + This is essentially the same as + using the ``lazy='dynamic'`` argument with :func:`relationship`:: + + dynamic_loader(SomeClass) + + # is the same as + + relationship(SomeClass, lazy="dynamic") + + See the section :ref:`dynamic_relationship` for more details + on dynamic loading. + + """ + kw['lazy'] = 'dynamic' + return relationship(argument, **kw) + + +column_property = public_factory(ColumnProperty, ".orm.column_property") +composite = public_factory(CompositeProperty, ".orm.composite") + + +def backref(name, **kwargs): + """Create a back reference with explicit keyword arguments, which are the + same arguments one can send to :func:`relationship`. + + Used with the ``backref`` keyword argument to :func:`relationship` in + place of a string argument, e.g.:: + + 'items':relationship( + SomeItem, backref=backref('parent', lazy='subquery')) + + .. seealso:: + + :ref:`relationships_backref` + + """ + + return (name, kwargs) + + +def deferred(*columns, **kw): + r"""Indicate a column-based mapped attribute that by default will + not load unless accessed. + + :param \*columns: columns to be mapped. This is typically a single + :class:`.Column` object, however a collection is supported in order + to support multiple columns mapped under the same attribute. + + :param \**kw: additional keyword arguments passed to + :class:`.ColumnProperty`. + + .. seealso:: + + :ref:`deferred` + + """ + return ColumnProperty(deferred=True, *columns, **kw) + + +mapper = public_factory(Mapper, ".orm.mapper") + +synonym = public_factory(SynonymProperty, ".orm.synonym") + +comparable_property = public_factory(ComparableProperty, + ".orm.comparable_property") + + +@_sa_util.deprecated("0.7", message=":func:`.compile_mappers` " + "is renamed to :func:`.configure_mappers`") +def compile_mappers(): + """Initialize the inter-mapper relationships of all mappers that have + been defined. 
+ + """ + configure_mappers() + + +def clear_mappers(): + """Remove all mappers from all classes. + + This function removes all instrumentation from classes and disposes + of their associated mappers. Once called, the classes are unmapped + and can be later re-mapped with new mappers. + + :func:`.clear_mappers` is *not* for normal use, as there is literally no + valid usage for it outside of very specific testing scenarios. Normally, + mappers are permanent structural components of user-defined classes, and + are never discarded independently of their class. If a mapped class + itself is garbage collected, its mapper is automatically disposed of as + well. As such, :func:`.clear_mappers` is only for usage in test suites + that re-use the same classes with different mappings, which is itself an + extremely rare use case - the only such use case is in fact SQLAlchemy's + own test suite, and possibly the test suites of other ORM extension + libraries which intend to test various combinations of mapper construction + upon a fixed set of classes. + + """ + mapperlib._CONFIGURE_MUTEX.acquire() + try: + while _mapper_registry: + try: + # can't even reliably call list(weakdict) in jython + mapper, b = _mapper_registry.popitem() + mapper.dispose() + except KeyError: + pass + finally: + mapperlib._CONFIGURE_MUTEX.release() + +from . import strategy_options + +joinedload = strategy_options.joinedload._unbound_fn +joinedload_all = strategy_options.joinedload._unbound_all_fn +contains_eager = strategy_options.contains_eager._unbound_fn +defer = strategy_options.defer._unbound_fn +undefer = strategy_options.undefer._unbound_fn +undefer_group = strategy_options.undefer_group._unbound_fn +load_only = strategy_options.load_only._unbound_fn +lazyload = strategy_options.lazyload._unbound_fn +lazyload_all = strategy_options.lazyload_all._unbound_all_fn +subqueryload = strategy_options.subqueryload._unbound_fn +subqueryload_all = strategy_options.subqueryload_all._unbound_all_fn +immediateload = strategy_options.immediateload._unbound_fn +noload = strategy_options.noload._unbound_fn +raiseload = strategy_options.raiseload._unbound_fn +defaultload = strategy_options.defaultload._unbound_fn + +from .strategy_options import Load + + +def eagerload(*args, **kwargs): + """A synonym for :func:`joinedload()`.""" + return joinedload(*args, **kwargs) + + +def eagerload_all(*args, **kwargs): + """A synonym for :func:`joinedload_all()`""" + return joinedload_all(*args, **kwargs) + + +contains_alias = public_factory(AliasOption, ".orm.contains_alias") + + +def __go(lcls): + global __all__ + from .. import util as sa_util + from . import dynamic + from . import events + import inspect as _inspect + + __all__ = sorted(name for name, obj in lcls.items() + if not (name.startswith('_') or _inspect.ismodule(obj))) + + _sa_util.dependencies.resolve_all("sqlalchemy.orm") + +__go(locals()) diff --git a/app/lib/sqlalchemy/orm/attributes.py b/app/lib/sqlalchemy/orm/attributes.py new file mode 100644 index 0000000..fc81db7 --- /dev/null +++ b/app/lib/sqlalchemy/orm/attributes.py @@ -0,0 +1,1617 @@ +# orm/attributes.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Defines instrumentation for class attributes and their interaction +with instances. + +This module is usually not directly visible to user applications, but +defines a large part of the ORM's interactivity. 
+ + +""" + +import operator +from .. import util, event, inspection +from . import interfaces, collections, exc as orm_exc + +from .base import instance_state, instance_dict, manager_of_class + +from .base import PASSIVE_NO_RESULT, ATTR_WAS_SET, ATTR_EMPTY, NO_VALUE,\ + NEVER_SET, NO_CHANGE, CALLABLES_OK, SQL_OK, RELATED_OBJECT_OK,\ + INIT_OK, NON_PERSISTENT_OK, LOAD_AGAINST_COMMITTED, PASSIVE_OFF,\ + PASSIVE_RETURN_NEVER_SET, PASSIVE_NO_INITIALIZE, PASSIVE_NO_FETCH,\ + PASSIVE_NO_FETCH_RELATED, PASSIVE_ONLY_PERSISTENT, NO_AUTOFLUSH +from .base import state_str, instance_str + + +@inspection._self_inspects +class QueryableAttribute(interfaces._MappedAttribute, + interfaces.InspectionAttr, + interfaces.PropComparator): + """Base class for :term:`descriptor` objects that intercept + attribute events on behalf of a :class:`.MapperProperty` + object. The actual :class:`.MapperProperty` is accessible + via the :attr:`.QueryableAttribute.property` + attribute. + + + .. seealso:: + + :class:`.InstrumentedAttribute` + + :class:`.MapperProperty` + + :attr:`.Mapper.all_orm_descriptors` + + :attr:`.Mapper.attrs` + """ + + is_attribute = True + + def __init__(self, class_, key, impl=None, + comparator=None, parententity=None, + of_type=None): + self.class_ = class_ + self.key = key + self.impl = impl + self.comparator = comparator + self._parententity = parententity + self._of_type = of_type + + manager = manager_of_class(class_) + # manager is None in the case of AliasedClass + if manager: + # propagate existing event listeners from + # immediate superclass + for base in manager._bases: + if key in base: + self.dispatch._update(base[key].dispatch) + + @util.memoized_property + def _supports_population(self): + return self.impl.supports_population + + def get_history(self, instance, passive=PASSIVE_OFF): + return self.impl.get_history(instance_state(instance), + instance_dict(instance), passive) + + def __selectable__(self): + # TODO: conditionally attach this method based on clause_element ? + return self + + @util.memoized_property + def info(self): + """Return the 'info' dictionary for the underlying SQL element. + + The behavior here is as follows: + + * If the attribute is a column-mapped property, i.e. + :class:`.ColumnProperty`, which is mapped directly + to a schema-level :class:`.Column` object, this attribute + will return the :attr:`.SchemaItem.info` dictionary associated + with the core-level :class:`.Column` object. + + * If the attribute is a :class:`.ColumnProperty` but is mapped to + any other kind of SQL expression other than a :class:`.Column`, + the attribute will refer to the :attr:`.MapperProperty.info` + dictionary associated directly with the :class:`.ColumnProperty`, + assuming the SQL expression itself does not have its own ``.info`` + attribute (which should be the case, unless a user-defined SQL + construct has defined one). + + * If the attribute refers to any other kind of + :class:`.MapperProperty`, including :class:`.RelationshipProperty`, + the attribute will refer to the :attr:`.MapperProperty.info` + dictionary associated with that :class:`.MapperProperty`. + + * To access the :attr:`.MapperProperty.info` dictionary of the + :class:`.MapperProperty` unconditionally, including for a + :class:`.ColumnProperty` that's associated directly with a + :class:`.schema.Column`, the attribute can be referred to using + :attr:`.QueryableAttribute.property` attribute, as + ``MyClass.someattribute.property.info``. + + .. versionadded:: 0.8.0 + + .. 
seealso:: + + :attr:`.SchemaItem.info` + + :attr:`.MapperProperty.info` + + """ + return self.comparator.info + + @util.memoized_property + def parent(self): + """Return an inspection instance representing the parent. + + This will be either an instance of :class:`.Mapper` + or :class:`.AliasedInsp`, depending upon the nature + of the parent entity which this attribute is associated + with. + + """ + return inspection.inspect(self._parententity) + + @property + def expression(self): + return self.comparator.__clause_element__() + + def __clause_element__(self): + return self.comparator.__clause_element__() + + def _query_clause_element(self): + """like __clause_element__(), but called specifically + by :class:`.Query` to allow special behavior.""" + + return self.comparator._query_clause_element() + + def adapt_to_entity(self, adapt_to_entity): + assert not self._of_type + return self.__class__(adapt_to_entity.entity, + self.key, impl=self.impl, + comparator=self.comparator.adapt_to_entity( + adapt_to_entity), + parententity=adapt_to_entity) + + def of_type(self, cls): + return QueryableAttribute( + self.class_, + self.key, + self.impl, + self.comparator.of_type(cls), + self._parententity, + of_type=cls) + + def label(self, name): + return self._query_clause_element().label(name) + + def operate(self, op, *other, **kwargs): + return op(self.comparator, *other, **kwargs) + + def reverse_operate(self, op, other, **kwargs): + return op(other, self.comparator, **kwargs) + + def hasparent(self, state, optimistic=False): + return self.impl.hasparent(state, optimistic=optimistic) is not False + + def __getattr__(self, key): + try: + return getattr(self.comparator, key) + except AttributeError: + raise AttributeError( + 'Neither %r object nor %r object associated with %s ' + 'has an attribute %r' % ( + type(self).__name__, + type(self.comparator).__name__, + self, + key) + ) + + def __str__(self): + return "%s.%s" % (self.class_.__name__, self.key) + + @util.memoized_property + def property(self): + """Return the :class:`.MapperProperty` associated with this + :class:`.QueryableAttribute`. + + + Return values here will commonly be instances of + :class:`.ColumnProperty` or :class:`.RelationshipProperty`. + + + """ + return self.comparator.property + + +class InstrumentedAttribute(QueryableAttribute): + """Class bound instrumented attribute which adds basic + :term:`descriptor` methods. + + See :class:`.QueryableAttribute` for a description of most features. + + + """ + + def __set__(self, instance, value): + self.impl.set(instance_state(instance), + instance_dict(instance), value, None) + + def __delete__(self, instance): + self.impl.delete(instance_state(instance), instance_dict(instance)) + + def __get__(self, instance, owner): + if instance is None: + return self + + dict_ = instance_dict(instance) + if self._supports_population and self.key in dict_: + return dict_[self.key] + else: + return self.impl.get(instance_state(instance), dict_) + + +def create_proxied_attribute(descriptor): + """Create an QueryableAttribute / user descriptor hybrid. + + Returns a new QueryableAttribute type that delegates descriptor + behavior and getattr() to the given descriptor. + """ + + # TODO: can move this to descriptor_props if the need for this + # function is removed from ext/hybrid.py + + class Proxy(QueryableAttribute): + """Presents the :class:`.QueryableAttribute` interface as a + proxy on top of a Python descriptor / :class:`.PropComparator` + combination. 
+ + """ + + def __init__(self, class_, key, descriptor, + comparator, + adapt_to_entity=None, doc=None, + original_property=None): + self.class_ = class_ + self.key = key + self.descriptor = descriptor + self.original_property = original_property + self._comparator = comparator + self._adapt_to_entity = adapt_to_entity + self.__doc__ = doc + + @property + def property(self): + return self.comparator.property + + @util.memoized_property + def comparator(self): + if util.callable(self._comparator): + self._comparator = self._comparator() + if self._adapt_to_entity: + self._comparator = self._comparator.adapt_to_entity( + self._adapt_to_entity) + return self._comparator + + def adapt_to_entity(self, adapt_to_entity): + return self.__class__(adapt_to_entity.entity, + self.key, + self.descriptor, + self._comparator, + adapt_to_entity) + + def __get__(self, instance, owner): + if instance is None: + return self + else: + return self.descriptor.__get__(instance, owner) + + def __str__(self): + return "%s.%s" % (self.class_.__name__, self.key) + + def __getattr__(self, attribute): + """Delegate __getattr__ to the original descriptor and/or + comparator.""" + + try: + return getattr(descriptor, attribute) + except AttributeError: + try: + return getattr(self.comparator, attribute) + except AttributeError: + raise AttributeError( + 'Neither %r object nor %r object associated with %s ' + 'has an attribute %r' % ( + type(descriptor).__name__, + type(self.comparator).__name__, + self, + attribute) + ) + + Proxy.__name__ = type(descriptor).__name__ + 'Proxy' + + util.monkeypatch_proxied_specials(Proxy, type(descriptor), + name='descriptor', + from_instance=descriptor) + return Proxy + +OP_REMOVE = util.symbol("REMOVE") +OP_APPEND = util.symbol("APPEND") +OP_REPLACE = util.symbol("REPLACE") + + +class Event(object): + """A token propagated throughout the course of a chain of attribute + events. + + Serves as an indicator of the source of the event and also provides + a means of controlling propagation across a chain of attribute + operations. + + The :class:`.Event` object is sent as the ``initiator`` argument + when dealing with the :meth:`.AttributeEvents.append`, + :meth:`.AttributeEvents.set`, + and :meth:`.AttributeEvents.remove` events. + + The :class:`.Event` object is currently interpreted by the backref + event handlers, and is used to control the propagation of operations + across two mutually-dependent attributes. + + .. versionadded:: 0.9.0 + + :var impl: The :class:`.AttributeImpl` which is the current event + initiator. + + :var op: The symbol :attr:`.OP_APPEND`, :attr:`.OP_REMOVE` or + :attr:`.OP_REPLACE`, indicating the source operation. + + """ + + __slots__ = 'impl', 'op', 'parent_token' + + def __init__(self, attribute_impl, op): + self.impl = attribute_impl + self.op = op + self.parent_token = self.impl.parent_token + + def __eq__(self, other): + return isinstance(other, Event) and \ + other.impl is self.impl and \ + other.op == self.op + + @property + def key(self): + return self.impl.key + + def hasparent(self, state): + return self.impl.hasparent(state) + + +class AttributeImpl(object): + """internal implementation for instrumented attributes.""" + + def __init__(self, class_, key, + callable_, dispatch, trackparent=False, extension=None, + compare_function=None, active_history=False, + parent_token=None, expire_missing=True, + send_modified_events=True, + **kwargs): + r"""Construct an AttributeImpl. 
+ + \class_ + associated class + + key + string name of the attribute + + \callable_ + optional function which generates a callable based on a parent + instance, which produces the "default" values for a scalar or + collection attribute when it's first accessed, if not present + already. + + trackparent + if True, attempt to track if an instance has a parent attached + to it via this attribute. + + extension + a single or list of AttributeExtension object(s) which will + receive set/delete/append/remove/etc. events. Deprecated. + The event package is now used. + + compare_function + a function that compares two values which are normally + assignable to this attribute. + + active_history + indicates that get_history() should always return the "old" value, + even if it means executing a lazy callable upon attribute change. + + parent_token + Usually references the MapperProperty, used as a key for + the hasparent() function to identify an "owning" attribute. + Allows multiple AttributeImpls to all match a single + owner attribute. + + expire_missing + if False, don't add an "expiry" callable to this attribute + during state.expire_attributes(None), if no value is present + for this key. + + send_modified_events + if False, the InstanceState._modified_event method will have no + effect; this means the attribute will never show up as changed in a + history entry. + """ + self.class_ = class_ + self.key = key + self.callable_ = callable_ + self.dispatch = dispatch + self.trackparent = trackparent + self.parent_token = parent_token or self + self.send_modified_events = send_modified_events + if compare_function is None: + self.is_equal = operator.eq + else: + self.is_equal = compare_function + + # TODO: pass in the manager here + # instead of doing a lookup + attr = manager_of_class(class_)[key] + + for ext in util.to_list(extension or []): + ext._adapt_listener(attr, ext) + + if active_history: + self.dispatch._active_history = True + + self.expire_missing = expire_missing + + __slots__ = ( + 'class_', 'key', 'callable_', 'dispatch', 'trackparent', + 'parent_token', 'send_modified_events', 'is_equal', 'expire_missing' + ) + + def __str__(self): + return "%s.%s" % (self.class_.__name__, self.key) + + def _get_active_history(self): + """Backwards compat for impl.active_history""" + + return self.dispatch._active_history + + def _set_active_history(self, value): + self.dispatch._active_history = value + + active_history = property(_get_active_history, _set_active_history) + + def hasparent(self, state, optimistic=False): + """Return the boolean value of a `hasparent` flag attached to + the given state. + + The `optimistic` flag determines what the default return value + should be if no `hasparent` flag can be located. + + As this function is used to determine if an instance is an + *orphan*, instances that were loaded from storage should be + assumed to not be orphans, until a True/False value for this + flag is set. + + An instance attribute that is loaded by a callable function + will also not have a `hasparent` flag. + + """ + msg = "This AttributeImpl is not configured to track parents." + assert self.trackparent, msg + + return state.parents.get(id(self.parent_token), optimistic) \ + is not False + + def sethasparent(self, state, parent_state, value): + """Set a boolean flag on the given item corresponding to + whether or not it is attached to a parent object via the + attribute represented by this ``InstrumentedAttribute``. 
+ + """ + msg = "This AttributeImpl is not configured to track parents." + assert self.trackparent, msg + + id_ = id(self.parent_token) + if value: + state.parents[id_] = parent_state + else: + if id_ in state.parents: + last_parent = state.parents[id_] + + if last_parent is not False and \ + last_parent.key != parent_state.key: + + if last_parent.obj() is None: + raise orm_exc.StaleDataError( + "Removing state %s from parent " + "state %s along attribute '%s', " + "but the parent record " + "has gone stale, can't be sure this " + "is the most recent parent." % + (state_str(state), + state_str(parent_state), + self.key)) + + return + + state.parents[id_] = False + + def get_history(self, state, dict_, passive=PASSIVE_OFF): + raise NotImplementedError() + + def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE): + """Return a list of tuples of (state, obj) + for all objects in this attribute's current state + + history. + + Only applies to object-based attributes. + + This is an inlining of existing functionality + which roughly corresponds to: + + get_state_history( + state, + key, + passive=PASSIVE_NO_INITIALIZE).sum() + + """ + raise NotImplementedError() + + def initialize(self, state, dict_): + """Initialize the given state's attribute with an empty value.""" + + value = None + for fn in self.dispatch.init_scalar: + ret = fn(state, value, dict_) + if ret is not ATTR_EMPTY: + value = ret + + return value + + def get(self, state, dict_, passive=PASSIVE_OFF): + """Retrieve a value from the given object. + If a callable is assembled on this object's attribute, and + passive is False, the callable will be executed and the + resulting value will be set as the new value for this attribute. + """ + if self.key in dict_: + return dict_[self.key] + else: + # if history present, don't load + key = self.key + if key not in state.committed_state or \ + state.committed_state[key] is NEVER_SET: + if not passive & CALLABLES_OK: + return PASSIVE_NO_RESULT + + if key in state.expired_attributes: + value = state._load_expired(state, passive) + elif key in state.callables: + callable_ = state.callables[key] + value = callable_(state, passive) + elif self.callable_: + value = self.callable_(state, passive) + else: + value = ATTR_EMPTY + + if value is PASSIVE_NO_RESULT or value is NEVER_SET: + return value + elif value is ATTR_WAS_SET: + try: + return dict_[key] + except KeyError: + # TODO: no test coverage here. 
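+                    # ATTR_WAS_SET is a promise that the loader populated
+                    # dict_[key] itself; a KeyError here means the deferred
+                    # loader broke that contract.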
+ raise KeyError( + "Deferred loader for attribute " + "%r failed to populate " + "correctly" % key) + elif value is not ATTR_EMPTY: + return self.set_committed_value(state, dict_, value) + + if not passive & INIT_OK: + return NEVER_SET + else: + # Return a new, empty value + return self.initialize(state, dict_) + + def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF): + self.set(state, dict_, value, initiator, passive=passive) + + def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF): + self.set(state, dict_, None, initiator, + passive=passive, check_old=value) + + def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF): + self.set(state, dict_, None, initiator, + passive=passive, check_old=value, pop=True) + + def set(self, state, dict_, value, initiator, + passive=PASSIVE_OFF, check_old=None, pop=False): + raise NotImplementedError() + + def get_committed_value(self, state, dict_, passive=PASSIVE_OFF): + """return the unchanged value of this attribute""" + + if self.key in state.committed_state: + value = state.committed_state[self.key] + if value in (NO_VALUE, NEVER_SET): + return None + else: + return value + else: + return self.get(state, dict_, passive=passive) + + def set_committed_value(self, state, dict_, value): + """set an attribute value on the given instance and 'commit' it.""" + + dict_[self.key] = value + state._commit(dict_, [self.key]) + return value + + +class ScalarAttributeImpl(AttributeImpl): + """represents a scalar value-holding InstrumentedAttribute.""" + + accepts_scalar_loader = True + uses_objects = False + supports_population = True + collection = False + + __slots__ = '_replace_token', '_append_token', '_remove_token' + + def __init__(self, *arg, **kw): + super(ScalarAttributeImpl, self).__init__(*arg, **kw) + self._replace_token = self._append_token = None + self._remove_token = None + + def _init_append_token(self): + self._replace_token = self._append_token = Event(self, OP_REPLACE) + return self._replace_token + + _init_append_or_replace_token = _init_append_token + + def _init_remove_token(self): + self._remove_token = Event(self, OP_REMOVE) + return self._remove_token + + def delete(self, state, dict_): + + # TODO: catch key errors, convert to attributeerror? 
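+        # capture the old value first (emitting a load if active_history
+        # is set), fire remove events if any are registered, then drop
+        # the key from the instance dict.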
+        if self.dispatch._active_history:
+            old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
+        else:
+            old = dict_.get(self.key, NO_VALUE)
+
+        if self.dispatch.remove:
+            self.fire_remove_event(state, dict_, old, self._remove_token)
+        state._modified_event(dict_, self, old)
+        del dict_[self.key]
+
+    def get_history(self, state, dict_, passive=PASSIVE_OFF):
+        if self.key in dict_:
+            return History.from_scalar_attribute(self, state, dict_[self.key])
+        else:
+            if passive & INIT_OK:
+                passive ^= INIT_OK
+            current = self.get(state, dict_, passive=passive)
+            if current is PASSIVE_NO_RESULT:
+                return HISTORY_BLANK
+            else:
+                return History.from_scalar_attribute(self, state, current)
+
+    def set(self, state, dict_, value, initiator,
+            passive=PASSIVE_OFF, check_old=None, pop=False):
+        if self.dispatch._active_history:
+            old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
+        else:
+            old = dict_.get(self.key, NO_VALUE)
+
+        if self.dispatch.set:
+            value = self.fire_replace_event(state, dict_,
+                                            value, old, initiator)
+        state._modified_event(dict_, self, old)
+        dict_[self.key] = value
+
+    def fire_replace_event(self, state, dict_, value, previous, initiator):
+        for fn in self.dispatch.set:
+            value = fn(
+                state, value, previous,
+                initiator or self._replace_token or
+                self._init_append_or_replace_token())
+        return value
+
+    def fire_remove_event(self, state, dict_, value, initiator):
+        for fn in self.dispatch.remove:
+            fn(state, value,
+               initiator or self._remove_token or self._init_remove_token())
+
+    @property
+    def type(self):
+        return self.property.columns[0].type
+
+
+class ScalarObjectAttributeImpl(ScalarAttributeImpl):
+    """represents a scalar-holding InstrumentedAttribute,
+    where the target object is also instrumented.
+
+    Adds events to delete/set operations.
+
+    """
+
+    accepts_scalar_loader = False
+    uses_objects = True
+    supports_population = True
+    collection = False
+
+    __slots__ = ()
+
+    def delete(self, state, dict_):
+        old = self.get(state, dict_)
+        self.fire_remove_event(
+            state, dict_, old,
+            self._remove_token or self._init_remove_token())
+        del dict_[self.key]
+
+    def get_history(self, state, dict_, passive=PASSIVE_OFF):
+        if self.key in dict_:
+            return History.from_object_attribute(self, state, dict_[self.key])
+        else:
+            if passive & INIT_OK:
+                passive ^= INIT_OK
+            current = self.get(state, dict_, passive=passive)
+            if current is PASSIVE_NO_RESULT:
+                return HISTORY_BLANK
+            else:
+                return History.from_object_attribute(self, state, current)
+
+    def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
+        if self.key in dict_:
+            current = dict_[self.key]
+        elif passive & CALLABLES_OK:
+            current = self.get(state, dict_, passive=passive)
+        else:
+            return []
+
+        # can't use __hash__(), can't use __eq__() here
+        if current is not None and \
+                current is not PASSIVE_NO_RESULT and \
+                current is not NEVER_SET:
+            ret = [(instance_state(current), current)]
+        else:
+            ret = [(None, None)]
+
+        if self.key in state.committed_state:
+            original = state.committed_state[self.key]
+            if original is not None and \
+                    original is not PASSIVE_NO_RESULT and \
+                    original is not NEVER_SET and \
+                    original is not current:
+
+                ret.append((instance_state(original), original))
+        return ret
+
+    def set(self, state, dict_, value, initiator,
+            passive=PASSIVE_OFF, check_old=None, pop=False):
+        """Set a value on the given InstanceState.
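+
+        When ``check_old`` is given, the assignment proceeds only if the
+        current value is the expected one; with ``pop=True`` a mismatch
+        is silently ignored rather than raising ``ValueError``.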
+ + """ + if self.dispatch._active_history: + old = self.get( + state, dict_, + passive=PASSIVE_ONLY_PERSISTENT | + NO_AUTOFLUSH | LOAD_AGAINST_COMMITTED) + else: + old = self.get( + state, dict_, passive=PASSIVE_NO_FETCH ^ INIT_OK | + LOAD_AGAINST_COMMITTED) + + if check_old is not None and \ + old is not PASSIVE_NO_RESULT and \ + check_old is not old: + if pop: + return + else: + raise ValueError( + "Object %s not associated with %s on attribute '%s'" % ( + instance_str(check_old), + state_str(state), + self.key + )) + + value = self.fire_replace_event(state, dict_, value, old, initiator) + dict_[self.key] = value + + def fire_remove_event(self, state, dict_, value, initiator): + if self.trackparent and value is not None: + self.sethasparent(instance_state(value), state, False) + + for fn in self.dispatch.remove: + fn(state, value, initiator or + self._remove_token or self._init_remove_token()) + + state._modified_event(dict_, self, value) + + def fire_replace_event(self, state, dict_, value, previous, initiator): + if self.trackparent: + if (previous is not value and + previous not in (None, PASSIVE_NO_RESULT, NEVER_SET)): + self.sethasparent(instance_state(previous), state, False) + + for fn in self.dispatch.set: + value = fn( + state, value, previous, initiator or + self._replace_token or self._init_append_or_replace_token()) + + state._modified_event(dict_, self, previous) + + if self.trackparent: + if value is not None: + self.sethasparent(instance_state(value), state, True) + + return value + + +class CollectionAttributeImpl(AttributeImpl): + """A collection-holding attribute that instruments changes in membership. + + Only handles collections of instrumented objects. + + InstrumentedCollectionAttribute holds an arbitrary, user-specified + container object (defaulting to a list) and brokers access to the + CollectionAdapter, a "view" onto that object that presents consistent bag + semantics to the orm layer independent of the user data implementation. 
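+
+    For example (an illustrative sketch; ``some_parent`` and its
+    ``children`` collection are assumed names), a plain list operation
+    such as::
+
+        some_parent.children.append(some_child)
+
+    is intercepted by the instrumented collection and routed through this
+    impl, firing append events before the item lands in the underlying
+    collection.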
+ + """ + accepts_scalar_loader = False + uses_objects = True + supports_population = True + collection = True + + __slots__ = ( + 'copy', 'collection_factory', '_append_token', '_remove_token', + '_duck_typed_as' + ) + + def __init__(self, class_, key, callable_, dispatch, + typecallable=None, trackparent=False, extension=None, + copy_function=None, compare_function=None, **kwargs): + super(CollectionAttributeImpl, self).__init__( + class_, + key, + callable_, dispatch, + trackparent=trackparent, + extension=extension, + compare_function=compare_function, + **kwargs) + + if copy_function is None: + copy_function = self.__copy + self.copy = copy_function + self.collection_factory = typecallable + self._append_token = None + self._remove_token = None + self._duck_typed_as = util.duck_type_collection( + self.collection_factory()) + + if getattr(self.collection_factory, "_sa_linker", None): + + @event.listens_for(self, "init_collection") + def link(target, collection, collection_adapter): + collection._sa_linker(collection_adapter) + + @event.listens_for(self, "dispose_collection") + def unlink(target, collection, collection_adapter): + collection._sa_linker(None) + + def _init_append_token(self): + self._append_token = Event(self, OP_APPEND) + return self._append_token + + def _init_remove_token(self): + self._remove_token = Event(self, OP_REMOVE) + return self._remove_token + + def __copy(self, item): + return [y for y in collections.collection_adapter(item)] + + def get_history(self, state, dict_, passive=PASSIVE_OFF): + current = self.get(state, dict_, passive=passive) + if current is PASSIVE_NO_RESULT: + return HISTORY_BLANK + else: + return History.from_collection(self, state, current) + + def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE): + # NOTE: passive is ignored here at the moment + + if self.key not in dict_: + return [] + + current = dict_[self.key] + current = getattr(current, '_sa_adapter') + + if self.key in state.committed_state: + original = state.committed_state[self.key] + if original not in (NO_VALUE, NEVER_SET): + current_states = [((c is not None) and + instance_state(c) or None, c) + for c in current] + original_states = [((c is not None) and + instance_state(c) or None, c) + for c in original] + + current_set = dict(current_states) + original_set = dict(original_states) + + return \ + [(s, o) for s, o in current_states + if s not in original_set] + \ + [(s, o) for s, o in current_states + if s in original_set] + \ + [(s, o) for s, o in original_states + if s not in current_set] + + return [(instance_state(o), o) for o in current] + + def fire_append_event(self, state, dict_, value, initiator): + for fn in self.dispatch.append: + value = fn( + state, value, + initiator or self._append_token or self._init_append_token()) + + state._modified_event(dict_, self, NEVER_SET, True) + + if self.trackparent and value is not None: + self.sethasparent(instance_state(value), state, True) + + return value + + def fire_pre_remove_event(self, state, dict_, initiator): + state._modified_event(dict_, self, NEVER_SET, True) + + def fire_remove_event(self, state, dict_, value, initiator): + if self.trackparent and value is not None: + self.sethasparent(instance_state(value), state, False) + + for fn in self.dispatch.remove: + fn(state, value, + initiator or self._remove_token or self._init_remove_token()) + + state._modified_event(dict_, self, NEVER_SET, True) + + def delete(self, state, dict_): + if self.key not in dict_: + return + + state._modified_event(dict_, 
self, NEVER_SET, True) + + collection = self.get_collection(state, state.dict) + collection.clear_with_event() + # TODO: catch key errors, convert to attributeerror? + del dict_[self.key] + + def initialize(self, state, dict_): + """Initialize this attribute with an empty collection.""" + + _, user_data = self._initialize_collection(state) + dict_[self.key] = user_data + return user_data + + def _initialize_collection(self, state): + + adapter, collection = state.manager.initialize_collection( + self.key, state, self.collection_factory) + + self.dispatch.init_collection(state, collection, adapter) + + return adapter, collection + + def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF): + collection = self.get_collection(state, dict_, passive=passive) + if collection is PASSIVE_NO_RESULT: + value = self.fire_append_event(state, dict_, value, initiator) + assert self.key not in dict_, \ + "Collection was loaded during event handling." + state._get_pending_mutation(self.key).append(value) + else: + collection.append_with_event(value, initiator) + + def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF): + collection = self.get_collection(state, state.dict, passive=passive) + if collection is PASSIVE_NO_RESULT: + self.fire_remove_event(state, dict_, value, initiator) + assert self.key not in dict_, \ + "Collection was loaded during event handling." + state._get_pending_mutation(self.key).remove(value) + else: + collection.remove_with_event(value, initiator) + + def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF): + try: + # TODO: better solution here would be to add + # a "popper" role to collections.py to complement + # "remover". + self.remove(state, dict_, value, initiator, passive=passive) + except (ValueError, KeyError, IndexError): + pass + + def set(self, state, dict_, value, initiator=None, + passive=PASSIVE_OFF, pop=False, _adapt=True): + iterable = orig_iterable = value + + # pulling a new collection first so that an adaptation exception does + # not trigger a lazy load of the old collection. + new_collection, user_data = self._initialize_collection(state) + if _adapt: + if new_collection._converter is not None: + iterable = new_collection._converter(iterable) + else: + setting_type = util.duck_type_collection(iterable) + receiving_type = self._duck_typed_as + + if setting_type is not receiving_type: + given = iterable is None and 'None' or \ + iterable.__class__.__name__ + wanted = self._duck_typed_as.__name__ + raise TypeError( + "Incompatible collection type: %s is not %s-like" % ( + given, wanted)) + + # If the object is an adapted collection, return the (iterable) + # adapter. 
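+                # (dict-like collections are iterated over their values,
+                # using itervalues() on py2k when available; any other
+                # duck type falls back to plain iter().)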
+ if hasattr(iterable, '_sa_iterator'): + iterable = iterable._sa_iterator() + elif setting_type is dict: + if util.py3k: + iterable = iterable.values() + else: + iterable = getattr( + iterable, 'itervalues', iterable.values)() + else: + iterable = iter(iterable) + new_values = list(iterable) + + old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT) + if old is PASSIVE_NO_RESULT: + old = self.initialize(state, dict_) + elif old is orig_iterable: + # ignore re-assignment of the current collection, as happens + # implicitly with in-place operators (foo.collection |= other) + return + + # place a copy of "old" in state.committed_state + state._modified_event(dict_, self, old, True) + + old_collection = old._sa_adapter + + dict_[self.key] = user_data + + collections.bulk_replace( + new_values, old_collection, new_collection) + + del old._sa_adapter + self.dispatch.dispose_collection(state, old, old_collection) + + def _invalidate_collection(self, collection): + adapter = getattr(collection, '_sa_adapter') + adapter.invalidated = True + + def set_committed_value(self, state, dict_, value): + """Set an attribute value on the given instance and 'commit' it.""" + + collection, user_data = self._initialize_collection(state) + + if value: + collection.append_multiple_without_event(value) + + state.dict[self.key] = user_data + + state._commit(dict_, [self.key]) + + if self.key in state._pending_mutations: + # pending items exist. issue a modified event, + # add/remove new items. + state._modified_event(dict_, self, user_data, True) + + pending = state._pending_mutations.pop(self.key) + added = pending.added_items + removed = pending.deleted_items + for item in added: + collection.append_without_event(item) + for item in removed: + collection.remove_without_event(item) + + return user_data + + def get_collection(self, state, dict_, + user_data=None, passive=PASSIVE_OFF): + """Retrieve the CollectionAdapter associated with the given state. + + Creates a new CollectionAdapter if one does not exist. + + """ + if user_data is None: + user_data = self.get(state, dict_, passive=passive) + if user_data is PASSIVE_NO_RESULT: + return user_data + + return getattr(user_data, '_sa_adapter') + + +def backref_listeners(attribute, key, uselist): + """Apply listeners to synchronize a two-way relationship.""" + + # use easily recognizable names for stack traces + + parent_token = attribute.impl.parent_token + parent_impl = attribute.impl + + def _acceptable_key_err(child_state, initiator, child_impl): + raise ValueError( + "Bidirectional attribute conflict detected: " + 'Passing object %s to attribute "%s" ' + 'triggers a modify event on attribute "%s" ' + 'via the backref "%s".' % ( + state_str(child_state), + initiator.parent_token, + child_impl.parent_token, + attribute.impl.parent_token + ) + ) + + def emit_backref_from_scalar_set_event(state, child, oldchild, initiator): + if oldchild is child: + return child + if oldchild is not None and \ + oldchild is not PASSIVE_NO_RESULT and \ + oldchild is not NEVER_SET: + # With lazy=None, there's no guarantee that the full collection is + # present when updating via a backref. 
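+            # detach state.obj() from the previous child's reverse
+            # attribute without emitting SQL (PASSIVE_NO_FETCH), unless
+            # this event already originated from that attribute.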
+ old_state, old_dict = instance_state(oldchild),\ + instance_dict(oldchild) + impl = old_state.manager[key].impl + + if initiator.impl is not impl or \ + initiator.op not in (OP_REPLACE, OP_REMOVE): + impl.pop(old_state, + old_dict, + state.obj(), + parent_impl._append_token or + parent_impl._init_append_token(), + passive=PASSIVE_NO_FETCH) + + if child is not None: + child_state, child_dict = instance_state(child),\ + instance_dict(child) + child_impl = child_state.manager[key].impl + if initiator.parent_token is not parent_token and \ + initiator.parent_token is not child_impl.parent_token: + _acceptable_key_err(state, initiator, child_impl) + elif initiator.impl is not child_impl or \ + initiator.op not in (OP_APPEND, OP_REPLACE): + child_impl.append( + child_state, + child_dict, + state.obj(), + initiator, + passive=PASSIVE_NO_FETCH) + return child + + def emit_backref_from_collection_append_event(state, child, initiator): + if child is None: + return + + child_state, child_dict = instance_state(child), \ + instance_dict(child) + child_impl = child_state.manager[key].impl + + if initiator.parent_token is not parent_token and \ + initiator.parent_token is not child_impl.parent_token: + _acceptable_key_err(state, initiator, child_impl) + elif initiator.impl is not child_impl or \ + initiator.op not in (OP_APPEND, OP_REPLACE): + child_impl.append( + child_state, + child_dict, + state.obj(), + initiator, + passive=PASSIVE_NO_FETCH) + return child + + def emit_backref_from_collection_remove_event(state, child, initiator): + if child is not None: + child_state, child_dict = instance_state(child),\ + instance_dict(child) + child_impl = child_state.manager[key].impl + if initiator.impl is not child_impl or \ + initiator.op not in (OP_REMOVE, OP_REPLACE): + child_impl.pop( + child_state, + child_dict, + state.obj(), + initiator, + passive=PASSIVE_NO_FETCH) + + if uselist: + event.listen(attribute, "append", + emit_backref_from_collection_append_event, + retval=True, raw=True) + else: + event.listen(attribute, "set", + emit_backref_from_scalar_set_event, + retval=True, raw=True) + # TODO: need coverage in test/orm/ of remove event + event.listen(attribute, "remove", + emit_backref_from_collection_remove_event, + retval=True, raw=True) + +_NO_HISTORY = util.symbol('NO_HISTORY') +_NO_STATE_SYMBOLS = frozenset([ + id(PASSIVE_NO_RESULT), + id(NO_VALUE), + id(NEVER_SET)]) + +History = util.namedtuple("History", [ + "added", "unchanged", "deleted" +]) + + +class History(History): + """A 3-tuple of added, unchanged and deleted values, + representing the changes which have occurred on an instrumented + attribute. + + The easiest way to get a :class:`.History` object for a particular + attribute on an object is to use the :func:`.inspect` function:: + + from sqlalchemy import inspect + + hist = inspect(myobject).attrs.myattribute.history + + Each tuple member is an iterable sequence: + + * ``added`` - the collection of items added to the attribute (the first + tuple element). + + * ``unchanged`` - the collection of items that have not changed on the + attribute (the second tuple element). + + * ``deleted`` - the collection of items that have been removed from the + attribute (the third tuple element). + + """ + + def __bool__(self): + return self != HISTORY_BLANK + __nonzero__ = __bool__ + + def empty(self): + """Return True if this :class:`.History` has no changes + and no existing, unchanged state. 
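+
+        For example (an illustrative sketch; ``some_object`` and its
+        ``data`` attribute are assumed to be part of a mapped class)::
+
+            from sqlalchemy import inspect
+
+            hist = inspect(some_object).attrs.data.history
+            if hist.empty():
+                pass  # nothing loaded, nothing changed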
+ + """ + + return not bool( + (self.added or self.deleted) + or self.unchanged + ) + + def sum(self): + """Return a collection of added + unchanged + deleted.""" + + return (self.added or []) +\ + (self.unchanged or []) +\ + (self.deleted or []) + + def non_deleted(self): + """Return a collection of added + unchanged.""" + + return (self.added or []) +\ + (self.unchanged or []) + + def non_added(self): + """Return a collection of unchanged + deleted.""" + + return (self.unchanged or []) +\ + (self.deleted or []) + + def has_changes(self): + """Return True if this :class:`.History` has changes.""" + + return bool(self.added or self.deleted) + + def as_state(self): + return History( + [(c is not None) + and instance_state(c) or None + for c in self.added], + [(c is not None) + and instance_state(c) or None + for c in self.unchanged], + [(c is not None) + and instance_state(c) or None + for c in self.deleted], + ) + + @classmethod + def from_scalar_attribute(cls, attribute, state, current): + original = state.committed_state.get(attribute.key, _NO_HISTORY) + + if original is _NO_HISTORY: + if current is NEVER_SET: + return cls((), (), ()) + else: + return cls((), [current], ()) + # don't let ClauseElement expressions here trip things up + elif attribute.is_equal(current, original) is True: + return cls((), [current], ()) + else: + # current convention on native scalars is to not + # include information + # about missing previous value in "deleted", but + # we do include None, which helps in some primary + # key situations + if id(original) in _NO_STATE_SYMBOLS: + deleted = () + else: + deleted = [original] + if current is NEVER_SET: + return cls((), (), deleted) + else: + return cls([current], (), deleted) + + @classmethod + def from_object_attribute(cls, attribute, state, current): + original = state.committed_state.get(attribute.key, _NO_HISTORY) + + if original is _NO_HISTORY: + if current is NO_VALUE or current is NEVER_SET: + return cls((), (), ()) + else: + return cls((), [current], ()) + elif current is original: + return cls((), [current], ()) + else: + # current convention on related objects is to not + # include information + # about missing previous value in "deleted", and + # to also not include None - the dependency.py rules + # ignore the None in any case. 
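+            # e.g. replacing a previously loaded child a1 with a2 yields
+            # History(added=[a2], unchanged=(), deleted=[a1]).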
+ if id(original) in _NO_STATE_SYMBOLS or original is None: + deleted = () + else: + deleted = [original] + if current is NO_VALUE or current is NEVER_SET: + return cls((), (), deleted) + else: + return cls([current], (), deleted) + + @classmethod + def from_collection(cls, attribute, state, current): + original = state.committed_state.get(attribute.key, _NO_HISTORY) + + if current is NO_VALUE or current is NEVER_SET: + return cls((), (), ()) + + current = getattr(current, '_sa_adapter') + if original in (NO_VALUE, NEVER_SET): + return cls(list(current), (), ()) + elif original is _NO_HISTORY: + return cls((), list(current), ()) + else: + + current_states = [((c is not None) and instance_state(c) + or None, c) + for c in current + ] + original_states = [((c is not None) and instance_state(c) + or None, c) + for c in original + ] + + current_set = dict(current_states) + original_set = dict(original_states) + + return cls( + [o for s, o in current_states if s not in original_set], + [o for s, o in current_states if s in original_set], + [o for s, o in original_states if s not in current_set] + ) + +HISTORY_BLANK = History(None, None, None) + + +def get_history(obj, key, passive=PASSIVE_OFF): + """Return a :class:`.History` record for the given object + and attribute key. + + :param obj: an object whose class is instrumented by the + attributes package. + + :param key: string attribute name. + + :param passive: indicates loading behavior for the attribute + if the value is not already present. This is a + bitflag attribute, which defaults to the symbol + :attr:`.PASSIVE_OFF` indicating all necessary SQL + should be emitted. + + """ + if passive is True: + util.warn_deprecated("Passing True for 'passive' is deprecated. " + "Use attributes.PASSIVE_NO_INITIALIZE") + passive = PASSIVE_NO_INITIALIZE + elif passive is False: + util.warn_deprecated("Passing False for 'passive' is " + "deprecated. 
Use attributes.PASSIVE_OFF") + passive = PASSIVE_OFF + + return get_state_history(instance_state(obj), key, passive) + + +def get_state_history(state, key, passive=PASSIVE_OFF): + return state.get_history(key, passive) + + +def has_parent(cls, obj, key, optimistic=False): + """TODO""" + manager = manager_of_class(cls) + state = instance_state(obj) + return manager.has_parent(state, key, optimistic) + + +def register_attribute(class_, key, **kw): + comparator = kw.pop('comparator', None) + parententity = kw.pop('parententity', None) + doc = kw.pop('doc', None) + desc = register_descriptor(class_, key, + comparator, parententity, doc=doc) + register_attribute_impl(class_, key, **kw) + return desc + + +def register_attribute_impl(class_, key, + uselist=False, callable_=None, + useobject=False, + impl_class=None, backref=None, **kw): + + manager = manager_of_class(class_) + if uselist: + factory = kw.pop('typecallable', None) + typecallable = manager.instrument_collection_class( + key, factory or list) + else: + typecallable = kw.pop('typecallable', None) + + dispatch = manager[key].dispatch + + if impl_class: + impl = impl_class(class_, key, typecallable, dispatch, **kw) + elif uselist: + impl = CollectionAttributeImpl(class_, key, callable_, dispatch, + typecallable=typecallable, **kw) + elif useobject: + impl = ScalarObjectAttributeImpl(class_, key, callable_, + dispatch, **kw) + else: + impl = ScalarAttributeImpl(class_, key, callable_, dispatch, **kw) + + manager[key].impl = impl + + if backref: + backref_listeners(manager[key], backref, uselist) + + manager.post_configure_attribute(key) + return manager[key] + + +def register_descriptor(class_, key, comparator=None, + parententity=None, doc=None): + manager = manager_of_class(class_) + + descriptor = InstrumentedAttribute(class_, key, comparator=comparator, + parententity=parententity) + + descriptor.__doc__ = doc + + manager.instrument_attribute(key, descriptor) + return descriptor + + +def unregister_attribute(class_, key): + manager_of_class(class_).uninstrument_attribute(key) + + +def init_collection(obj, key): + """Initialize a collection attribute and return the collection adapter. + + This function is used to provide direct access to collection internals + for a previously unloaded attribute. e.g.:: + + collection_adapter = init_collection(someobject, 'elements') + for elem in values: + collection_adapter.append_without_event(elem) + + For an easier way to do the above, see + :func:`~sqlalchemy.orm.attributes.set_committed_value`. + + obj is an instrumented object instance. An InstanceState + is accepted directly for backwards compatibility but + this usage is deprecated. + + """ + state = instance_state(obj) + dict_ = state.dict + return init_state_collection(state, dict_, key) + + +def init_state_collection(state, dict_, key): + """Initialize a collection attribute and return the collection adapter.""" + + attr = state.manager[key].impl + user_data = attr.initialize(state, dict_) + return attr.get_collection(state, dict_, user_data) + + +def set_committed_value(instance, key, value): + """Set the value of an attribute with no history events. + + Cancels any previous history present. The value should be + a scalar value for scalar-holding attributes, or + an iterable for any collection-holding attribute. + + This is the same underlying method used when a lazy loader + fires off and loads additional data from the database. 
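+
+    A minimal sketch (``user``, ``Address`` and the ``addresses``
+    collection are assumed names used for illustration)::
+
+        rows = session.query(Address).filter_by(user_id=user.id).all()
+        set_committed_value(user, 'addresses', rows)
+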
+ In particular, this method can be used by application code + which has loaded additional attributes or collections through + separate queries, which can then be attached to an instance + as though it were part of its original loaded state. + + """ + state, dict_ = instance_state(instance), instance_dict(instance) + state.manager[key].impl.set_committed_value(state, dict_, value) + + +def set_attribute(instance, key, value): + """Set the value of an attribute, firing history events. + + This function may be used regardless of instrumentation + applied directly to the class, i.e. no descriptors are required. + Custom attribute management schemes will need to make usage + of this method to establish attribute state as understood + by SQLAlchemy. + + """ + state, dict_ = instance_state(instance), instance_dict(instance) + state.manager[key].impl.set(state, dict_, value, None) + + +def get_attribute(instance, key): + """Get the value of an attribute, firing any callables required. + + This function may be used regardless of instrumentation + applied directly to the class, i.e. no descriptors are required. + Custom attribute management schemes will need to make usage + of this method to make usage of attribute state as understood + by SQLAlchemy. + + """ + state, dict_ = instance_state(instance), instance_dict(instance) + return state.manager[key].impl.get(state, dict_) + + +def del_attribute(instance, key): + """Delete the value of an attribute, firing history events. + + This function may be used regardless of instrumentation + applied directly to the class, i.e. no descriptors are required. + Custom attribute management schemes will need to make usage + of this method to establish attribute state as understood + by SQLAlchemy. + + """ + state, dict_ = instance_state(instance), instance_dict(instance) + state.manager[key].impl.delete(state, dict_) + + +def flag_modified(instance, key): + """Mark an attribute on an instance as 'modified'. + + This sets the 'modified' flag on the instance and + establishes an unconditional change event for the given attribute. + + """ + state, dict_ = instance_state(instance), instance_dict(instance) + impl = state.manager[key].impl + state._modified_event(dict_, impl, NO_VALUE, force=True) diff --git a/app/lib/sqlalchemy/orm/base.py b/app/lib/sqlalchemy/orm/base.py new file mode 100644 index 0000000..1ad5603 --- /dev/null +++ b/app/lib/sqlalchemy/orm/base.py @@ -0,0 +1,540 @@ +# orm/base.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Constants and rudimental functions used throughout the ORM. + +""" + +from .. import util, inspection, exc as sa_exc +from ..sql import expression +from . import exc +import operator + +PASSIVE_NO_RESULT = util.symbol( + 'PASSIVE_NO_RESULT', + """Symbol returned by a loader callable or other attribute/history + retrieval operation when a value could not be determined, based + on loader callable flags. + """ +) + +ATTR_WAS_SET = util.symbol( + 'ATTR_WAS_SET', + """Symbol returned by a loader callable to indicate the + retrieved value, or values, were assigned to their attributes + on the target object. 
+ """ +) + +ATTR_EMPTY = util.symbol( + 'ATTR_EMPTY', + """Symbol used internally to indicate an attribute had no callable.""" +) + +NO_VALUE = util.symbol( + 'NO_VALUE', + """Symbol which may be placed as the 'previous' value of an attribute, + indicating no value was loaded for an attribute when it was modified, + and flags indicated we were not to load it. + """ +) + +NEVER_SET = util.symbol( + 'NEVER_SET', + """Symbol which may be placed as the 'previous' value of an attribute + indicating that the attribute had not been assigned to previously. + """ +) + +NO_CHANGE = util.symbol( + "NO_CHANGE", + """No callables or SQL should be emitted on attribute access + and no state should change + """, canonical=0 +) + +CALLABLES_OK = util.symbol( + "CALLABLES_OK", + """Loader callables can be fired off if a value + is not present. + """, canonical=1 +) + +SQL_OK = util.symbol( + "SQL_OK", + """Loader callables can emit SQL at least on scalar value attributes.""", + canonical=2 +) + +RELATED_OBJECT_OK = util.symbol( + "RELATED_OBJECT_OK", + """Callables can use SQL to load related objects as well + as scalar value attributes. + """, canonical=4 +) + +INIT_OK = util.symbol( + "INIT_OK", + """Attributes should be initialized with a blank + value (None or an empty collection) upon get, if no other + value can be obtained. + """, canonical=8 +) + +NON_PERSISTENT_OK = util.symbol( + "NON_PERSISTENT_OK", + """Callables can be emitted if the parent is not persistent.""", + canonical=16 +) + +LOAD_AGAINST_COMMITTED = util.symbol( + "LOAD_AGAINST_COMMITTED", + """Callables should use committed values as primary/foreign keys during a + load. + """, canonical=32 +) + +NO_AUTOFLUSH = util.symbol( + "NO_AUTOFLUSH", + """Loader callables should disable autoflush.""", + canonical=64 +) + +# pre-packaged sets of flags used as inputs +PASSIVE_OFF = util.symbol( + "PASSIVE_OFF", + "Callables can be emitted in all cases.", + canonical=(RELATED_OBJECT_OK | NON_PERSISTENT_OK | + INIT_OK | CALLABLES_OK | SQL_OK) +) +PASSIVE_RETURN_NEVER_SET = util.symbol( + "PASSIVE_RETURN_NEVER_SET", + """PASSIVE_OFF ^ INIT_OK""", + canonical=PASSIVE_OFF ^ INIT_OK +) +PASSIVE_NO_INITIALIZE = util.symbol( + "PASSIVE_NO_INITIALIZE", + "PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK", + canonical=PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK +) +PASSIVE_NO_FETCH = util.symbol( + "PASSIVE_NO_FETCH", + "PASSIVE_OFF ^ SQL_OK", + canonical=PASSIVE_OFF ^ SQL_OK +) +PASSIVE_NO_FETCH_RELATED = util.symbol( + "PASSIVE_NO_FETCH_RELATED", + "PASSIVE_OFF ^ RELATED_OBJECT_OK", + canonical=PASSIVE_OFF ^ RELATED_OBJECT_OK +) +PASSIVE_ONLY_PERSISTENT = util.symbol( + "PASSIVE_ONLY_PERSISTENT", + "PASSIVE_OFF ^ NON_PERSISTENT_OK", + canonical=PASSIVE_OFF ^ NON_PERSISTENT_OK +) + +DEFAULT_MANAGER_ATTR = '_sa_class_manager' +DEFAULT_STATE_ATTR = '_sa_instance_state' +_INSTRUMENTOR = ('mapper', 'instrumentor') + +EXT_CONTINUE = util.symbol('EXT_CONTINUE') +EXT_STOP = util.symbol('EXT_STOP') + +ONETOMANY = util.symbol( + 'ONETOMANY', + """Indicates the one-to-many direction for a :func:`.relationship`. + + This symbol is typically used by the internals but may be exposed within + certain API features. + + """) + +MANYTOONE = util.symbol( + 'MANYTOONE', + """Indicates the many-to-one direction for a :func:`.relationship`. + + This symbol is typically used by the internals but may be exposed within + certain API features. + + """) + +MANYTOMANY = util.symbol( + 'MANYTOMANY', + """Indicates the many-to-many direction for a :func:`.relationship`. 
+ + This symbol is typically used by the internals but may be exposed within + certain API features. + + """) + +NOT_EXTENSION = util.symbol( + 'NOT_EXTENSION', + """Symbol indicating an :class:`InspectionAttr` that's + not part of sqlalchemy.ext. + + Is assigned to the :attr:`.InspectionAttr.extension_type` + attibute. + + """) + +_never_set = frozenset([NEVER_SET]) + +_none_set = frozenset([None, NEVER_SET, PASSIVE_NO_RESULT]) + +_SET_DEFERRED_EXPIRED = util.symbol("SET_DEFERRED_EXPIRED") + +_DEFER_FOR_STATE = util.symbol("DEFER_FOR_STATE") + + +def _generative(*assertions): + """Mark a method as generative, e.g. method-chained.""" + + @util.decorator + def generate(fn, *args, **kw): + self = args[0]._clone() + for assertion in assertions: + assertion(self, fn.__name__) + fn(self, *args[1:], **kw) + return self + return generate + + +# these can be replaced by sqlalchemy.ext.instrumentation +# if augmented class instrumentation is enabled. +def manager_of_class(cls): + return cls.__dict__.get(DEFAULT_MANAGER_ATTR, None) + +instance_state = operator.attrgetter(DEFAULT_STATE_ATTR) + +instance_dict = operator.attrgetter('__dict__') + + +def instance_str(instance): + """Return a string describing an instance.""" + + return state_str(instance_state(instance)) + + +def state_str(state): + """Return a string describing an instance via its InstanceState.""" + + if state is None: + return "None" + else: + return '<%s at 0x%x>' % (state.class_.__name__, id(state.obj())) + + +def state_class_str(state): + """Return a string describing an instance's class via its + InstanceState. + """ + + if state is None: + return "None" + else: + return '<%s>' % (state.class_.__name__, ) + + +def attribute_str(instance, attribute): + return instance_str(instance) + "." + attribute + + +def state_attribute_str(state, attribute): + return state_str(state) + "." + attribute + + +def object_mapper(instance): + """Given an object, return the primary Mapper associated with the object + instance. + + Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError` + if no mapping is configured. + + This function is available via the inspection system as:: + + inspect(instance).mapper + + Using the inspection system will raise + :class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is + not part of a mapping. + + """ + return object_state(instance).mapper + + +def object_state(instance): + """Given an object, return the :class:`.InstanceState` + associated with the object. + + Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError` + if no mapping is configured. + + Equivalent functionality is available via the :func:`.inspect` + function as:: + + inspect(instance) + + Using the inspection system will raise + :class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is + not part of a mapping. + + """ + state = _inspect_mapped_object(instance) + if state is None: + raise exc.UnmappedInstanceError(instance) + else: + return state + + +@inspection._inspects(object) +def _inspect_mapped_object(instance): + try: + return instance_state(instance) + # TODO: whats the py-2/3 syntax to catch two + # different kinds of exceptions at once ? 
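+    # (both py2k and py3k accept ``except (ExcA, ExcB):``; since
+    # exc.NO_STATE is itself a tuple of exception classes, the two
+    # handlers below could be folded into one via tuple concatenation.)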
+ except exc.UnmappedClassError: + return None + except exc.NO_STATE: + return None + + +def _class_to_mapper(class_or_mapper): + insp = inspection.inspect(class_or_mapper, False) + if insp is not None: + return insp.mapper + else: + raise exc.UnmappedClassError(class_or_mapper) + + +def _mapper_or_none(entity): + """Return the :class:`.Mapper` for the given class or None if the + class is not mapped. + """ + + insp = inspection.inspect(entity, False) + if insp is not None: + return insp.mapper + else: + return None + + +def _is_mapped_class(entity): + """Return True if the given object is a mapped class, + :class:`.Mapper`, or :class:`.AliasedClass`. + """ + + insp = inspection.inspect(entity, False) + return insp is not None and \ + not insp.is_clause_element and \ + ( + insp.is_mapper or insp.is_aliased_class + ) + + +def _attr_as_key(attr): + if hasattr(attr, 'key'): + return attr.key + else: + return expression._column_as_key(attr) + + +def _orm_columns(entity): + insp = inspection.inspect(entity, False) + if hasattr(insp, 'selectable') and hasattr(insp.selectable, 'c'): + return [c for c in insp.selectable.c] + else: + return [entity] + + +def _is_aliased_class(entity): + insp = inspection.inspect(entity, False) + return insp is not None and \ + getattr(insp, "is_aliased_class", False) + + +def _entity_descriptor(entity, key): + """Return a class attribute given an entity and string name. + + May return :class:`.InstrumentedAttribute` or user-defined + attribute. + + """ + insp = inspection.inspect(entity) + if insp.is_selectable: + description = entity + entity = insp.c + elif insp.is_aliased_class: + entity = insp.entity + description = entity + elif hasattr(insp, "mapper"): + description = entity = insp.mapper.class_ + else: + description = entity + + try: + return getattr(entity, key) + except AttributeError: + raise sa_exc.InvalidRequestError( + "Entity '%s' has no property '%s'" % + (description, key) + ) + +_state_mapper = util.dottedgetter('manager.mapper') + + +@inspection._inspects(type) +def _inspect_mapped_class(class_, configure=False): + try: + class_manager = manager_of_class(class_) + if not class_manager.is_mapped: + return None + mapper = class_manager.mapper + except exc.NO_STATE: + return None + else: + if configure and mapper._new_mappers: + mapper._configure_all() + return mapper + + +def class_mapper(class_, configure=True): + """Given a class, return the primary :class:`.Mapper` associated + with the key. + + Raises :exc:`.UnmappedClassError` if no mapping is configured + on the given class, or :exc:`.ArgumentError` if a non-class + object is passed. + + Equivalent functionality is available via the :func:`.inspect` + function as:: + + inspect(some_mapped_class) + + Using the inspection system will raise + :class:`sqlalchemy.exc.NoInspectionAvailable` if the class is not mapped. + + """ + mapper = _inspect_mapped_class(class_, configure=configure) + if mapper is None: + if not isinstance(class_, type): + raise sa_exc.ArgumentError( + "Class object expected, got '%r'." % (class_, )) + raise exc.UnmappedClassError(class_) + else: + return mapper + + +class InspectionAttr(object): + """A base class applied to all ORM objects that can be returned + by the :func:`.inspect` function. + + The attributes defined here allow the usage of simple boolean + checks to test basic facts about the object returned. 
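+
+    For example (an illustrative sketch; ``User`` is assumed to be a
+    mapped class)::
+
+        from sqlalchemy import inspect
+
+        insp = inspect(User)
+        assert insp.is_mapper  # a Mapper is returned for a mapped class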
+ + While the boolean checks here are basically the same as using + the Python isinstance() function, the flags here can be used without + the need to import all of these classes, and also such that + the SQLAlchemy class system can change while leaving the flags + here intact for forwards-compatibility. + + """ + __slots__ = () + + is_selectable = False + """Return True if this object is an instance of :class:`.Selectable`.""" + + is_aliased_class = False + """True if this object is an instance of :class:`.AliasedClass`.""" + + is_instance = False + """True if this object is an instance of :class:`.InstanceState`.""" + + is_mapper = False + """True if this object is an instance of :class:`.Mapper`.""" + + is_property = False + """True if this object is an instance of :class:`.MapperProperty`.""" + + is_attribute = False + """True if this object is a Python :term:`descriptor`. + + This can refer to one of many types. Usually a + :class:`.QueryableAttribute` which handles attributes events on behalf + of a :class:`.MapperProperty`. But can also be an extension type + such as :class:`.AssociationProxy` or :class:`.hybrid_property`. + The :attr:`.InspectionAttr.extension_type` will refer to a constant + identifying the specific subtype. + + .. seealso:: + + :attr:`.Mapper.all_orm_descriptors` + + """ + + is_clause_element = False + """True if this object is an instance of :class:`.ClauseElement`.""" + + extension_type = NOT_EXTENSION + """The extension type, if any. + Defaults to :data:`.interfaces.NOT_EXTENSION` + + .. versionadded:: 0.8.0 + + .. seealso:: + + :data:`.HYBRID_METHOD` + + :data:`.HYBRID_PROPERTY` + + :data:`.ASSOCIATION_PROXY` + + """ + + +class InspectionAttrInfo(InspectionAttr): + """Adds the ``.info`` attribute to :class:`.InspectionAttr`. + + The rationale for :class:`.InspectionAttr` vs. :class:`.InspectionAttrInfo` + is that the former is compatible as a mixin for classes that specify + ``__slots__``; this is essentially an implementation artifact. + + """ + + @util.memoized_property + def info(self): + """Info dictionary associated with the object, allowing user-defined + data to be associated with this :class:`.InspectionAttr`. + + The dictionary is generated when first accessed. Alternatively, + it can be specified as a constructor argument to the + :func:`.column_property`, :func:`.relationship`, or :func:`.composite` + functions. + + .. versionadded:: 0.8 Added support for .info to all + :class:`.MapperProperty` subclasses. + + .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also + available on extension types via the + :attr:`.InspectionAttrInfo.info` attribute, so that it can apply + to a wider variety of ORM and extension constructs. + + .. seealso:: + + :attr:`.QueryableAttribute.info` + + :attr:`.SchemaItem.info` + + """ + return {} + + +class _MappedAttribute(object): + """Mixin for attributes which should be replaced by mapper-assigned + attributes. + + """ + __slots__ = () diff --git a/app/lib/sqlalchemy/orm/collections.py b/app/lib/sqlalchemy/orm/collections.py new file mode 100644 index 0000000..2bb53e6 --- /dev/null +++ b/app/lib/sqlalchemy/orm/collections.py @@ -0,0 +1,1552 @@ +# orm/collections.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Support for collections of mapped entities. 
+
+The collections package supplies the machinery used to inform the ORM of
+collection membership changes.  An instrumentation via decoration approach is
+used, allowing arbitrary types (including built-ins) to be used as entity
+collections without requiring inheritance from a base class.
+
+Instrumentation decoration relays membership change events to the
+:class:`.CollectionAttributeImpl` that is currently managing the collection.
+The decorators observe function call arguments and return values, tracking
+entities entering or leaving the collection.  Two decorator approaches are
+provided.  One is a bundle of generic decorators that map function arguments
+and return values to events::
+
+    from sqlalchemy.orm.collections import collection
+    class MyClass(object):
+        # ...
+
+        @collection.adds(1)
+        def store(self, item):
+            self.data.append(item)
+
+        @collection.removes_return()
+        def pop(self):
+            return self.data.pop()
+
+
+The second approach is a bundle of targeted decorators that wrap appropriate
+append and remove notifiers around the mutation methods present in the
+standard Python ``list``, ``set`` and ``dict`` interfaces.  These could be
+specified in terms of generic decorator recipes, but are instead hand-tooled
+for increased efficiency.  The targeted decorators occasionally implement
+adapter-like behavior, such as mapping bulk-set methods (``extend``,
+``update``, ``__setslice__``, etc.) into the series of atomic mutation events
+that the ORM requires.
+
+The targeted decorators are used internally for automatic instrumentation of
+entity collection classes.  Every collection class goes through a
+transformation process roughly like so:
+
+1. If the class is a built-in, substitute a trivial sub-class
+2. Is this class already instrumented?
+3. Add in generic decorators
+4. Sniff out the collection interface through duck-typing
+5. Add targeted decoration to any undecorated interface method
+
+This process modifies the class at runtime, decorating methods and adding some
+bookkeeping properties.  This isn't possible (or desirable) for built-in
+classes like ``list``, so trivial sub-classes are substituted to hold
+decoration::
+
+    class InstrumentedList(list):
+        pass
+
+Collection classes can be specified in ``relationship(collection_class=)`` as
+types or a function that returns an instance.  Collection classes are
+inspected and instrumented during the mapper compilation phase.  The
+collection_class callable will be executed once to produce a specimen
+instance, and the type of that specimen will be instrumented.  Functions that
+return built-in types like ``lists`` will be adapted to produce instrumented
+instances.
+
+When extending a known type like ``list``, additional decorations are
+generally not needed.  Odds are, the extension method will delegate to a
+method that's already instrumented.  For example::
+
+    class QueueIsh(list):
+        def push(self, item):
+            self.append(item)
+        def shift(self):
+            return self.pop(0)
+
+There's no need to decorate these methods.  ``append`` and ``pop`` are already
+instrumented as part of the ``list`` interface.  Decorating them would fire
+duplicate events, which should be avoided.
+
+The targeted decoration tries not to rely on other methods in the underlying
+collection class, but some are unavoidable.  Many depend on 'read' methods
+being present to properly instrument a 'write', for example, ``__setitem__``
+needs ``__getitem__``.  "Bulk" methods like ``update`` and ``extend`` may also
"Bulk" methods like ``update`` and ``extend`` may also +reimplemented in terms of atomic appends and removes, so the ``extend`` +decoration will actually perform many ``append`` operations and not call the +underlying method at all. + +Tight control over bulk operation and the firing of events is also possible by +implementing the instrumentation internally in your methods. The basic +instrumentation package works under the general assumption that collection +mutation will not raise unusual exceptions. If you want to closely +orchestrate append and remove events with exception management, internal +instrumentation may be the answer. Within your method, +``collection_adapter(self)`` will retrieve an object that you can use for +explicit control over triggering append and remove events. + +The owning object and :class:`.CollectionAttributeImpl` are also reachable +through the adapter, allowing for some very sophisticated behavior. + +""" + +import inspect +import operator +import weakref + +from ..sql import expression +from .. import util, exc as sa_exc +from . import base + +from sqlalchemy.util.compat import inspect_getargspec + +__all__ = ['collection', 'collection_adapter', + 'mapped_collection', 'column_mapped_collection', + 'attribute_mapped_collection'] + +__instrumentation_mutex = util.threading.Lock() + + +class _PlainColumnGetter(object): + """Plain column getter, stores collection of Column objects + directly. + + Serializes to a :class:`._SerializableColumnGetterV2` + which has more expensive __call__() performance + and some rare caveats. + + """ + + def __init__(self, cols): + self.cols = cols + self.composite = len(cols) > 1 + + def __reduce__(self): + return _SerializableColumnGetterV2._reduce_from_cols(self.cols) + + def _cols(self, mapper): + return self.cols + + def __call__(self, value): + state = base.instance_state(value) + m = base._state_mapper(state) + + key = [ + m._get_state_attr_by_column(state, state.dict, col) + for col in self._cols(m) + ] + + if self.composite: + return tuple(key) + else: + return key[0] + + +class _SerializableColumnGetter(object): + """Column-based getter used in version 0.7.6 only. + + Remains here for pickle compatibility with 0.7.6. + + """ + + def __init__(self, colkeys): + self.colkeys = colkeys + self.composite = len(colkeys) > 1 + + def __reduce__(self): + return _SerializableColumnGetter, (self.colkeys,) + + def __call__(self, value): + state = base.instance_state(value) + m = base._state_mapper(state) + key = [m._get_state_attr_by_column( + state, state.dict, + m.mapped_table.columns[k]) + for k in self.colkeys] + if self.composite: + return tuple(key) + else: + return key[0] + + +class _SerializableColumnGetterV2(_PlainColumnGetter): + """Updated serializable getter which deals with + multi-table mapped classes. + + Two extremely unusual cases are not supported. + Mappings which have tables across multiple metadata + objects, or which are mapped to non-Table selectables + linked across inheriting mappers may fail to function + here. 
+ + """ + + def __init__(self, colkeys): + self.colkeys = colkeys + self.composite = len(colkeys) > 1 + + def __reduce__(self): + return self.__class__, (self.colkeys,) + + @classmethod + def _reduce_from_cols(cls, cols): + def _table_key(c): + if not isinstance(c.table, expression.TableClause): + return None + else: + return c.table.key + colkeys = [(c.key, _table_key(c)) for c in cols] + return _SerializableColumnGetterV2, (colkeys,) + + def _cols(self, mapper): + cols = [] + metadata = getattr(mapper.local_table, 'metadata', None) + for (ckey, tkey) in self.colkeys: + if tkey is None or \ + metadata is None or \ + tkey not in metadata: + cols.append(mapper.local_table.c[ckey]) + else: + cols.append(metadata.tables[tkey].c[ckey]) + return cols + + +def column_mapped_collection(mapping_spec): + """A dictionary-based collection type with column-based keying. + + Returns a :class:`.MappedCollection` factory with a keying function + generated from mapping_spec, which may be a Column or a sequence + of Columns. + + The key value must be immutable for the lifetime of the object. You + can not, for example, map on foreign key values if those key values will + change during the session, i.e. from None to a database-assigned integer + after a session flush. + + """ + cols = [expression._only_column_elements(q, "mapping_spec") + for q in util.to_list(mapping_spec) + ] + keyfunc = _PlainColumnGetter(cols) + return lambda: MappedCollection(keyfunc) + + +class _SerializableAttrGetter(object): + def __init__(self, name): + self.name = name + self.getter = operator.attrgetter(name) + + def __call__(self, target): + return self.getter(target) + + def __reduce__(self): + return _SerializableAttrGetter, (self.name, ) + + +def attribute_mapped_collection(attr_name): + """A dictionary-based collection type with attribute-based keying. + + Returns a :class:`.MappedCollection` factory with a keying based on the + 'attr_name' attribute of entities in the collection, where ``attr_name`` + is the string name of the attribute. + + The key value must be immutable for the lifetime of the object. You + can not, for example, map on foreign key values if those key values will + change during the session, i.e. from None to a database-assigned integer + after a session flush. + + """ + getter = _SerializableAttrGetter(attr_name) + return lambda: MappedCollection(getter) + + +def mapped_collection(keyfunc): + """A dictionary-based collection type with arbitrary keying. + + Returns a :class:`.MappedCollection` factory with a keying function + generated from keyfunc, a callable that takes an entity and returns a + key value. + + The key value must be immutable for the lifetime of the object. You + can not, for example, map on foreign key values if those key values will + change during the session, i.e. from None to a database-assigned integer + after a session flush. + + """ + return lambda: MappedCollection(keyfunc) + + +class collection(object): + """Decorators for entity collection classes. + + The decorators fall into two groups: annotations and interception recipes. + + The annotating decorators (appender, remover, iterator, linker, converter, + internally_instrumented) indicate the method's purpose and take no + arguments. They are not written with parens:: + + @collection.appender + def append(self, append): ... + + The recipe decorators all require parens, even those that take no + arguments:: + + @collection.adds('entity') + def insert(self, position, entity): ... 
+
+        @collection.removes_return()
+        def popitem(self): ...
+
+    """
+    # Bundled as a class solely for ease of use: packaging, doc strings,
+    # importability.
+
+    @staticmethod
+    def appender(fn):
+        """Tag the method as the collection appender.
+
+        The appender method is called with one positional argument: the value
+        to append. The method will be automatically decorated with 'adds(1)'
+        if not already decorated::
+
+            @collection.appender
+            def add(self, append): ...
+
+            # or, equivalently
+            @collection.appender
+            @collection.adds(1)
+            def add(self, append): ...
+
+            # for mapping type, an 'append' may kick out a previous value
+            # that occupies that slot. consider d['a'] = 'foo' - any previous
+            # value in d['a'] is discarded.
+            @collection.appender
+            @collection.replaces(1)
+            def add(self, entity):
+                key = some_key_func(entity)
+                previous = None
+                if key in self:
+                    previous = self[key]
+                self[key] = entity
+                return previous
+
+        If the value to append is not allowed in the collection, you may
+        raise an exception. Something to remember is that the appender
+        will be called for each object mapped by a database query. If the
+        database contains rows that violate your collection semantics, you
+        will need to get creative to fix the problem, as access via the
+        collection will not work.
+
+        If the appender method is internally instrumented, you must also
+        receive the keyword argument '_sa_initiator' and ensure its
+        promulgation to collection events.
+
+        """
+        fn._sa_instrument_role = 'appender'
+        return fn
+
+    @staticmethod
+    def remover(fn):
+        """Tag the method as the collection remover.
+
+        The remover method is called with one positional argument: the value
+        to remove. The method will be automatically decorated with
+        :meth:`removes_return` if not already decorated::
+
+            @collection.remover
+            def zap(self, entity): ...
+
+            # or, equivalently
+            @collection.remover
+            @collection.removes_return()
+            def zap(self): ...
+
+        If the value to remove is not present in the collection, you may
+        raise an exception or return None to ignore the error.
+
+        If the remove method is internally instrumented, you must also
+        receive the keyword argument '_sa_initiator' and ensure its
+        promulgation to collection events.
+
+        """
+        fn._sa_instrument_role = 'remover'
+        return fn
+
+    @staticmethod
+    def iterator(fn):
+        """Tag the method as the collection iterator.
+
+        The iterator method is called with no arguments. It is expected to
+        return an iterator over all collection members::
+
+            @collection.iterator
+            def __iter__(self): ...
+
+        """
+        fn._sa_instrument_role = 'iterator'
+        return fn
+
+    @staticmethod
+    def internally_instrumented(fn):
+        """Tag the method as instrumented.
+
+        This tag will prevent any decoration from being applied to the
+        method. Use this if you are orchestrating your own calls to
+        :func:`.collection_adapter` in one of the basic SQLAlchemy
+        interface methods, or to prevent an automatic ABC method
+        decoration from wrapping your implementation::
+
+            # normally an 'extend' method on a list-like class would be
+            # automatically intercepted and re-implemented in terms of
+            # SQLAlchemy events and append(). your implementation will
+            # never be called, unless:
+            @collection.internally_instrumented
+            def extend(self, items): ...
+
+        """
+        fn._sa_instrumented = True
+        return fn
+
+    @staticmethod
+    def linker(fn):
+        """Tag the method as a "linked to attribute" event handler.
+
+        This optional event handler will be called when the collection class
+        is linked to or unlinked from the InstrumentedAttribute. It is
+        invoked immediately after the '_sa_adapter' property is set on
+        the instance. A single argument is passed: the collection adapter
+        that has been linked, or None if unlinking.
+
+        .. deprecated:: 1.0.0 - the :meth:`.collection.linker` handler
+           is superseded by the :meth:`.AttributeEvents.init_collection`
+           and :meth:`.AttributeEvents.dispose_collection` handlers.
+
+        """
+        fn._sa_instrument_role = 'linker'
+        return fn
+
+    link = linker
+    """deprecated; synonym for :meth:`.collection.linker`."""
+
+    @staticmethod
+    def converter(fn):
+        """Tag the method as the collection converter.
+
+        This optional method will be called when a collection is being
+        replaced entirely, as in::
+
+            myobj.acollection = [newvalue1, newvalue2]
+
+        The converter method will receive the object being assigned and should
+        return an iterable of values suitable for use by the ``appender``
+        method. A converter must not assign values or mutate the collection;
+        its sole job is to adapt the value the user provides into an iterable
+        of values for the ORM's use.
+
+        The default converter implementation will use duck-typing to do the
+        conversion. A dict-like collection will be converted into an iterable
+        of dictionary values, and other types will simply be iterated::
+
+            @collection.converter
+            def convert(self, other): ...
+
+        If the duck-typing of the object does not match the type of this
+        collection, a TypeError is raised.
+
+        Supply an implementation of this method if you want to expand the
+        range of possible types that can be assigned in bulk or perform
+        validation on the values about to be assigned.
+
+        """
+        fn._sa_instrument_role = 'converter'
+        return fn
+
+    @staticmethod
+    def adds(arg):
+        """Mark the method as adding an entity to the collection.
+
+        Adds "add to collection" handling to the method. The decorator
+        argument indicates which method argument holds the SQLAlchemy-relevant
+        value. Arguments can be specified positionally (i.e. integer) or by
+        name::
+
+            @collection.adds(1)
+            def push(self, item): ...
+
+            @collection.adds('entity')
+            def do_stuff(self, thing, entity=None): ...
+
+        """
+        def decorator(fn):
+            fn._sa_instrument_before = ('fire_append_event', arg)
+            return fn
+        return decorator
+
+    @staticmethod
+    def replaces(arg):
+        """Mark the method as replacing an entity in the collection.
+
+        Adds "add to collection" and "remove from collection" handling to
+        the method. The decorator argument indicates which method argument
+        holds the SQLAlchemy-relevant value to be added, and the return
+        value, if any, will be considered the value to remove.
+
+        Arguments can be specified positionally (i.e. integer) or by name::
+
+            @collection.replaces(2)
+            def __setitem__(self, index, item): ...
+
+        """
+        def decorator(fn):
+            fn._sa_instrument_before = ('fire_append_event', arg)
+            fn._sa_instrument_after = 'fire_remove_event'
+            return fn
+        return decorator
+
+    @staticmethod
+    def removes(arg):
+        """Mark the method as removing an entity from the collection.
+
+        Adds "remove from collection" handling to the method. The decorator
+        argument indicates which method argument holds the SQLAlchemy-relevant
+        value to be removed. Arguments can be specified positionally (i.e.
+        integer) or by name::
+
+            @collection.removes(1)
+            def zap(self, item): ...
+
+        For methods where the value to remove is not known at call-time, use
+        collection.removes_return.
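+
+        A named-argument form is also possible (a sketch; 'entity' is just
+        whatever name your method signature uses)::
+
+            @collection.removes('entity')
+            def discard(self, entity=None): ...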
+ + """ + def decorator(fn): + fn._sa_instrument_before = ('fire_remove_event', arg) + return fn + return decorator + + @staticmethod + def removes_return(): + """Mark the method as removing an entity in the collection. + + Adds "remove from collection" handling to the method. The return + value of the method, if any, is considered the value to remove. The + method arguments are not inspected:: + + @collection.removes_return() + def pop(self): ... + + For methods where the value to remove is known at call-time, use + collection.remove. + + """ + def decorator(fn): + fn._sa_instrument_after = 'fire_remove_event' + return fn + return decorator + + +collection_adapter = operator.attrgetter('_sa_adapter') +"""Fetch the :class:`.CollectionAdapter` for a collection.""" + + +class CollectionAdapter(object): + """Bridges between the ORM and arbitrary Python collections. + + Proxies base-level collection operations (append, remove, iterate) + to the underlying Python collection, and emits add/remove events for + entities entering or leaving the collection. + + The ORM uses :class:`.CollectionAdapter` exclusively for interaction with + entity collections. + + + """ + + __slots__ = ( + 'attr', '_key', '_data', 'owner_state', '_converter', 'invalidated') + + def __init__(self, attr, owner_state, data): + self.attr = attr + self._key = attr.key + self._data = weakref.ref(data) + self.owner_state = owner_state + data._sa_adapter = self + self._converter = data._sa_converter + self.invalidated = False + + def _warn_invalidated(self): + util.warn("This collection has been invalidated.") + + @property + def data(self): + "The entity collection being adapted." + return self._data() + + @property + def _referenced_by_owner(self): + """return True if the owner state still refers to this collection. + + This will return False within a bulk replace operation, + where this collection is the one being replaced. 
+ + """ + return self.owner_state.dict[self._key] is self._data() + + def bulk_appender(self): + return self._data()._sa_appender + + def append_with_event(self, item, initiator=None): + """Add an entity to the collection, firing mutation events.""" + + self._data()._sa_appender(item, _sa_initiator=initiator) + + def append_without_event(self, item): + """Add or restore an entity to the collection, firing no events.""" + self._data()._sa_appender(item, _sa_initiator=False) + + def append_multiple_without_event(self, items): + """Add or restore an entity to the collection, firing no events.""" + appender = self._data()._sa_appender + for item in items: + appender(item, _sa_initiator=False) + + def bulk_remover(self): + return self._data()._sa_remover + + def remove_with_event(self, item, initiator=None): + """Remove an entity from the collection, firing mutation events.""" + self._data()._sa_remover(item, _sa_initiator=initiator) + + def remove_without_event(self, item): + """Remove an entity from the collection, firing no events.""" + self._data()._sa_remover(item, _sa_initiator=False) + + def clear_with_event(self, initiator=None): + """Empty the collection, firing a mutation event for each entity.""" + + remover = self._data()._sa_remover + for item in list(self): + remover(item, _sa_initiator=initiator) + + def clear_without_event(self): + """Empty the collection, firing no events.""" + + remover = self._data()._sa_remover + for item in list(self): + remover(item, _sa_initiator=False) + + def __iter__(self): + """Iterate over entities in the collection.""" + + return iter(self._data()._sa_iterator()) + + def __len__(self): + """Count entities in the collection.""" + return len(list(self._data()._sa_iterator())) + + def __bool__(self): + return True + + __nonzero__ = __bool__ + + def fire_append_event(self, item, initiator=None): + """Notify that a entity has entered the collection. + + Initiator is a token owned by the InstrumentedAttribute that + initiated the membership mutation, and should be left as None + unless you are passing along an initiator value from a chained + operation. + + """ + if initiator is not False: + if self.invalidated: + self._warn_invalidated() + return self.attr.fire_append_event( + self.owner_state, + self.owner_state.dict, + item, initiator) + else: + return item + + def fire_remove_event(self, item, initiator=None): + """Notify that a entity has been removed from the collection. + + Initiator is the InstrumentedAttribute that initiated the membership + mutation, and should be left as None unless you are passing along + an initiator value from a chained operation. + + """ + if initiator is not False: + if self.invalidated: + self._warn_invalidated() + self.attr.fire_remove_event( + self.owner_state, + self.owner_state.dict, + item, initiator) + + def fire_pre_remove_event(self, initiator=None): + """Notify that an entity is about to be removed from the collection. + + Only called if the entity cannot be removed after calling + fire_remove_event(). 
+ + """ + if self.invalidated: + self._warn_invalidated() + self.attr.fire_pre_remove_event( + self.owner_state, + self.owner_state.dict, + initiator=initiator) + + def __getstate__(self): + return {'key': self._key, + 'owner_state': self.owner_state, + 'owner_cls': self.owner_state.class_, + 'data': self.data, + 'invalidated': self.invalidated} + + def __setstate__(self, d): + self._key = d['key'] + self.owner_state = d['owner_state'] + self._data = weakref.ref(d['data']) + self._converter = d['data']._sa_converter + d['data']._sa_adapter = self + self.invalidated = d['invalidated'] + self.attr = getattr(d['owner_cls'], self._key).impl + + +def bulk_replace(values, existing_adapter, new_adapter): + """Load a new collection, firing events based on prior like membership. + + Appends instances in ``values`` onto the ``new_adapter``. Events will be + fired for any instance not present in the ``existing_adapter``. Any + instances in ``existing_adapter`` not present in ``values`` will have + remove events fired upon them. + + :param values: An iterable of collection member instances + + :param existing_adapter: A :class:`.CollectionAdapter` of + instances to be replaced + + :param new_adapter: An empty :class:`.CollectionAdapter` + to load with ``values`` + + + """ + + assert isinstance(values, list) + + idset = util.IdentitySet + existing_idset = idset(existing_adapter or ()) + constants = existing_idset.intersection(values or ()) + additions = idset(values or ()).difference(constants) + removals = existing_idset.difference(constants) + + appender = new_adapter.bulk_appender() + + for member in values or (): + if member in additions: + appender(member) + elif member in constants: + appender(member, _sa_initiator=False) + + if existing_adapter: + remover = existing_adapter.bulk_remover() + for member in removals: + remover(member) + + +def prepare_instrumentation(factory): + """Prepare a callable for future use as a collection class factory. + + Given a collection class factory (either a type or no-arg callable), + return another factory that will produce compatible instances when + called. + + This function is responsible for converting collection_class=list + into the run-time behavior of collection_class=InstrumentedList. + + """ + # Convert a builtin to 'Instrumented*' + if factory in __canned_instrumentation: + factory = __canned_instrumentation[factory] + + # Create a specimen + cls = type(factory()) + + # Did factory callable return a builtin? + if cls in __canned_instrumentation: + # Wrap it so that it returns our 'Instrumented*' + factory = __converting_factory(cls, factory) + cls = factory() + + # Instrument the class if needed. + if __instrumentation_mutex.acquire(): + try: + if getattr(cls, '_sa_instrumented', None) != id(cls): + _instrument_class(cls) + finally: + __instrumentation_mutex.release() + + return factory + + +def __converting_factory(specimen_cls, original_factory): + """Return a wrapper that converts a "canned" collection like + set, dict, list into the Instrumented* version. 
+ + """ + + instrumented_cls = __canned_instrumentation[specimen_cls] + + def wrapper(): + collection = original_factory() + return instrumented_cls(collection) + + # often flawed but better than nothing + wrapper.__name__ = "%sWrapper" % original_factory.__name__ + wrapper.__doc__ = original_factory.__doc__ + + return wrapper + + +def _instrument_class(cls): + """Modify methods in a class and install instrumentation.""" + + # In the normal call flow, a request for any of the 3 basic collection + # types is transformed into one of our trivial subclasses + # (e.g. InstrumentedList). Catch anything else that sneaks in here... + if cls.__module__ == '__builtin__': + raise sa_exc.ArgumentError( + "Can not instrument a built-in type. Use a " + "subclass, even a trivial one.") + + roles, methods = _locate_roles_and_methods(cls) + + _setup_canned_roles(cls, roles, methods) + + _assert_required_roles(cls, roles, methods) + + _set_collection_attributes(cls, roles, methods) + + +def _locate_roles_and_methods(cls): + """search for _sa_instrument_role-decorated methods in + method resolution order, assign to roles. + + """ + + roles = {} + methods = {} + + for supercls in cls.__mro__: + for name, method in vars(supercls).items(): + if not util.callable(method): + continue + + # note role declarations + if hasattr(method, '_sa_instrument_role'): + role = method._sa_instrument_role + assert role in ('appender', 'remover', 'iterator', + 'linker', 'converter') + roles.setdefault(role, name) + + # transfer instrumentation requests from decorated function + # to the combined queue + before, after = None, None + if hasattr(method, '_sa_instrument_before'): + op, argument = method._sa_instrument_before + assert op in ('fire_append_event', 'fire_remove_event') + before = op, argument + if hasattr(method, '_sa_instrument_after'): + op = method._sa_instrument_after + assert op in ('fire_append_event', 'fire_remove_event') + after = op + if before: + methods[name] = before + (after, ) + elif after: + methods[name] = None, None, after + return roles, methods + + +def _setup_canned_roles(cls, roles, methods): + """see if this class has "canned" roles based on a known + collection type (dict, set, list). 
Apply those roles + as needed to the "roles" dictionary, and also + prepare "decorator" methods + + """ + collection_type = util.duck_type_collection(cls) + if collection_type in __interfaces: + canned_roles, decorators = __interfaces[collection_type] + for role, name in canned_roles.items(): + roles.setdefault(role, name) + + # apply ABC auto-decoration to methods that need it + for method, decorator in decorators.items(): + fn = getattr(cls, method, None) + if (fn and method not in methods and + not hasattr(fn, '_sa_instrumented')): + setattr(cls, method, decorator(fn)) + + +def _assert_required_roles(cls, roles, methods): + """ensure all roles are present, and apply implicit instrumentation if + needed + + """ + if 'appender' not in roles or not hasattr(cls, roles['appender']): + raise sa_exc.ArgumentError( + "Type %s must elect an appender method to be " + "a collection class" % cls.__name__) + elif (roles['appender'] not in methods and + not hasattr(getattr(cls, roles['appender']), '_sa_instrumented')): + methods[roles['appender']] = ('fire_append_event', 1, None) + + if 'remover' not in roles or not hasattr(cls, roles['remover']): + raise sa_exc.ArgumentError( + "Type %s must elect a remover method to be " + "a collection class" % cls.__name__) + elif (roles['remover'] not in methods and + not hasattr(getattr(cls, roles['remover']), '_sa_instrumented')): + methods[roles['remover']] = ('fire_remove_event', 1, None) + + if 'iterator' not in roles or not hasattr(cls, roles['iterator']): + raise sa_exc.ArgumentError( + "Type %s must elect an iterator method to be " + "a collection class" % cls.__name__) + + +def _set_collection_attributes(cls, roles, methods): + """apply ad-hoc instrumentation from decorators, class-level defaults + and implicit role declarations + + """ + for method_name, (before, argument, after) in methods.items(): + setattr(cls, method_name, + _instrument_membership_mutator(getattr(cls, method_name), + before, argument, after)) + # intern the role map + for role, method_name in roles.items(): + setattr(cls, '_sa_%s' % role, getattr(cls, method_name)) + + cls._sa_adapter = None + + if not hasattr(cls, '_sa_converter'): + cls._sa_converter = None + cls._sa_instrumented = id(cls) + + +def _instrument_membership_mutator(method, before, argument, after): + """Route method args and/or return value through the collection + adapter.""" + # This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))' + if before: + fn_args = list(util.flatten_iterator(inspect_getargspec(method)[0])) + if isinstance(argument, int): + pos_arg = argument + named_arg = len(fn_args) > argument and fn_args[argument] or None + else: + if argument in fn_args: + pos_arg = fn_args.index(argument) + else: + pos_arg = None + named_arg = argument + del fn_args + + def wrapper(*args, **kw): + if before: + if pos_arg is None: + if named_arg not in kw: + raise sa_exc.ArgumentError( + "Missing argument %s" % argument) + value = kw[named_arg] + else: + if len(args) > pos_arg: + value = args[pos_arg] + elif named_arg in kw: + value = kw[named_arg] + else: + raise sa_exc.ArgumentError( + "Missing argument %s" % argument) + + initiator = kw.pop('_sa_initiator', None) + if initiator is False: + executor = None + else: + executor = args[0]._sa_adapter + + if before and executor: + getattr(executor, before)(value, initiator) + + if not after or not executor: + return method(*args, **kw) + else: + res = method(*args, **kw) + if res is not None: + getattr(executor, after)(res, initiator) + return res + + 
wrapper._sa_instrumented = True + if hasattr(method, "_sa_instrument_role"): + wrapper._sa_instrument_role = method._sa_instrument_role + wrapper.__name__ = method.__name__ + wrapper.__doc__ = method.__doc__ + return wrapper + + +def __set(collection, item, _sa_initiator=None): + """Run set events, may eventually be inlined into decorators.""" + + if _sa_initiator is not False: + executor = collection._sa_adapter + if executor: + item = executor.fire_append_event(item, _sa_initiator) + return item + + +def __del(collection, item, _sa_initiator=None): + """Run del events, may eventually be inlined into decorators.""" + if _sa_initiator is not False: + executor = collection._sa_adapter + if executor: + executor.fire_remove_event(item, _sa_initiator) + + +def __before_delete(collection, _sa_initiator=None): + """Special method to run 'commit existing value' methods""" + executor = collection._sa_adapter + if executor: + executor.fire_pre_remove_event(_sa_initiator) + + +def _list_decorators(): + """Tailored instrumentation wrappers for any list-like class.""" + + def _tidy(fn): + fn._sa_instrumented = True + fn.__doc__ = getattr(list, fn.__name__).__doc__ + + def append(fn): + def append(self, item, _sa_initiator=None): + item = __set(self, item, _sa_initiator) + fn(self, item) + _tidy(append) + return append + + def remove(fn): + def remove(self, value, _sa_initiator=None): + __before_delete(self, _sa_initiator) + # testlib.pragma exempt:__eq__ + fn(self, value) + __del(self, value, _sa_initiator) + _tidy(remove) + return remove + + def insert(fn): + def insert(self, index, value): + value = __set(self, value) + fn(self, index, value) + _tidy(insert) + return insert + + def __setitem__(fn): + def __setitem__(self, index, value): + if not isinstance(index, slice): + existing = self[index] + if existing is not None: + __del(self, existing) + value = __set(self, value) + fn(self, index, value) + else: + # slice assignment requires __delitem__, insert, __len__ + step = index.step or 1 + start = index.start or 0 + if start < 0: + start += len(self) + if index.stop is not None: + stop = index.stop + else: + stop = len(self) + if stop < 0: + stop += len(self) + + if step == 1: + for i in range(start, stop, step): + if len(self) > start: + del self[start] + + for i, item in enumerate(value): + self.insert(i + start, item) + else: + rng = list(range(start, stop, step)) + if len(value) != len(rng): + raise ValueError( + "attempt to assign sequence of size %s to " + "extended slice of size %s" % (len(value), + len(rng))) + for i, item in zip(rng, value): + self.__setitem__(i, item) + _tidy(__setitem__) + return __setitem__ + + def __delitem__(fn): + def __delitem__(self, index): + if not isinstance(index, slice): + item = self[index] + __del(self, item) + fn(self, index) + else: + # slice deletion requires __getslice__ and a slice-groking + # __getitem__ for stepped deletion + # note: not breaking this into atomic dels + for item in self[index]: + __del(self, item) + fn(self, index) + _tidy(__delitem__) + return __delitem__ + + if util.py2k: + def __setslice__(fn): + def __setslice__(self, start, end, values): + for value in self[start:end]: + __del(self, value) + values = [__set(self, value) for value in values] + fn(self, start, end, values) + _tidy(__setslice__) + return __setslice__ + + def __delslice__(fn): + def __delslice__(self, start, end): + for value in self[start:end]: + __del(self, value) + fn(self, start, end) + _tidy(__delslice__) + return __delslice__ + + def extend(fn): + def 
extend(self, iterable): + for value in iterable: + self.append(value) + _tidy(extend) + return extend + + def __iadd__(fn): + def __iadd__(self, iterable): + # list.__iadd__ takes any iterable and seems to let TypeError + # raise as-is instead of returning NotImplemented + for value in iterable: + self.append(value) + return self + _tidy(__iadd__) + return __iadd__ + + def pop(fn): + def pop(self, index=-1): + __before_delete(self) + item = fn(self, index) + __del(self, item) + return item + _tidy(pop) + return pop + + if not util.py2k: + def clear(fn): + def clear(self, index=-1): + for item in self: + __del(self, item) + fn(self) + _tidy(clear) + return clear + + # __imul__ : not wrapping this. all members of the collection are already + # present, so no need to fire appends... wrapping it with an explicit + # decorator is still possible, so events on *= can be had if they're + # desired. hard to imagine a use case for __imul__, though. + + l = locals().copy() + l.pop('_tidy') + return l + + +def _dict_decorators(): + """Tailored instrumentation wrappers for any dict-like mapping class.""" + + def _tidy(fn): + fn._sa_instrumented = True + fn.__doc__ = getattr(dict, fn.__name__).__doc__ + + Unspecified = util.symbol('Unspecified') + + def __setitem__(fn): + def __setitem__(self, key, value, _sa_initiator=None): + if key in self: + __del(self, self[key], _sa_initiator) + value = __set(self, value, _sa_initiator) + fn(self, key, value) + _tidy(__setitem__) + return __setitem__ + + def __delitem__(fn): + def __delitem__(self, key, _sa_initiator=None): + if key in self: + __del(self, self[key], _sa_initiator) + fn(self, key) + _tidy(__delitem__) + return __delitem__ + + def clear(fn): + def clear(self): + for key in self: + __del(self, self[key]) + fn(self) + _tidy(clear) + return clear + + def pop(fn): + def pop(self, key, default=Unspecified): + if key in self: + __del(self, self[key]) + if default is Unspecified: + return fn(self, key) + else: + return fn(self, key, default) + _tidy(pop) + return pop + + def popitem(fn): + def popitem(self): + __before_delete(self) + item = fn(self) + __del(self, item[1]) + return item + _tidy(popitem) + return popitem + + def setdefault(fn): + def setdefault(self, key, default=None): + if key not in self: + self.__setitem__(key, default) + return default + else: + return self.__getitem__(key) + _tidy(setdefault) + return setdefault + + def update(fn): + def update(self, __other=Unspecified, **kw): + if __other is not Unspecified: + if hasattr(__other, 'keys'): + for key in list(__other): + if (key not in self or + self[key] is not __other[key]): + self[key] = __other[key] + else: + for key, value in __other: + if key not in self or self[key] is not value: + self[key] = value + for key in kw: + if key not in self or self[key] is not kw[key]: + self[key] = kw[key] + _tidy(update) + return update + + l = locals().copy() + l.pop('_tidy') + l.pop('Unspecified') + return l + +_set_binop_bases = (set, frozenset) + + +def _set_binops_check_strict(self, obj): + """Allow only set, frozenset and self.__class__-derived + objects in binops.""" + return isinstance(obj, _set_binop_bases + (self.__class__,)) + + +def _set_binops_check_loose(self, obj): + """Allow anything set-like to participate in set binops.""" + return (isinstance(obj, _set_binop_bases + (self.__class__,)) or + util.duck_type_collection(obj) == set) + + +def _set_decorators(): + """Tailored instrumentation wrappers for any set-like class.""" + + def _tidy(fn): + fn._sa_instrumented = True + fn.__doc__ 
= getattr(set, fn.__name__).__doc__ + + Unspecified = util.symbol('Unspecified') + + def add(fn): + def add(self, value, _sa_initiator=None): + if value not in self: + value = __set(self, value, _sa_initiator) + # testlib.pragma exempt:__hash__ + fn(self, value) + _tidy(add) + return add + + def discard(fn): + def discard(self, value, _sa_initiator=None): + # testlib.pragma exempt:__hash__ + if value in self: + __del(self, value, _sa_initiator) + # testlib.pragma exempt:__hash__ + fn(self, value) + _tidy(discard) + return discard + + def remove(fn): + def remove(self, value, _sa_initiator=None): + # testlib.pragma exempt:__hash__ + if value in self: + __del(self, value, _sa_initiator) + # testlib.pragma exempt:__hash__ + fn(self, value) + _tidy(remove) + return remove + + def pop(fn): + def pop(self): + __before_delete(self) + item = fn(self) + __del(self, item) + return item + _tidy(pop) + return pop + + def clear(fn): + def clear(self): + for item in list(self): + self.remove(item) + _tidy(clear) + return clear + + def update(fn): + def update(self, value): + for item in value: + self.add(item) + _tidy(update) + return update + + def __ior__(fn): + def __ior__(self, value): + if not _set_binops_check_strict(self, value): + return NotImplemented + for item in value: + self.add(item) + return self + _tidy(__ior__) + return __ior__ + + def difference_update(fn): + def difference_update(self, value): + for item in value: + self.discard(item) + _tidy(difference_update) + return difference_update + + def __isub__(fn): + def __isub__(self, value): + if not _set_binops_check_strict(self, value): + return NotImplemented + for item in value: + self.discard(item) + return self + _tidy(__isub__) + return __isub__ + + def intersection_update(fn): + def intersection_update(self, other): + want, have = self.intersection(other), set(self) + remove, add = have - want, want - have + + for item in remove: + self.remove(item) + for item in add: + self.add(item) + _tidy(intersection_update) + return intersection_update + + def __iand__(fn): + def __iand__(self, other): + if not _set_binops_check_strict(self, other): + return NotImplemented + want, have = self.intersection(other), set(self) + remove, add = have - want, want - have + + for item in remove: + self.remove(item) + for item in add: + self.add(item) + return self + _tidy(__iand__) + return __iand__ + + def symmetric_difference_update(fn): + def symmetric_difference_update(self, other): + want, have = self.symmetric_difference(other), set(self) + remove, add = have - want, want - have + + for item in remove: + self.remove(item) + for item in add: + self.add(item) + _tidy(symmetric_difference_update) + return symmetric_difference_update + + def __ixor__(fn): + def __ixor__(self, other): + if not _set_binops_check_strict(self, other): + return NotImplemented + want, have = self.symmetric_difference(other), set(self) + remove, add = have - want, want - have + + for item in remove: + self.remove(item) + for item in add: + self.add(item) + return self + _tidy(__ixor__) + return __ixor__ + + l = locals().copy() + l.pop('_tidy') + l.pop('Unspecified') + return l + + +class InstrumentedList(list): + """An instrumented version of the built-in list.""" + + +class InstrumentedSet(set): + """An instrumented version of the built-in set.""" + + +class InstrumentedDict(dict): + """An instrumented version of the built-in dict.""" + + +__canned_instrumentation = { + list: InstrumentedList, + set: InstrumentedSet, + dict: InstrumentedDict, +} + +__interfaces = { + 
list: ( + {'appender': 'append', 'remover': 'remove', + 'iterator': '__iter__'}, _list_decorators() + ), + + set: ({'appender': 'add', + 'remover': 'remove', + 'iterator': '__iter__'}, _set_decorators() + ), + + # decorators are required for dicts and object collections. + dict: ({'iterator': 'values'}, _dict_decorators()) if util.py3k + else ({'iterator': 'itervalues'}, _dict_decorators()), +} + + +class MappedCollection(dict): + """A basic dictionary-based collection class. + + Extends dict with the minimal bag semantics that collection + classes require. ``set`` and ``remove`` are implemented in terms + of a keying function: any callable that takes an object and + returns an object for use as a dictionary key. + + """ + + def __init__(self, keyfunc): + """Create a new collection with keying provided by keyfunc. + + keyfunc may be any callable that takes an object and returns an object + for use as a dictionary key. + + The keyfunc will be called every time the ORM needs to add a member by + value-only (such as when loading instances from the database) or + remove a member. The usual cautions about dictionary keying apply- + ``keyfunc(object)`` should return the same output for the life of the + collection. Keying based on mutable properties can result in + unreachable instances "lost" in the collection. + + """ + self.keyfunc = keyfunc + + @collection.appender + @collection.internally_instrumented + def set(self, value, _sa_initiator=None): + """Add an item by value, consulting the keyfunc for the key.""" + + key = self.keyfunc(value) + self.__setitem__(key, value, _sa_initiator) + + @collection.remover + @collection.internally_instrumented + def remove(self, value, _sa_initiator=None): + """Remove an item by value, consulting the keyfunc for the key.""" + + key = self.keyfunc(value) + # Let self[key] raise if key is not in this collection + # testlib.pragma exempt:__ne__ + if self[key] != value: + raise sa_exc.InvalidRequestError( + "Can not remove '%s': collection holds '%s' for key '%s'. " + "Possible cause: is the MappedCollection key function " + "based on mutable properties or properties that only obtain " + "values after flush?" % + (value, self[key], key)) + self.__delitem__(key, _sa_initiator) + + @collection.converter + def _convert(self, dictlike): + """Validate and convert a dict-like object into values for set()ing. + + This is called behind the scenes when a MappedCollection is replaced + entirely by another collection, as in:: + + myobj.mappedcollection = {'a':obj1, 'b': obj2} # ... + + Raises a TypeError if the key in any (key, value) pair in the dictlike + object does not match the key that this collection's keyfunc would + have assigned for that value. + + """ + for incoming_key, value in util.dictlike_iteritems(dictlike): + new_key = self.keyfunc(value) + if incoming_key != new_key: + raise TypeError( + "Found incompatible key %r for value %r; this " + "collection's " + "keying function requires a key of %r for this value." % ( + incoming_key, value, new_key)) + yield value + +# ensure instrumentation is associated with +# these built-in classes; if a user-defined class +# subclasses these and uses @internally_instrumented, +# the superclass is otherwise not instrumented. +# see [ticket:2406]. 
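+#
+# A minimal usage sketch (hypothetical model names, not part of this
+# module): keying a relationship's collection on a column of the child
+# class::
+#
+#     class Note(Base):
+#         __tablename__ = 'note'
+#         id = Column(Integer, primary_key=True)
+#         item_id = Column(Integer, ForeignKey('item.id'))
+#         keyword = Column(String(64))
+#
+#     class Item(Base):
+#         __tablename__ = 'item'
+#         id = Column(Integer, primary_key=True)
+#         notes = relationship(
+#             "Note",
+#             collection_class=attribute_mapped_collection('keyword'))
+#
+# item.notes then behaves like a dict keyed on each Note's .keyword value,
+# backed by the MappedCollection machinery defined above.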
+_instrument_class(MappedCollection) +_instrument_class(InstrumentedList) +_instrument_class(InstrumentedSet) diff --git a/app/lib/sqlalchemy/orm/dependency.py b/app/lib/sqlalchemy/orm/dependency.py new file mode 100644 index 0000000..a87ec56 --- /dev/null +++ b/app/lib/sqlalchemy/orm/dependency.py @@ -0,0 +1,1175 @@ +# orm/dependency.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Relationship dependencies. + +""" + +from .. import sql, util, exc as sa_exc +from . import attributes, exc, sync, unitofwork, \ + util as mapperutil +from .interfaces import ONETOMANY, MANYTOONE, MANYTOMANY + + +class DependencyProcessor(object): + def __init__(self, prop): + self.prop = prop + self.cascade = prop.cascade + self.mapper = prop.mapper + self.parent = prop.parent + self.secondary = prop.secondary + self.direction = prop.direction + self.post_update = prop.post_update + self.passive_deletes = prop.passive_deletes + self.passive_updates = prop.passive_updates + self.enable_typechecks = prop.enable_typechecks + if self.passive_deletes: + self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE + else: + self._passive_delete_flag = attributes.PASSIVE_OFF + if self.passive_updates: + self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE + else: + self._passive_update_flag = attributes.PASSIVE_OFF + + self.key = prop.key + if not self.prop.synchronize_pairs: + raise sa_exc.ArgumentError( + "Can't build a DependencyProcessor for relationship %s. " + "No target attributes to populate between parent and " + "child are present" % + self.prop) + + @classmethod + def from_relationship(cls, prop): + return _direction_to_processor[prop.direction](prop) + + def hasparent(self, state): + """return True if the given object instance has a parent, + according to the ``InstrumentedAttribute`` handled by this + ``DependencyProcessor``. + + """ + return self.parent.class_manager.get_impl(self.key).hasparent(state) + + def per_property_preprocessors(self, uow): + """establish actions and dependencies related to a flush. + + These actions will operate on all relevant states in + the aggregate. + + """ + uow.register_preprocessor(self, True) + + def per_property_flush_actions(self, uow): + after_save = unitofwork.ProcessAll(uow, self, False, True) + before_delete = unitofwork.ProcessAll(uow, self, True, True) + + parent_saves = unitofwork.SaveUpdateAll( + uow, + self.parent.primary_base_mapper + ) + child_saves = unitofwork.SaveUpdateAll( + uow, + self.mapper.primary_base_mapper + ) + + parent_deletes = unitofwork.DeleteAll( + uow, + self.parent.primary_base_mapper + ) + child_deletes = unitofwork.DeleteAll( + uow, + self.mapper.primary_base_mapper + ) + + self.per_property_dependencies(uow, + parent_saves, + child_saves, + parent_deletes, + child_deletes, + after_save, + before_delete + ) + + def per_state_flush_actions(self, uow, states, isdelete): + """establish actions and dependencies related to a flush. + + These actions will operate on all relevant states + individually. This occurs only if there are cycles + in the 'aggregated' version of events. 
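+
+        (Here "cycles" means mutually-dependent flush actions, as arise for
+        example from self-referential relationships; they force the unit of
+        work to break the aggregate save/update/delete steps down and order
+        them per-object instead.)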
+ + """ + + parent_base_mapper = self.parent.primary_base_mapper + child_base_mapper = self.mapper.primary_base_mapper + child_saves = unitofwork.SaveUpdateAll(uow, child_base_mapper) + child_deletes = unitofwork.DeleteAll(uow, child_base_mapper) + + # locate and disable the aggregate processors + # for this dependency + + if isdelete: + before_delete = unitofwork.ProcessAll(uow, self, True, True) + before_delete.disabled = True + else: + after_save = unitofwork.ProcessAll(uow, self, False, True) + after_save.disabled = True + + # check if the "child" side is part of the cycle + + if child_saves not in uow.cycles: + # based on the current dependencies we use, the saves/ + # deletes should always be in the 'cycles' collection + # together. if this changes, we will have to break up + # this method a bit more. + assert child_deletes not in uow.cycles + + # child side is not part of the cycle, so we will link per-state + # actions to the aggregate "saves", "deletes" actions + child_actions = [ + (child_saves, False), (child_deletes, True) + ] + child_in_cycles = False + else: + child_in_cycles = True + + # check if the "parent" side is part of the cycle + if not isdelete: + parent_saves = unitofwork.SaveUpdateAll( + uow, + self.parent.base_mapper) + parent_deletes = before_delete = None + if parent_saves in uow.cycles: + parent_in_cycles = True + else: + parent_deletes = unitofwork.DeleteAll( + uow, + self.parent.base_mapper) + parent_saves = after_save = None + if parent_deletes in uow.cycles: + parent_in_cycles = True + + # now create actions /dependencies for each state. + + for state in states: + # detect if there's anything changed or loaded + # by a preprocessor on this state/attribute. In the + # case of deletes we may try to load missing items here as well. + sum_ = state.manager[self.key].impl.get_all_pending( + state, state.dict, + self._passive_delete_flag + if isdelete + else attributes.PASSIVE_NO_INITIALIZE) + + if not sum_: + continue + + if isdelete: + before_delete = unitofwork.ProcessState(uow, + self, True, state) + if parent_in_cycles: + parent_deletes = unitofwork.DeleteState( + uow, + state, + parent_base_mapper) + else: + after_save = unitofwork.ProcessState(uow, self, False, state) + if parent_in_cycles: + parent_saves = unitofwork.SaveUpdateState( + uow, + state, + parent_base_mapper) + + if child_in_cycles: + child_actions = [] + for child_state, child in sum_: + if child_state not in uow.states: + child_action = (None, None) + else: + (deleted, listonly) = uow.states[child_state] + if deleted: + child_action = ( + unitofwork.DeleteState( + uow, child_state, + child_base_mapper), + True) + else: + child_action = ( + unitofwork.SaveUpdateState( + uow, child_state, + child_base_mapper), + False) + child_actions.append(child_action) + + # establish dependencies between our possibly per-state + # parent action and our possibly per-state child action. 
+ for child_action, childisdelete in child_actions: + self.per_state_dependencies(uow, parent_saves, + parent_deletes, + child_action, + after_save, before_delete, + isdelete, childisdelete) + + def presort_deletes(self, uowcommit, states): + return False + + def presort_saves(self, uowcommit, states): + return False + + def process_deletes(self, uowcommit, states): + pass + + def process_saves(self, uowcommit, states): + pass + + def prop_has_changes(self, uowcommit, states, isdelete): + if not isdelete or self.passive_deletes: + passive = attributes.PASSIVE_NO_INITIALIZE + elif self.direction is MANYTOONE: + passive = attributes.PASSIVE_NO_FETCH_RELATED + else: + passive = attributes.PASSIVE_OFF + + for s in states: + # TODO: add a high speed method + # to InstanceState which returns: attribute + # has a non-None value, or had one + history = uowcommit.get_attribute_history( + s, + self.key, + passive) + if history and not history.empty(): + return True + else: + return states and \ + not self.prop._is_self_referential and \ + self.mapper in uowcommit.mappers + + def _verify_canload(self, state): + if self.prop.uselist and state is None: + raise exc.FlushError( + "Can't flush None value found in " + "collection %s" % (self.prop, )) + elif state is not None and \ + not self.mapper._canload( + state, allow_subtypes=not self.enable_typechecks): + if self.mapper._canload(state, allow_subtypes=True): + raise exc.FlushError('Attempting to flush an item of type ' + '%(x)s as a member of collection ' + '"%(y)s". Expected an object of type ' + '%(z)s or a polymorphic subclass of ' + 'this type. If %(x)s is a subclass of ' + '%(z)s, configure mapper "%(zm)s" to ' + 'load this subtype polymorphically, or ' + 'set enable_typechecks=False to allow ' + 'any subtype to be accepted for flush. ' + % { + 'x': state.class_, + 'y': self.prop, + 'z': self.mapper.class_, + 'zm': self.mapper, + }) + else: + raise exc.FlushError( + 'Attempting to flush an item of type ' + '%(x)s as a member of collection ' + '"%(y)s". Expected an object of type ' + '%(z)s or a polymorphic subclass of ' + 'this type.' 
% { + 'x': state.class_, + 'y': self.prop, + 'z': self.mapper.class_, + }) + + def _synchronize(self, state, child, associationrow, + clearkeys, uowcommit): + raise NotImplementedError() + + def _get_reversed_processed_set(self, uow): + if not self.prop._reverse_property: + return None + + process_key = tuple(sorted( + [self.key] + + [p.key for p in self.prop._reverse_property] + )) + return uow.memo( + ('reverse_key', process_key), + set + ) + + def _post_update(self, state, uowcommit, related, is_m2o_delete=False): + for x in related: + if not is_m2o_delete or x is not None: + uowcommit.issue_post_update( + state, + [r for l, r in self.prop.synchronize_pairs] + ) + break + + def _pks_changed(self, uowcommit, state): + raise NotImplementedError() + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, self.prop) + + +class OneToManyDP(DependencyProcessor): + + def per_property_dependencies(self, uow, parent_saves, + child_saves, + parent_deletes, + child_deletes, + after_save, + before_delete, + ): + if self.post_update: + child_post_updates = unitofwork.IssuePostUpdate( + uow, + self.mapper.primary_base_mapper, + False) + child_pre_updates = unitofwork.IssuePostUpdate( + uow, + self.mapper.primary_base_mapper, + True) + + uow.dependencies.update([ + (child_saves, after_save), + (parent_saves, after_save), + (after_save, child_post_updates), + + (before_delete, child_pre_updates), + (child_pre_updates, parent_deletes), + (child_pre_updates, child_deletes), + + ]) + else: + uow.dependencies.update([ + (parent_saves, after_save), + (after_save, child_saves), + (after_save, child_deletes), + + (child_saves, parent_deletes), + (child_deletes, parent_deletes), + + (before_delete, child_saves), + (before_delete, child_deletes), + ]) + + def per_state_dependencies(self, uow, + save_parent, + delete_parent, + child_action, + after_save, before_delete, + isdelete, childisdelete): + + if self.post_update: + + child_post_updates = unitofwork.IssuePostUpdate( + uow, + self.mapper.primary_base_mapper, + False) + child_pre_updates = unitofwork.IssuePostUpdate( + uow, + self.mapper.primary_base_mapper, + True) + + # TODO: this whole block is not covered + # by any tests + if not isdelete: + if childisdelete: + uow.dependencies.update([ + (child_action, after_save), + (after_save, child_post_updates), + ]) + else: + uow.dependencies.update([ + (save_parent, after_save), + (child_action, after_save), + (after_save, child_post_updates), + ]) + else: + if childisdelete: + uow.dependencies.update([ + (before_delete, child_pre_updates), + (child_pre_updates, delete_parent), + ]) + else: + uow.dependencies.update([ + (before_delete, child_pre_updates), + (child_pre_updates, delete_parent), + ]) + elif not isdelete: + uow.dependencies.update([ + (save_parent, after_save), + (after_save, child_action), + (save_parent, child_action) + ]) + else: + uow.dependencies.update([ + (before_delete, child_action), + (child_action, delete_parent) + ]) + + def presort_deletes(self, uowcommit, states): + # head object is being deleted, and we manage its list of + # child objects the child objects have to have their + # foreign key to the parent set to NULL + should_null_fks = not self.cascade.delete and \ + not self.passive_deletes == 'all' + + for state in states: + history = uowcommit.get_attribute_history( + state, + self.key, + self._passive_delete_flag) + if history: + for child in history.deleted: + if child is not None and self.hasparent(child) is False: + if self.cascade.delete_orphan: + 
uowcommit.register_object(child, isdelete=True) + else: + uowcommit.register_object(child) + + if should_null_fks: + for child in history.unchanged: + if child is not None: + uowcommit.register_object( + child, operation="delete", prop=self.prop) + + def presort_saves(self, uowcommit, states): + children_added = uowcommit.memo(('children_added', self), set) + + for state in states: + pks_changed = self._pks_changed(uowcommit, state) + + if not pks_changed or self.passive_updates: + passive = attributes.PASSIVE_NO_INITIALIZE + else: + passive = attributes.PASSIVE_OFF + + history = uowcommit.get_attribute_history( + state, + self.key, + passive) + if history: + for child in history.added: + if child is not None: + uowcommit.register_object(child, cancel_delete=True, + operation="add", + prop=self.prop) + + children_added.update(history.added) + + for child in history.deleted: + if not self.cascade.delete_orphan: + uowcommit.register_object(child, isdelete=False, + operation='delete', + prop=self.prop) + elif self.hasparent(child) is False: + uowcommit.register_object( + child, isdelete=True, + operation="delete", prop=self.prop) + for c, m, st_, dct_ in self.mapper.cascade_iterator( + 'delete', child): + uowcommit.register_object( + st_, + isdelete=True) + + if pks_changed: + if history: + for child in history.unchanged: + if child is not None: + uowcommit.register_object( + child, + False, + self.passive_updates, + operation="pk change", + prop=self.prop) + + def process_deletes(self, uowcommit, states): + # head object is being deleted, and we manage its list of + # child objects the child objects have to have their foreign + # key to the parent set to NULL this phase can be called + # safely for any cascade but is unnecessary if delete cascade + # is on. + + if self.post_update or not self.passive_deletes == 'all': + children_added = uowcommit.memo(('children_added', self), set) + + for state in states: + history = uowcommit.get_attribute_history( + state, + self.key, + self._passive_delete_flag) + if history: + for child in history.deleted: + if child is not None and \ + self.hasparent(child) is False: + self._synchronize( + state, + child, + None, True, + uowcommit, False) + if self.post_update and child: + self._post_update(child, uowcommit, [state]) + + if self.post_update or not self.cascade.delete: + for child in set(history.unchanged).\ + difference(children_added): + if child is not None: + self._synchronize( + state, + child, + None, True, + uowcommit, False) + if self.post_update and child: + self._post_update(child, + uowcommit, + [state]) + + # technically, we can even remove each child from the + # collection here too. but this would be a somewhat + # inconsistent behavior since it wouldn't happen + # if the old parent wasn't deleted but child was moved. 
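+                # (illustration, not literal code: for a plain one-to-many
+                # without delete cascade, the net effect of this phase is an
+                # UPDATE child SET <fk> = NULL for each remaining child row,
+                # issued before the parent row is deleted.)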
+ + def process_saves(self, uowcommit, states): + for state in states: + history = uowcommit.get_attribute_history( + state, + self.key, + attributes.PASSIVE_NO_INITIALIZE) + if history: + for child in history.added: + self._synchronize(state, child, None, + False, uowcommit, False) + if child is not None and self.post_update: + self._post_update(child, uowcommit, [state]) + + for child in history.deleted: + if not self.cascade.delete_orphan and \ + not self.hasparent(child): + self._synchronize(state, child, None, True, + uowcommit, False) + + if self._pks_changed(uowcommit, state): + for child in history.unchanged: + self._synchronize(state, child, None, + False, uowcommit, True) + + def _synchronize(self, state, child, + associationrow, clearkeys, uowcommit, + pks_changed): + source = state + dest = child + self._verify_canload(child) + if dest is None or \ + (not self.post_update and uowcommit.is_deleted(dest)): + return + if clearkeys: + sync.clear(dest, self.mapper, self.prop.synchronize_pairs) + else: + sync.populate(source, self.parent, dest, self.mapper, + self.prop.synchronize_pairs, uowcommit, + self.passive_updates and pks_changed) + + def _pks_changed(self, uowcommit, state): + return sync.source_modified( + uowcommit, + state, + self.parent, + self.prop.synchronize_pairs) + + +class ManyToOneDP(DependencyProcessor): + def __init__(self, prop): + DependencyProcessor.__init__(self, prop) + self.mapper._dependency_processors.append(DetectKeySwitch(prop)) + + def per_property_dependencies(self, uow, + parent_saves, + child_saves, + parent_deletes, + child_deletes, + after_save, + before_delete): + + if self.post_update: + parent_post_updates = unitofwork.IssuePostUpdate( + uow, + self.parent.primary_base_mapper, + False) + parent_pre_updates = unitofwork.IssuePostUpdate( + uow, + self.parent.primary_base_mapper, + True) + + uow.dependencies.update([ + (child_saves, after_save), + (parent_saves, after_save), + (after_save, parent_post_updates), + + (after_save, parent_pre_updates), + (before_delete, parent_pre_updates), + + (parent_pre_updates, child_deletes), + ]) + else: + uow.dependencies.update([ + (child_saves, after_save), + (after_save, parent_saves), + (parent_saves, child_deletes), + (parent_deletes, child_deletes) + ]) + + def per_state_dependencies(self, uow, + save_parent, + delete_parent, + child_action, + after_save, before_delete, + isdelete, childisdelete): + + if self.post_update: + + if not isdelete: + parent_post_updates = unitofwork.IssuePostUpdate( + uow, + self.parent.primary_base_mapper, + False) + if childisdelete: + uow.dependencies.update([ + (after_save, parent_post_updates), + (parent_post_updates, child_action) + ]) + else: + uow.dependencies.update([ + (save_parent, after_save), + (child_action, after_save), + + (after_save, parent_post_updates) + ]) + else: + parent_pre_updates = unitofwork.IssuePostUpdate( + uow, + self.parent.primary_base_mapper, + True) + + uow.dependencies.update([ + (before_delete, parent_pre_updates), + (parent_pre_updates, delete_parent), + (parent_pre_updates, child_action) + ]) + + elif not isdelete: + if not childisdelete: + uow.dependencies.update([ + (child_action, after_save), + (after_save, save_parent), + ]) + else: + uow.dependencies.update([ + (after_save, save_parent), + ]) + + else: + if childisdelete: + uow.dependencies.update([ + (delete_parent, child_action) + ]) + + def presort_deletes(self, uowcommit, states): + if self.cascade.delete or self.cascade.delete_orphan: + for state in states: + history = 
uowcommit.get_attribute_history( + state, + self.key, + self._passive_delete_flag) + if history: + if self.cascade.delete_orphan: + todelete = history.sum() + else: + todelete = history.non_deleted() + for child in todelete: + if child is None: + continue + uowcommit.register_object( + child, isdelete=True, + operation="delete", prop=self.prop) + t = self.mapper.cascade_iterator('delete', child) + for c, m, st_, dct_ in t: + uowcommit.register_object( + st_, isdelete=True) + + def presort_saves(self, uowcommit, states): + for state in states: + uowcommit.register_object(state, operation="add", prop=self.prop) + if self.cascade.delete_orphan: + history = uowcommit.get_attribute_history( + state, + self.key, + self._passive_delete_flag) + if history: + for child in history.deleted: + if self.hasparent(child) is False: + uowcommit.register_object( + child, isdelete=True, + operation="delete", prop=self.prop) + + t = self.mapper.cascade_iterator('delete', child) + for c, m, st_, dct_ in t: + uowcommit.register_object(st_, isdelete=True) + + def process_deletes(self, uowcommit, states): + if self.post_update and \ + not self.cascade.delete_orphan and \ + not self.passive_deletes == 'all': + + # post_update means we have to update our + # row to not reference the child object + # before we can DELETE the row + for state in states: + self._synchronize(state, None, None, True, uowcommit) + if state and self.post_update: + history = uowcommit.get_attribute_history( + state, + self.key, + self._passive_delete_flag) + if history: + self._post_update( + state, uowcommit, history.sum(), + is_m2o_delete=True) + + def process_saves(self, uowcommit, states): + for state in states: + history = uowcommit.get_attribute_history( + state, + self.key, + attributes.PASSIVE_NO_INITIALIZE) + if history: + if history.added: + for child in history.added: + self._synchronize(state, child, None, False, + uowcommit, "add") + if self.post_update: + self._post_update(state, uowcommit, history.sum()) + + def _synchronize(self, state, child, associationrow, + clearkeys, uowcommit, operation=None): + if state is None or \ + (not self.post_update and uowcommit.is_deleted(state)): + return + + if operation is not None and \ + child is not None and \ + not uowcommit.session._contains_state(child): + util.warn( + "Object of type %s not in session, %s " + "operation along '%s' won't proceed" % + (mapperutil.state_class_str(child), operation, self.prop)) + return + + if clearkeys or child is None: + sync.clear(state, self.parent, self.prop.synchronize_pairs) + else: + self._verify_canload(child) + sync.populate(child, self.mapper, state, + self.parent, + self.prop.synchronize_pairs, + uowcommit, + False) + + +class DetectKeySwitch(DependencyProcessor): + """For many-to-one relationships with no one-to-many backref, + searches for parents through the unit of work when a primary + key has changed and updates them. + + Theoretically, this approach could be expanded to support transparent + deletion of objects referenced via many-to-one as well, although + the current attribute system doesn't do enough bookkeeping for this + to be efficient. 
+
+    """
+
+    def per_property_preprocessors(self, uow):
+        if self.prop._reverse_property:
+            if self.passive_updates:
+                return
+            else:
+                if False in (prop.passive_updates for
+                             prop in self.prop._reverse_property):
+                    return
+
+        uow.register_preprocessor(self, False)
+
+    def per_property_flush_actions(self, uow):
+        parent_saves = unitofwork.SaveUpdateAll(
+            uow,
+            self.parent.base_mapper)
+        after_save = unitofwork.ProcessAll(uow, self, False, False)
+        uow.dependencies.update([
+            (parent_saves, after_save)
+        ])
+
+    def per_state_flush_actions(self, uow, states, isdelete):
+        pass
+
+    def presort_deletes(self, uowcommit, states):
+        pass
+
+    def presort_saves(self, uow, states):
+        if not self.passive_updates:
+            # for non-passive updates, register in the preprocess stage
+            # so that mapper save_obj() gets a hold of changes
+            self._process_key_switches(states, uow)
+
+    def prop_has_changes(self, uow, states, isdelete):
+        if not isdelete and self.passive_updates:
+            d = self._key_switchers(uow, states)
+            return bool(d)
+
+        return False
+
+    def process_deletes(self, uowcommit, states):
+        assert False
+
+    def process_saves(self, uowcommit, states):
+        # for passive updates, register objects in the process stage
+        # so that we avoid ManyToOneDP's registering the object without
+        # the listonly flag in its own preprocess stage (results in
+        # UPDATE statements being emitted)
+        assert self.passive_updates
+        self._process_key_switches(states, uowcommit)
+
+    def _key_switchers(self, uow, states):
+        switched, notswitched = uow.memo(
+            ('pk_switchers', self),
+            lambda: (set(), set())
+        )
+
+        allstates = switched.union(notswitched)
+        for s in states:
+            if s not in allstates:
+                if self._pks_changed(uow, s):
+                    switched.add(s)
+                else:
+                    notswitched.add(s)
+        return switched
+
+    def _process_key_switches(self, deplist, uowcommit):
+        switchers = self._key_switchers(uowcommit, deplist)
+        if switchers:
+            # if primary key values have actually changed somewhere, perform
+            # a linear search through the UOW in search of a parent.
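+            # note that this scans every state in the session's identity
+            # map, so the cost grows with the total number of objects the
+            # session is tracking, not with the number of switched keys.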
+ for state in uowcommit.session.identity_map.all_states(): + if not issubclass(state.class_, self.parent.class_): + continue + dict_ = state.dict + related = state.get_impl(self.key).get( + state, dict_, passive=self._passive_update_flag) + if related is not attributes.PASSIVE_NO_RESULT and \ + related is not None: + related_state = attributes.instance_state(dict_[self.key]) + if related_state in switchers: + uowcommit.register_object(state, + False, + self.passive_updates) + sync.populate( + related_state, + self.mapper, state, + self.parent, self.prop.synchronize_pairs, + uowcommit, self.passive_updates) + + def _pks_changed(self, uowcommit, state): + return bool(state.key) and sync.source_modified( + uowcommit, state, self.mapper, self.prop.synchronize_pairs) + + +class ManyToManyDP(DependencyProcessor): + + def per_property_dependencies(self, uow, parent_saves, + child_saves, + parent_deletes, + child_deletes, + after_save, + before_delete + ): + + uow.dependencies.update([ + (parent_saves, after_save), + (child_saves, after_save), + (after_save, child_deletes), + + # a rowswitch on the parent from deleted to saved + # can make this one occur, as the "save" may remove + # an element from the + # "deleted" list before we have a chance to + # process its child rows + (before_delete, parent_saves), + + (before_delete, parent_deletes), + (before_delete, child_deletes), + (before_delete, child_saves), + ]) + + def per_state_dependencies(self, uow, + save_parent, + delete_parent, + child_action, + after_save, before_delete, + isdelete, childisdelete): + if not isdelete: + if childisdelete: + uow.dependencies.update([ + (save_parent, after_save), + (after_save, child_action), + ]) + else: + uow.dependencies.update([ + (save_parent, after_save), + (child_action, after_save), + ]) + else: + uow.dependencies.update([ + (before_delete, child_action), + (before_delete, delete_parent) + ]) + + def presort_deletes(self, uowcommit, states): + # TODO: no tests fail if this whole + # thing is removed !!!! + if not self.passive_deletes: + # if no passive deletes, load history on + # the collection, so that prop_has_changes() + # returns True + for state in states: + uowcommit.get_attribute_history( + state, + self.key, + self._passive_delete_flag) + + def presort_saves(self, uowcommit, states): + if not self.passive_updates: + # if no passive updates, load history on + # each collection where parent has changed PK, + # so that prop_has_changes() returns True + for state in states: + if self._pks_changed(uowcommit, state): + history = uowcommit.get_attribute_history( + state, + self.key, + attributes.PASSIVE_OFF) + + if not self.cascade.delete_orphan: + return + + # check for child items removed from the collection + # if delete_orphan check is turned on. 
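+        # (each orphan found below is registered for deletion, together
+        # with anything reachable from it via its own delete cascade)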
+ for state in states: + history = uowcommit.get_attribute_history( + state, + self.key, + attributes.PASSIVE_NO_INITIALIZE) + if history: + for child in history.deleted: + if self.hasparent(child) is False: + uowcommit.register_object( + child, isdelete=True, + operation="delete", prop=self.prop) + for c, m, st_, dct_ in self.mapper.cascade_iterator( + 'delete', + child): + uowcommit.register_object( + st_, isdelete=True) + + def process_deletes(self, uowcommit, states): + secondary_delete = [] + secondary_insert = [] + secondary_update = [] + + processed = self._get_reversed_processed_set(uowcommit) + tmp = set() + for state in states: + # this history should be cached already, as + # we loaded it in preprocess_deletes + history = uowcommit.get_attribute_history( + state, + self.key, + self._passive_delete_flag) + if history: + for child in history.non_added(): + if child is None or \ + (processed is not None and + (state, child) in processed): + continue + associationrow = {} + if not self._synchronize( + state, + child, + associationrow, + False, uowcommit, "delete"): + continue + secondary_delete.append(associationrow) + + tmp.update((c, state) for c in history.non_added()) + + if processed is not None: + processed.update(tmp) + + self._run_crud(uowcommit, secondary_insert, + secondary_update, secondary_delete) + + def process_saves(self, uowcommit, states): + secondary_delete = [] + secondary_insert = [] + secondary_update = [] + + processed = self._get_reversed_processed_set(uowcommit) + tmp = set() + + for state in states: + need_cascade_pks = not self.passive_updates and \ + self._pks_changed(uowcommit, state) + if need_cascade_pks: + passive = attributes.PASSIVE_OFF + else: + passive = attributes.PASSIVE_NO_INITIALIZE + history = uowcommit.get_attribute_history(state, self.key, + passive) + if history: + for child in history.added: + if (processed is not None and + (state, child) in processed): + continue + associationrow = {} + if not self._synchronize(state, + child, + associationrow, + False, uowcommit, "add"): + continue + secondary_insert.append(associationrow) + for child in history.deleted: + if (processed is not None and + (state, child) in processed): + continue + associationrow = {} + if not self._synchronize(state, + child, + associationrow, + False, uowcommit, "delete"): + continue + secondary_delete.append(associationrow) + + tmp.update((c, state) + for c in history.added + history.deleted) + + if need_cascade_pks: + + for child in history.unchanged: + associationrow = {} + sync.update(state, + self.parent, + associationrow, + "old_", + self.prop.synchronize_pairs) + sync.update(child, + self.mapper, + associationrow, + "old_", + self.prop.secondary_synchronize_pairs) + + secondary_update.append(associationrow) + + if processed is not None: + processed.update(tmp) + + self._run_crud(uowcommit, secondary_insert, + secondary_update, secondary_delete) + + def _run_crud(self, uowcommit, secondary_insert, + secondary_update, secondary_delete): + connection = uowcommit.transaction.connection(self.mapper) + + if secondary_delete: + associationrow = secondary_delete[0] + statement = self.secondary.delete(sql.and_(*[ + c == sql.bindparam(c.key, type_=c.type) + for c in self.secondary.c + if c.key in associationrow + ])) + result = connection.execute(statement, secondary_delete) + + if result.supports_sane_multi_rowcount() and \ + result.rowcount != len(secondary_delete): + raise exc.StaleDataError( + "DELETE statement on table '%s' expected to delete " + "%d row(s); Only %d 
were matched." % + (self.secondary.description, len(secondary_delete), + result.rowcount) + ) + + if secondary_update: + associationrow = secondary_update[0] + statement = self.secondary.update(sql.and_(*[ + c == sql.bindparam("old_" + c.key, type_=c.type) + for c in self.secondary.c + if c.key in associationrow + ])) + result = connection.execute(statement, secondary_update) + + if result.supports_sane_multi_rowcount() and \ + result.rowcount != len(secondary_update): + raise exc.StaleDataError( + "UPDATE statement on table '%s' expected to update " + "%d row(s); Only %d were matched." % + (self.secondary.description, len(secondary_update), + result.rowcount) + ) + + if secondary_insert: + statement = self.secondary.insert() + connection.execute(statement, secondary_insert) + + def _synchronize(self, state, child, associationrow, + clearkeys, uowcommit, operation): + + # this checks for None if uselist=True + self._verify_canload(child) + + # but if uselist=False we get here. If child is None, + # no association row can be generated, so return. + if child is None: + return False + + if child is not None and not uowcommit.session._contains_state(child): + if not child.deleted: + util.warn( + "Object of type %s not in session, %s " + "operation along '%s' won't proceed" % + (mapperutil.state_class_str(child), operation, self.prop)) + return False + + sync.populate_dict(state, self.parent, associationrow, + self.prop.synchronize_pairs) + sync.populate_dict(child, self.mapper, associationrow, + self.prop.secondary_synchronize_pairs) + + return True + + def _pks_changed(self, uowcommit, state): + return sync.source_modified( + uowcommit, + state, + self.parent, + self.prop.synchronize_pairs) + +_direction_to_processor = { + ONETOMANY: OneToManyDP, + MANYTOONE: ManyToOneDP, + MANYTOMANY: ManyToManyDP, +} diff --git a/app/lib/sqlalchemy/orm/deprecated_interfaces.py b/app/lib/sqlalchemy/orm/deprecated_interfaces.py new file mode 100644 index 0000000..c1bc2f9 --- /dev/null +++ b/app/lib/sqlalchemy/orm/deprecated_interfaces.py @@ -0,0 +1,487 @@ +# orm/deprecated_interfaces.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from .. import event, util +from .interfaces import EXT_CONTINUE + + +@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces") +class MapperExtension(object): + """Base implementation for :class:`.Mapper` event hooks. + + .. note:: + + :class:`.MapperExtension` is deprecated. Please + refer to :func:`.event.listen` as well as + :class:`.MapperEvents`. + + New extension classes subclass :class:`.MapperExtension` and are specified + using the ``extension`` mapper() argument, which is a single + :class:`.MapperExtension` or a list of such:: + + from sqlalchemy.orm.interfaces import MapperExtension + + class MyExtension(MapperExtension): + def before_insert(self, mapper, connection, instance): + print "instance %s before insert !" % instance + + m = mapper(User, users_table, extension=MyExtension()) + + A single mapper can maintain a chain of ``MapperExtension`` + objects. When a particular mapping event occurs, the + corresponding method on each ``MapperExtension`` is invoked + serially, and each method has the ability to halt the chain + from proceeding further:: + + m = mapper(User, users_table, extension=[ext1, ext2, ext3]) + + Each ``MapperExtension`` method returns the symbol + EXT_CONTINUE by default. 
This symbol generally means "move + to the next ``MapperExtension`` for processing". For methods + that return objects like translated rows or new object + instances, EXT_CONTINUE means the result of the method + should be ignored. In some cases it's required for a + default mapper activity to be performed, such as adding a + new instance to a result list. + + The symbol EXT_STOP has significance within a chain + of ``MapperExtension`` objects that the chain will be stopped + when this symbol is returned. Like EXT_CONTINUE, it also + has additional significance in some cases that a default + mapper activity will not be performed. + + """ + + @classmethod + def _adapt_instrument_class(cls, self, listener): + cls._adapt_listener_methods(self, listener, ('instrument_class',)) + + @classmethod + def _adapt_listener(cls, self, listener): + cls._adapt_listener_methods( + self, listener, + ( + 'init_instance', + 'init_failed', + 'reconstruct_instance', + 'before_insert', + 'after_insert', + 'before_update', + 'after_update', + 'before_delete', + 'after_delete' + )) + + @classmethod + def _adapt_listener_methods(cls, self, listener, methods): + + for meth in methods: + me_meth = getattr(MapperExtension, meth) + ls_meth = getattr(listener, meth) + + if not util.methods_equivalent(me_meth, ls_meth): + if meth == 'reconstruct_instance': + def go(ls_meth): + def reconstruct(instance, ctx): + ls_meth(self, instance) + return reconstruct + event.listen(self.class_manager, 'load', + go(ls_meth), raw=False, propagate=True) + elif meth == 'init_instance': + def go(ls_meth): + def init_instance(instance, args, kwargs): + ls_meth(self, self.class_, + self.class_manager.original_init, + instance, args, kwargs) + return init_instance + event.listen(self.class_manager, 'init', + go(ls_meth), raw=False, propagate=True) + elif meth == 'init_failed': + def go(ls_meth): + def init_failed(instance, args, kwargs): + util.warn_exception( + ls_meth, self, self.class_, + self.class_manager.original_init, + instance, args, kwargs) + + return init_failed + event.listen(self.class_manager, 'init_failure', + go(ls_meth), raw=False, propagate=True) + else: + event.listen(self, "%s" % meth, ls_meth, + raw=False, retval=True, propagate=True) + + def instrument_class(self, mapper, class_): + """Receive a class when the mapper is first constructed, and has + applied instrumentation to the mapped class. + + The return value is only significant within the ``MapperExtension`` + chain; the parent mapper's behavior isn't modified by this method. + + """ + return EXT_CONTINUE + + def init_instance(self, mapper, class_, oldinit, instance, args, kwargs): + """Receive an instance when its constructor is called. + + This method is only called during a userland construction of + an object. It is not called when an object is loaded from the + database. + + The return value is only significant within the ``MapperExtension`` + chain; the parent mapper's behavior isn't modified by this method. + + """ + return EXT_CONTINUE + + def init_failed(self, mapper, class_, oldinit, instance, args, kwargs): + """Receive an instance when its constructor has been called, + and raised an exception. + + This method is only called during a userland construction of + an object. It is not called when an object is loaded from the + database. + + The return value is only significant within the ``MapperExtension`` + chain; the parent mapper's behavior isn't modified by this method. 
+ + """ + return EXT_CONTINUE + + def reconstruct_instance(self, mapper, instance): + """Receive an object instance after it has been created via + ``__new__``, and after initial attribute population has + occurred. + + This typically occurs when the instance is created based on + incoming result rows, and is only called once for that + instance's lifetime. + + Note that during a result-row load, this method is called upon + the first row received for this instance. Note that some + attributes and collections may or may not be loaded or even + initialized, depending on what's present in the result rows. + + The return value is only significant within the ``MapperExtension`` + chain; the parent mapper's behavior isn't modified by this method. + + """ + return EXT_CONTINUE + + def before_insert(self, mapper, connection, instance): + """Receive an object instance before that instance is inserted + into its table. + + This is a good place to set up primary key values and such + that aren't handled otherwise. + + Column-based attributes can be modified within this method + which will result in the new value being inserted. However + *no* changes to the overall flush plan can be made, and + manipulation of the ``Session`` will not have the desired effect. + To manipulate the ``Session`` within an extension, use + ``SessionExtension``. + + The return value is only significant within the ``MapperExtension`` + chain; the parent mapper's behavior isn't modified by this method. + + """ + + return EXT_CONTINUE + + def after_insert(self, mapper, connection, instance): + """Receive an object instance after that instance is inserted. + + The return value is only significant within the ``MapperExtension`` + chain; the parent mapper's behavior isn't modified by this method. + + """ + + return EXT_CONTINUE + + def before_update(self, mapper, connection, instance): + """Receive an object instance before that instance is updated. + + Note that this method is called for all instances that are marked as + "dirty", even those which have no net changes to their column-based + attributes. An object is marked as dirty when any of its column-based + attributes have a "set attribute" operation called or when any of its + collections are modified. If, at update time, no column-based + attributes have any net changes, no UPDATE statement will be issued. + This means that an instance being sent to before_update is *not* a + guarantee that an UPDATE statement will be issued (although you can + affect the outcome here). + + To detect if the column-based attributes on the object have net + changes, and will therefore generate an UPDATE statement, use + ``object_session(instance).is_modified(instance, + include_collections=False)``. + + Column-based attributes can be modified within this method + which will result in the new value being updated. However + *no* changes to the overall flush plan can be made, and + manipulation of the ``Session`` will not have the desired effect. + To manipulate the ``Session`` within an extension, use + ``SessionExtension``. + + The return value is only significant within the ``MapperExtension`` + chain; the parent mapper's behavior isn't modified by this method. + + """ + + return EXT_CONTINUE + + def after_update(self, mapper, connection, instance): + """Receive an object instance after that instance is updated. + + The return value is only significant within the ``MapperExtension`` + chain; the parent mapper's behavior isn't modified by this method. 
+ + """ + + return EXT_CONTINUE + + def before_delete(self, mapper, connection, instance): + """Receive an object instance before that instance is deleted. + + Note that *no* changes to the overall flush plan can be made + here; and manipulation of the ``Session`` will not have the + desired effect. To manipulate the ``Session`` within an + extension, use ``SessionExtension``. + + The return value is only significant within the ``MapperExtension`` + chain; the parent mapper's behavior isn't modified by this method. + + """ + + return EXT_CONTINUE + + def after_delete(self, mapper, connection, instance): + """Receive an object instance after that instance is deleted. + + The return value is only significant within the ``MapperExtension`` + chain; the parent mapper's behavior isn't modified by this method. + + """ + + return EXT_CONTINUE + + +@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces") +class SessionExtension(object): + + """Base implementation for :class:`.Session` event hooks. + + .. note:: + + :class:`.SessionExtension` is deprecated. Please + refer to :func:`.event.listen` as well as + :class:`.SessionEvents`. + + Subclasses may be installed into a :class:`.Session` (or + :class:`.sessionmaker`) using the ``extension`` keyword + argument:: + + from sqlalchemy.orm.interfaces import SessionExtension + + class MySessionExtension(SessionExtension): + def before_commit(self, session): + print "before commit!" + + Session = sessionmaker(extension=MySessionExtension()) + + The same :class:`.SessionExtension` instance can be used + with any number of sessions. + + """ + + @classmethod + def _adapt_listener(cls, self, listener): + for meth in [ + 'before_commit', + 'after_commit', + 'after_rollback', + 'before_flush', + 'after_flush', + 'after_flush_postexec', + 'after_begin', + 'after_attach', + 'after_bulk_update', + 'after_bulk_delete', + ]: + me_meth = getattr(SessionExtension, meth) + ls_meth = getattr(listener, meth) + + if not util.methods_equivalent(me_meth, ls_meth): + event.listen(self, meth, getattr(listener, meth)) + + def before_commit(self, session): + """Execute right before commit is called. + + Note that this may not be per-flush if a longer running + transaction is ongoing.""" + + def after_commit(self, session): + """Execute after a commit has occurred. + + Note that this may not be per-flush if a longer running + transaction is ongoing.""" + + def after_rollback(self, session): + """Execute after a rollback has occurred. + + Note that this may not be per-flush if a longer running + transaction is ongoing.""" + + def before_flush(self, session, flush_context, instances): + """Execute before flush process has started. + + `instances` is an optional list of objects which were passed to + the ``flush()`` method. """ + + def after_flush(self, session, flush_context): + """Execute after flush has completed, but before commit has been + called. + + Note that the session's state is still in pre-flush, i.e. 'new', + 'dirty', and 'deleted' lists still show pre-flush state as well + as the history settings on instance attributes.""" + + def after_flush_postexec(self, session, flush_context): + """Execute after flush has completed, and after the post-exec + state occurs. + + This will be when the 'new', 'dirty', and 'deleted' lists are in + their final state. An actual commit() may or may not have + occurred, depending on whether or not the flush started its own + transaction or participated in a larger transaction. 
""" + + def after_begin(self, session, transaction, connection): + """Execute after a transaction is begun on a connection + + `transaction` is the SessionTransaction. This method is called + after an engine level transaction is begun on a connection. """ + + def after_attach(self, session, instance): + """Execute after an instance is attached to a session. + + This is called after an add, delete or merge. """ + + def after_bulk_update(self, session, query, query_context, result): + """Execute after a bulk update operation to the session. + + This is called after a session.query(...).update() + + `query` is the query object that this update operation was + called on. `query_context` was the query context object. + `result` is the result object returned from the bulk operation. + """ + + def after_bulk_delete(self, session, query, query_context, result): + """Execute after a bulk delete operation to the session. + + This is called after a session.query(...).delete() + + `query` is the query object that this delete operation was + called on. `query_context` was the query context object. + `result` is the result object returned from the bulk operation. + """ + + +@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces") +class AttributeExtension(object): + """Base implementation for :class:`.AttributeImpl` event hooks, events + that fire upon attribute mutations in user code. + + .. note:: + + :class:`.AttributeExtension` is deprecated. Please + refer to :func:`.event.listen` as well as + :class:`.AttributeEvents`. + + :class:`.AttributeExtension` is used to listen for set, + remove, and append events on individual mapped attributes. + It is established on an individual mapped attribute using + the `extension` argument, available on + :func:`.column_property`, :func:`.relationship`, and + others:: + + from sqlalchemy.orm.interfaces import AttributeExtension + from sqlalchemy.orm import mapper, relationship, column_property + + class MyAttrExt(AttributeExtension): + def append(self, state, value, initiator): + print "append event !" + return value + + def set(self, state, value, oldvalue, initiator): + print "set event !" + return value + + mapper(SomeClass, sometable, properties={ + 'foo':column_property(sometable.c.foo, extension=MyAttrExt()), + 'bar':relationship(Bar, extension=MyAttrExt()) + }) + + Note that the :class:`.AttributeExtension` methods + :meth:`~.AttributeExtension.append` and + :meth:`~.AttributeExtension.set` need to return the + ``value`` parameter. The returned value is used as the + effective value, and allows the extension to change what is + ultimately persisted. + + AttributeExtension is assembled within the descriptors associated + with a mapped class. + + """ + + active_history = True + """indicates that the set() method would like to receive the 'old' value, + even if it means firing lazy callables. + + Note that ``active_history`` can also be set directly via + :func:`.column_property` and :func:`.relationship`. + + """ + + @classmethod + def _adapt_listener(cls, self, listener): + event.listen(self, 'append', listener.append, + active_history=listener.active_history, + raw=True, retval=True) + event.listen(self, 'remove', listener.remove, + active_history=listener.active_history, + raw=True, retval=True) + event.listen(self, 'set', listener.set, + active_history=listener.active_history, + raw=True, retval=True) + + def append(self, state, value, initiator): + """Receive a collection append event. 
+ + The returned value will be used as the actual value to be + appended. + + """ + return value + + def remove(self, state, value, initiator): + """Receive a remove event. + + No return value is defined. + + """ + pass + + def set(self, state, value, oldvalue, initiator): + """Receive a set event. + + The returned value will be used as the actual value to be + set. + + """ + return value diff --git a/app/lib/sqlalchemy/orm/descriptor_props.py b/app/lib/sqlalchemy/orm/descriptor_props.py new file mode 100644 index 0000000..0792ff2 --- /dev/null +++ b/app/lib/sqlalchemy/orm/descriptor_props.py @@ -0,0 +1,699 @@ +# orm/descriptor_props.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Descriptor properties are more "auxiliary" properties +that exist as configurational elements, but don't participate +as actively in the load/persist ORM loop. + +""" + +from .interfaces import MapperProperty, PropComparator +from .util import _none_set +from . import attributes +from .. import util, sql, exc as sa_exc, event, schema +from ..sql import expression +from . import properties +from . import query + + +class DescriptorProperty(MapperProperty): + """:class:`.MapperProperty` which proxies access to a + user-defined descriptor.""" + + doc = None + + def instrument_class(self, mapper): + prop = self + + class _ProxyImpl(object): + accepts_scalar_loader = False + expire_missing = True + collection = False + + def __init__(self, key): + self.key = key + + if hasattr(prop, 'get_history'): + def get_history(self, state, dict_, + passive=attributes.PASSIVE_OFF): + return prop.get_history(state, dict_, passive) + + if self.descriptor is None: + desc = getattr(mapper.class_, self.key, None) + if mapper._is_userland_descriptor(desc): + self.descriptor = desc + + if self.descriptor is None: + def fset(obj, value): + setattr(obj, self.name, value) + + def fdel(obj): + delattr(obj, self.name) + + def fget(obj): + return getattr(obj, self.name) + + self.descriptor = property( + fget=fget, + fset=fset, + fdel=fdel, + ) + + proxy_attr = attributes.create_proxied_attribute( + self.descriptor)( + self.parent.class_, + self.key, + self.descriptor, + lambda: self._comparator_factory(mapper), + doc=self.doc, + original_property=self + ) + proxy_attr.impl = _ProxyImpl(self.key) + mapper.class_manager.instrument_attribute(self.key, proxy_attr) + + +@util.langhelpers.dependency_for("sqlalchemy.orm.properties") +class CompositeProperty(DescriptorProperty): + """Defines a "composite" mapped attribute, representing a collection + of columns as one attribute. + + :class:`.CompositeProperty` is constructed using the :func:`.composite` + function. + + .. seealso:: + + :ref:`mapper_composite` + + """ + + def __init__(self, class_, *attrs, **kwargs): + r"""Return a composite column-based property for use with a Mapper. + + See the mapping documentation section :ref:`mapper_composite` for a + full usage example. + + The :class:`.MapperProperty` returned by :func:`.composite` + is the :class:`.CompositeProperty`. + + :param class\_: + The "composite type" class. + + :param \*cols: + List of Column objects to be mapped. + + :param active_history=False: + When ``True``, indicates that the "previous" value for a + scalar attribute should be loaded when replaced, if not + already loaded. See the same flag on :func:`.column_property`. + + .. 
versionchanged:: 0.7 + This flag specifically becomes meaningful + - previously it was a placeholder. + + :param group: + A group name for this property when marked as deferred. + + :param deferred: + When True, the column property is "deferred", meaning that it does + not load immediately, and is instead loaded when the attribute is + first accessed on an instance. See also + :func:`~sqlalchemy.orm.deferred`. + + :param comparator_factory: a class which extends + :class:`.CompositeProperty.Comparator` which provides custom SQL + clause generation for comparison operations. + + :param doc: + optional string that will be applied as the doc on the + class-bound descriptor. + + :param info: Optional data dictionary which will be populated into the + :attr:`.MapperProperty.info` attribute of this object. + + .. versionadded:: 0.8 + + :param extension: + an :class:`.AttributeExtension` instance, + or list of extensions, which will be prepended to the list of + attribute listeners for the resulting descriptor placed on the + class. **Deprecated.** Please see :class:`.AttributeEvents`. + + """ + super(CompositeProperty, self).__init__() + + self.attrs = attrs + self.composite_class = class_ + self.active_history = kwargs.get('active_history', False) + self.deferred = kwargs.get('deferred', False) + self.group = kwargs.get('group', None) + self.comparator_factory = kwargs.pop('comparator_factory', + self.__class__.Comparator) + if 'info' in kwargs: + self.info = kwargs.pop('info') + + util.set_creation_order(self) + self._create_descriptor() + + def instrument_class(self, mapper): + super(CompositeProperty, self).instrument_class(mapper) + self._setup_event_handlers() + + def do_init(self): + """Initialization which occurs after the :class:`.CompositeProperty` + has been associated with its parent mapper. + + """ + self._setup_arguments_on_columns() + + def _create_descriptor(self): + """Create the Python descriptor that will serve as + the access point on instances of the mapped class. + + """ + + def fget(instance): + dict_ = attributes.instance_dict(instance) + state = attributes.instance_state(instance) + + if self.key not in dict_: + # key not present. Iterate through related + # attributes, retrieve their values. This + # ensures they all load. + values = [ + getattr(instance, key) + for key in self._attribute_keys + ] + + # current expected behavior here is that the composite is + # created on access if the object is persistent or if + # col attributes have non-None. This would be better + # if the composite were created unconditionally, + # but that would be a behavioral change. 
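+                # i.e. the composite is built only for persistent objects
+                # (state.key is set) or when at least one of the underlying
+                # column attributes holds a non-None value.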
+ if self.key not in dict_ and ( + state.key is not None or + not _none_set.issuperset(values) + ): + dict_[self.key] = self.composite_class(*values) + state.manager.dispatch.refresh(state, None, [self.key]) + + return dict_.get(self.key, None) + + def fset(instance, value): + dict_ = attributes.instance_dict(instance) + state = attributes.instance_state(instance) + attr = state.manager[self.key] + previous = dict_.get(self.key, attributes.NO_VALUE) + for fn in attr.dispatch.set: + value = fn(state, value, previous, attr.impl) + dict_[self.key] = value + if value is None: + for key in self._attribute_keys: + setattr(instance, key, None) + else: + for key, value in zip( + self._attribute_keys, + value.__composite_values__()): + setattr(instance, key, value) + + def fdel(instance): + state = attributes.instance_state(instance) + dict_ = attributes.instance_dict(instance) + previous = dict_.pop(self.key, attributes.NO_VALUE) + attr = state.manager[self.key] + attr.dispatch.remove(state, previous, attr.impl) + for key in self._attribute_keys: + setattr(instance, key, None) + + self.descriptor = property(fget, fset, fdel) + + @util.memoized_property + def _comparable_elements(self): + return [ + getattr(self.parent.class_, prop.key) + for prop in self.props + ] + + @util.memoized_property + def props(self): + props = [] + for attr in self.attrs: + if isinstance(attr, str): + prop = self.parent.get_property( + attr, _configure_mappers=False) + elif isinstance(attr, schema.Column): + prop = self.parent._columntoproperty[attr] + elif isinstance(attr, attributes.InstrumentedAttribute): + prop = attr.property + else: + raise sa_exc.ArgumentError( + "Composite expects Column objects or mapped " + "attributes/attribute names as arguments, got: %r" + % (attr,)) + props.append(prop) + return props + + @property + def columns(self): + return [a for a in self.attrs if isinstance(a, schema.Column)] + + def _setup_arguments_on_columns(self): + """Propagate configuration arguments made on this composite + to the target columns, for those that apply. + + """ + for prop in self.props: + prop.active_history = self.active_history + if self.deferred: + prop.deferred = self.deferred + prop.strategy_key = ( + ("deferred", True), + ("instrument", True)) + prop.group = self.group + + def _setup_event_handlers(self): + """Establish events that populate/expire the composite attribute.""" + + def load_handler(state, *args): + dict_ = state.dict + + if self.key in dict_: + return + + # if column elements aren't loaded, skip. + # __get__() will initiate a load for those + # columns + for k in self._attribute_keys: + if k not in dict_: + return + + # assert self.key not in dict_ + dict_[self.key] = self.composite_class( + *[state.dict[key] for key in + self._attribute_keys] + ) + + def expire_handler(state, keys): + if keys is None or set(self._attribute_keys).intersection(keys): + state.dict.pop(self.key, None) + + def insert_update_handler(mapper, connection, state): + """After an insert or update, some columns may be expired due + to server side defaults, or re-populated due to client side + defaults. Pop out the composite value here so that it + recreates. 
+ + """ + + state.dict.pop(self.key, None) + + event.listen(self.parent, 'after_insert', + insert_update_handler, raw=True) + event.listen(self.parent, 'after_update', + insert_update_handler, raw=True) + event.listen(self.parent, 'load', + load_handler, raw=True, propagate=True) + event.listen(self.parent, 'refresh', + load_handler, raw=True, propagate=True) + event.listen(self.parent, 'expire', + expire_handler, raw=True, propagate=True) + + # TODO: need a deserialize hook here + + @util.memoized_property + def _attribute_keys(self): + return [ + prop.key for prop in self.props + ] + + def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF): + """Provided for userland code that uses attributes.get_history().""" + + added = [] + deleted = [] + + has_history = False + for prop in self.props: + key = prop.key + hist = state.manager[key].impl.get_history(state, dict_) + if hist.has_changes(): + has_history = True + + non_deleted = hist.non_deleted() + if non_deleted: + added.extend(non_deleted) + else: + added.append(None) + if hist.deleted: + deleted.extend(hist.deleted) + else: + deleted.append(None) + + if has_history: + return attributes.History( + [self.composite_class(*added)], + (), + [self.composite_class(*deleted)] + ) + else: + return attributes.History( + (), [self.composite_class(*added)], () + ) + + def _comparator_factory(self, mapper): + return self.comparator_factory(self, mapper) + + class CompositeBundle(query.Bundle): + def __init__(self, property, expr): + self.property = property + super(CompositeProperty.CompositeBundle, self).__init__( + property.key, *expr) + + def create_row_processor(self, query, procs, labels): + def proc(row): + return self.property.composite_class( + *[proc(row) for proc in procs]) + return proc + + class Comparator(PropComparator): + """Produce boolean, comparison, and other operators for + :class:`.CompositeProperty` attributes. + + See the example in :ref:`composite_operations` for an overview + of usage , as well as the documentation for :class:`.PropComparator`. + + See also: + + :class:`.PropComparator` + + :class:`.ColumnOperators` + + :ref:`types_operators` + + :attr:`.TypeEngine.comparator_factory` + + """ + + __hash__ = None + + @property + def clauses(self): + return self.__clause_element__() + + def __clause_element__(self): + return expression.ClauseList( + group=False, *self._comparable_elements) + + def _query_clause_element(self): + return CompositeProperty.CompositeBundle( + self.prop, self.__clause_element__()) + + @util.memoized_property + def _comparable_elements(self): + if self._adapt_to_entity: + return [ + getattr( + self._adapt_to_entity.entity, + prop.key + ) for prop in self.prop._comparable_elements + ] + else: + return self.prop._comparable_elements + + def __eq__(self, other): + if other is None: + values = [None] * len(self.prop._comparable_elements) + else: + values = other.__composite_values__() + comparisons = [ + a == b + for a, b in zip(self.prop._comparable_elements, values) + ] + if self._adapt_to_entity: + comparisons = [self.adapter(x) for x in comparisons] + return sql.and_(*comparisons) + + def __ne__(self, other): + return sql.not_(self.__eq__(other)) + + def __str__(self): + return str(self.parent.class_.__name__) + "." 
+ self.key + + +@util.langhelpers.dependency_for("sqlalchemy.orm.properties") +class ConcreteInheritedProperty(DescriptorProperty): + """A 'do nothing' :class:`.MapperProperty` that disables + an attribute on a concrete subclass that is only present + on the inherited mapper, not the concrete classes' mapper. + + Cases where this occurs include: + + * When the superclass mapper is mapped against a + "polymorphic union", which includes all attributes from + all subclasses. + * When a relationship() is configured on an inherited mapper, + but not on the subclass mapper. Concrete mappers require + that relationship() is configured explicitly on each + subclass. + + """ + + def _comparator_factory(self, mapper): + comparator_callable = None + + for m in self.parent.iterate_to_root(): + p = m._props[self.key] + if not isinstance(p, ConcreteInheritedProperty): + comparator_callable = p.comparator_factory + break + return comparator_callable + + def __init__(self): + super(ConcreteInheritedProperty, self).__init__() + def warn(): + raise AttributeError("Concrete %s does not implement " + "attribute %r at the instance level. Add " + "this property explicitly to %s." % + (self.parent, self.key, self.parent)) + + class NoninheritedConcreteProp(object): + def __set__(s, obj, value): + warn() + + def __delete__(s, obj): + warn() + + def __get__(s, obj, owner): + if obj is None: + return self.descriptor + warn() + self.descriptor = NoninheritedConcreteProp() + + +@util.langhelpers.dependency_for("sqlalchemy.orm.properties") +class SynonymProperty(DescriptorProperty): + + def __init__(self, name, map_column=None, + descriptor=None, comparator_factory=None, + doc=None, info=None): + """Denote an attribute name as a synonym to a mapped property, + in that the attribute will mirror the value and expression behavior + of another attribute. + + :param name: the name of the existing mapped property. This + can refer to the string name of any :class:`.MapperProperty` + configured on the class, including column-bound attributes + and relationships. + + :param descriptor: a Python :term:`descriptor` that will be used + as a getter (and potentially a setter) when this attribute is + accessed at the instance level. + + :param map_column: if ``True``, the :func:`.synonym` construct will + locate the existing named :class:`.MapperProperty` based on the + attribute name of this :func:`.synonym`, and assign it to a new + attribute linked to the name of this :func:`.synonym`. + That is, given a mapping like:: + + class MyClass(Base): + __tablename__ = 'my_table' + + id = Column(Integer, primary_key=True) + job_status = Column(String(50)) + + job_status = synonym("_job_status", map_column=True) + + The above class ``MyClass`` will now have the ``job_status`` + :class:`.Column` object mapped to the attribute named + ``_job_status``, and the attribute named ``job_status`` will refer + to the synonym itself. This feature is typically used in + conjunction with the ``descriptor`` argument in order to link a + user-defined descriptor as a "wrapper" for an existing column. + + :param info: Optional data dictionary which will be populated into the + :attr:`.InspectionAttr.info` attribute of this object. + + .. versionadded:: 1.0.0 + + :param comparator_factory: A subclass of :class:`.PropComparator` + that will provide custom comparison behavior at the SQL expression + level. + + .. 
note:: + + For the use case of providing an attribute which redefines both + Python-level and SQL-expression level behavior of an attribute, + please refer to the Hybrid attribute introduced at + :ref:`mapper_hybrids` for a more effective technique. + + .. seealso:: + + :ref:`synonyms` - examples of functionality. + + :ref:`mapper_hybrids` - Hybrids provide a better approach for + more complicated attribute-wrapping schemes than synonyms. + + """ + super(SynonymProperty, self).__init__() + + self.name = name + self.map_column = map_column + self.descriptor = descriptor + self.comparator_factory = comparator_factory + self.doc = doc or (descriptor and descriptor.__doc__) or None + if info: + self.info = info + + util.set_creation_order(self) + + # TODO: when initialized, check _proxied_property, + # emit a warning if its not a column-based property + + @util.memoized_property + def _proxied_property(self): + return getattr(self.parent.class_, self.name).property + + def _comparator_factory(self, mapper): + prop = self._proxied_property + + if self.comparator_factory: + comp = self.comparator_factory(prop, mapper) + else: + comp = prop.comparator_factory(prop, mapper) + return comp + + def set_parent(self, parent, init): + if self.map_column: + # implement the 'map_column' option. + if self.key not in parent.mapped_table.c: + raise sa_exc.ArgumentError( + "Can't compile synonym '%s': no column on table " + "'%s' named '%s'" + % (self.name, parent.mapped_table.description, self.key)) + elif parent.mapped_table.c[self.key] in \ + parent._columntoproperty and \ + parent._columntoproperty[ + parent.mapped_table.c[self.key] + ].key == self.name: + raise sa_exc.ArgumentError( + "Can't call map_column=True for synonym %r=%r, " + "a ColumnProperty already exists keyed to the name " + "%r for column %r" % + (self.key, self.name, self.name, self.key) + ) + p = properties.ColumnProperty(parent.mapped_table.c[self.key]) + parent._configure_property( + self.name, p, + init=init, + setparent=True) + p._mapped_by_synonym = self.key + + self.parent = parent + + +@util.langhelpers.dependency_for("sqlalchemy.orm.properties") +class ComparableProperty(DescriptorProperty): + """Instruments a Python property for use in query expressions.""" + + def __init__( + self, comparator_factory, descriptor=None, doc=None, info=None): + """Provides a method of applying a :class:`.PropComparator` + to any Python descriptor attribute. + + .. versionchanged:: 0.7 + :func:`.comparable_property` is superseded by + the :mod:`~sqlalchemy.ext.hybrid` extension. See the example + at :ref:`hybrid_custom_comparators`. + + Allows any Python descriptor to behave like a SQL-enabled + attribute when used at the class level in queries, allowing + redefinition of expression operator behavior. 
+ + In the example below we redefine :meth:`.PropComparator.operate` + to wrap both sides of an expression in ``func.lower()`` to produce + case-insensitive comparison:: + + from sqlalchemy.orm import comparable_property + from sqlalchemy.orm.interfaces import PropComparator + from sqlalchemy.sql import func + from sqlalchemy import Integer, String, Column + from sqlalchemy.ext.declarative import declarative_base + + class CaseInsensitiveComparator(PropComparator): + def __clause_element__(self): + return self.prop + + def operate(self, op, other): + return op( + func.lower(self.__clause_element__()), + func.lower(other) + ) + + Base = declarative_base() + + class SearchWord(Base): + __tablename__ = 'search_word' + id = Column(Integer, primary_key=True) + word = Column(String) + word_insensitive = comparable_property(lambda prop, mapper: + CaseInsensitiveComparator( + mapper.c.word, mapper) + ) + + + A mapping like the above allows the ``word_insensitive`` attribute + to render an expression like:: + + >>> print SearchWord.word_insensitive == "Trucks" + lower(search_word.word) = lower(:lower_1) + + :param comparator_factory: + A PropComparator subclass or factory that defines operator behavior + for this property. + + :param descriptor: + Optional when used in a ``properties={}`` declaration. The Python + descriptor or property to layer comparison behavior on top of. + + The like-named descriptor will be automatically retrieved from the + mapped class if left blank in a ``properties`` declaration. + + :param info: Optional data dictionary which will be populated into the + :attr:`.InspectionAttr.info` attribute of this object. + + .. versionadded:: 1.0.0 + + """ + super(ComparableProperty, self).__init__() + self.descriptor = descriptor + self.comparator_factory = comparator_factory + self.doc = doc or (descriptor and descriptor.__doc__) or None + if info: + self.info = info + util.set_creation_order(self) + + def _comparator_factory(self, mapper): + return self.comparator_factory(self, mapper) diff --git a/app/lib/sqlalchemy/orm/dynamic.py b/app/lib/sqlalchemy/orm/dynamic.py new file mode 100644 index 0000000..9f99740 --- /dev/null +++ b/app/lib/sqlalchemy/orm/dynamic.py @@ -0,0 +1,367 @@ +# orm/dynamic.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Dynamic collection API. + +Dynamic collections act like Query() objects for read operations and support +basic add/delete mutation. + +""" + +from .. import log, util, exc +from ..sql import operators +from . import ( + attributes, object_session, util as orm_util, strategies, + object_mapper, exc as orm_exc, properties +) +from .query import Query + + +@log.class_logger +@properties.RelationshipProperty.strategy_for(lazy="dynamic") +class DynaLoader(strategies.AbstractRelationshipLoader): + def init_class_attribute(self, mapper): + self.is_class_level = True + if not self.uselist: + raise exc.InvalidRequestError( + "On relationship %s, 'dynamic' loaders cannot be used with " + "many-to-one/one-to-one relationships and/or " + "uselist=False." 
% self.parent_property) + strategies._register_attribute( + self.parent_property, + mapper, + useobject=True, + impl_class=DynamicAttributeImpl, + target_mapper=self.parent_property.mapper, + order_by=self.parent_property.order_by, + query_class=self.parent_property.query_class, + ) + + +class DynamicAttributeImpl(attributes.AttributeImpl): + uses_objects = True + accepts_scalar_loader = False + supports_population = False + collection = False + + def __init__(self, class_, key, typecallable, + dispatch, + target_mapper, order_by, query_class=None, **kw): + super(DynamicAttributeImpl, self).\ + __init__(class_, key, typecallable, dispatch, **kw) + self.target_mapper = target_mapper + self.order_by = order_by + if not query_class: + self.query_class = AppenderQuery + elif AppenderMixin in query_class.mro(): + self.query_class = query_class + else: + self.query_class = mixin_user_query(query_class) + + def get(self, state, dict_, passive=attributes.PASSIVE_OFF): + if not passive & attributes.SQL_OK: + return self._get_collection_history( + state, attributes.PASSIVE_NO_INITIALIZE).added_items + else: + return self.query_class(self, state) + + def get_collection(self, state, dict_, user_data=None, + passive=attributes.PASSIVE_NO_INITIALIZE): + if not passive & attributes.SQL_OK: + return self._get_collection_history(state, + passive).added_items + else: + history = self._get_collection_history(state, passive) + return history.added_plus_unchanged + + @util.memoized_property + def _append_token(self): + return attributes.Event(self, attributes.OP_APPEND) + + @util.memoized_property + def _remove_token(self): + return attributes.Event(self, attributes.OP_REMOVE) + + def fire_append_event(self, state, dict_, value, initiator, + collection_history=None): + if collection_history is None: + collection_history = self._modified_event(state, dict_) + + collection_history.add_added(value) + + for fn in self.dispatch.append: + value = fn(state, value, initiator or self._append_token) + + if self.trackparent and value is not None: + self.sethasparent(attributes.instance_state(value), state, True) + + def fire_remove_event(self, state, dict_, value, initiator, + collection_history=None): + if collection_history is None: + collection_history = self._modified_event(state, dict_) + + collection_history.add_removed(value) + + if self.trackparent and value is not None: + self.sethasparent(attributes.instance_state(value), state, False) + + for fn in self.dispatch.remove: + fn(state, value, initiator or self._remove_token) + + def _modified_event(self, state, dict_): + + if self.key not in state.committed_state: + state.committed_state[self.key] = CollectionHistory(self, state) + + state._modified_event(dict_, + self, + attributes.NEVER_SET) + + # this is a hack to allow the fixtures.ComparableEntity fixture + # to work + dict_[self.key] = True + return state.committed_state[self.key] + + def set(self, state, dict_, value, initiator=None, + passive=attributes.PASSIVE_OFF, + check_old=None, pop=False, _adapt=True): + if initiator and initiator.parent_token is self.parent_token: + return + + if pop and value is None: + return + + iterable = value + new_values = list(iterable) + if state.has_identity: + old_collection = util.IdentitySet(self.get(state, dict_)) + + collection_history = self._modified_event(state, dict_) + if not state.has_identity: + old_collection = collection_history.added_items + else: + old_collection = old_collection.union( + collection_history.added_items) + + idset = util.IdentitySet + 
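+        # compare old and new collection members by object identity:
+        # members present in both are left alone, new members are fired
+        # as appends, and old members missing from the new value are
+        # fired as removes.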
constants = old_collection.intersection(new_values) + additions = idset(new_values).difference(constants) + removals = old_collection.difference(constants) + + for member in new_values: + if member in additions: + self.fire_append_event(state, dict_, member, None, + collection_history=collection_history) + + for member in removals: + self.fire_remove_event(state, dict_, member, None, + collection_history=collection_history) + + def delete(self, *args, **kwargs): + raise NotImplementedError() + + def set_committed_value(self, state, dict_, value): + raise NotImplementedError("Dynamic attributes don't support " + "collection population.") + + def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF): + c = self._get_collection_history(state, passive) + return c.as_history() + + def get_all_pending(self, state, dict_, + passive=attributes.PASSIVE_NO_INITIALIZE): + c = self._get_collection_history( + state, passive) + return [ + (attributes.instance_state(x), x) + for x in + c.all_items + ] + + def _get_collection_history(self, state, passive=attributes.PASSIVE_OFF): + if self.key in state.committed_state: + c = state.committed_state[self.key] + else: + c = CollectionHistory(self, state) + + if state.has_identity and (passive & attributes.INIT_OK): + return CollectionHistory(self, state, apply_to=c) + else: + return c + + def append(self, state, dict_, value, initiator, + passive=attributes.PASSIVE_OFF): + if initiator is not self: + self.fire_append_event(state, dict_, value, initiator) + + def remove(self, state, dict_, value, initiator, + passive=attributes.PASSIVE_OFF): + if initiator is not self: + self.fire_remove_event(state, dict_, value, initiator) + + def pop(self, state, dict_, value, initiator, + passive=attributes.PASSIVE_OFF): + self.remove(state, dict_, value, initiator, passive=passive) + + +class AppenderMixin(object): + query_class = None + + def __init__(self, attr, state): + super(AppenderMixin, self).__init__(attr.target_mapper, None) + self.instance = instance = state.obj() + self.attr = attr + + mapper = object_mapper(instance) + prop = mapper._props[self.attr.key] + self._criterion = prop._with_parent( + instance, + alias_secondary=False) + + if self.attr.order_by: + self._order_by = self.attr.order_by + + def session(self): + sess = object_session(self.instance) + if sess is not None and self.autoflush and sess.autoflush \ + and self.instance in sess: + sess.flush() + if not orm_util.has_identity(self.instance): + return None + else: + return sess + session = property(session, lambda s, x: None) + + def __iter__(self): + sess = self.session + if sess is None: + return iter(self.attr._get_collection_history( + attributes.instance_state(self.instance), + attributes.PASSIVE_NO_INITIALIZE).added_items) + else: + return iter(self._clone(sess)) + + def __getitem__(self, index): + sess = self.session + if sess is None: + return self.attr._get_collection_history( + attributes.instance_state(self.instance), + attributes.PASSIVE_NO_INITIALIZE).indexed(index) + else: + return self._clone(sess).__getitem__(index) + + def count(self): + sess = self.session + if sess is None: + return len(self.attr._get_collection_history( + attributes.instance_state(self.instance), + attributes.PASSIVE_NO_INITIALIZE).added_items) + else: + return self._clone(sess).count() + + def _clone(self, sess=None): + # note we're returning an entirely new Query class instance + # here without any assignment capabilities; the class of this + # query is determined by the session. 
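+        # if no session was passed and the parent is detached, there is
+        # nothing to query against, hence the DetachedInstanceError below.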
+ instance = self.instance + if sess is None: + sess = object_session(instance) + if sess is None: + raise orm_exc.DetachedInstanceError( + "Parent instance %s is not bound to a Session, and no " + "contextual session is established; lazy load operation " + "of attribute '%s' cannot proceed" % ( + orm_util.instance_str(instance), self.attr.key)) + + if self.query_class: + query = self.query_class(self.attr.target_mapper, session=sess) + else: + query = sess.query(self.attr.target_mapper) + + query._criterion = self._criterion + query._order_by = self._order_by + + return query + + def extend(self, iterator): + for item in iterator: + self.attr.append( + attributes.instance_state(self.instance), + attributes.instance_dict(self.instance), item, None) + + def append(self, item): + self.attr.append( + attributes.instance_state(self.instance), + attributes.instance_dict(self.instance), item, None) + + def remove(self, item): + self.attr.remove( + attributes.instance_state(self.instance), + attributes.instance_dict(self.instance), item, None) + + +class AppenderQuery(AppenderMixin, Query): + """A dynamic query that supports basic collection storage operations.""" + + +def mixin_user_query(cls): + """Return a new class with AppenderQuery functionality layered over.""" + name = 'Appender' + cls.__name__ + return type(name, (AppenderMixin, cls), {'query_class': cls}) + + +class CollectionHistory(object): + """Overrides AttributeHistory to receive append/remove events directly.""" + + def __init__(self, attr, state, apply_to=None): + if apply_to: + coll = AppenderQuery(attr, state).autoflush(False) + self.unchanged_items = util.OrderedIdentitySet(coll) + self.added_items = apply_to.added_items + self.deleted_items = apply_to.deleted_items + self._reconcile_collection = True + else: + self.deleted_items = util.OrderedIdentitySet() + self.added_items = util.OrderedIdentitySet() + self.unchanged_items = util.OrderedIdentitySet() + self._reconcile_collection = False + + @property + def added_plus_unchanged(self): + return list(self.added_items.union(self.unchanged_items)) + + @property + def all_items(self): + return list(self.added_items.union( + self.unchanged_items).union(self.deleted_items)) + + def as_history(self): + if self._reconcile_collection: + added = self.added_items.difference(self.unchanged_items) + deleted = self.deleted_items.intersection(self.unchanged_items) + unchanged = self.unchanged_items.difference(deleted) + else: + added, unchanged, deleted = self.added_items,\ + self.unchanged_items,\ + self.deleted_items + return attributes.History( + list(added), + list(unchanged), + list(deleted), + ) + + def indexed(self, index): + return list(self.added_items)[index] + + def add_added(self, value): + self.added_items.add(value) + + def add_removed(self, value): + if value in self.added_items: + self.added_items.remove(value) + else: + self.deleted_items.add(value) diff --git a/app/lib/sqlalchemy/orm/evaluator.py b/app/lib/sqlalchemy/orm/evaluator.py new file mode 100644 index 0000000..95a9e9b --- /dev/null +++ b/app/lib/sqlalchemy/orm/evaluator.py @@ -0,0 +1,137 @@ +# orm/evaluator.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +import operator +from ..sql import operators + + +class UnevaluatableError(Exception): + pass + +_straight_ops = set(getattr(operators, op) + for op in ('add', 'mul', 'sub', + 'div', + 'mod', 'truediv', + 
'lt', 'le', 'ne', 'gt', 'ge', 'eq'))
+
+
+_notimplemented_ops = set(getattr(operators, op)
+                          for op in ('like_op', 'notlike_op', 'ilike_op',
+                                     'notilike_op', 'between_op', 'in_op',
+                                     'notin_op', 'endswith_op', 'concat_op'))
+
+
+class EvaluatorCompiler(object):
+    def __init__(self, target_cls=None):
+        self.target_cls = target_cls
+
+    def process(self, clause):
+        meth = getattr(self, "visit_%s" % clause.__visit_name__, None)
+        if not meth:
+            raise UnevaluatableError(
+                "Cannot evaluate %s" % type(clause).__name__)
+        return meth(clause)
+
+    def visit_grouping(self, clause):
+        return self.process(clause.element)
+
+    def visit_null(self, clause):
+        return lambda obj: None
+
+    def visit_false(self, clause):
+        return lambda obj: False
+
+    def visit_true(self, clause):
+        return lambda obj: True
+
+    def visit_column(self, clause):
+        if 'parentmapper' in clause._annotations:
+            parentmapper = clause._annotations['parentmapper']
+            if self.target_cls and not issubclass(
+                    self.target_cls, parentmapper.class_):
+                raise UnevaluatableError(
+                    "Can't evaluate criteria against alternate class %s" %
+                    parentmapper.class_
+                )
+            key = parentmapper._columntoproperty[clause].key
+        else:
+            key = clause.key
+
+        get_corresponding_attr = operator.attrgetter(key)
+        return lambda obj: get_corresponding_attr(obj)
+
+    def visit_clauselist(self, clause):
+        evaluators = list(map(self.process, clause.clauses))
+        if clause.operator is operators.or_:
+            def evaluate(obj):
+                has_null = False
+                for sub_evaluate in evaluators:
+                    value = sub_evaluate(obj)
+                    if value:
+                        return True
+                    has_null = has_null or value is None
+                if has_null:
+                    return None
+                return False
+        elif clause.operator is operators.and_:
+            def evaluate(obj):
+                for sub_evaluate in evaluators:
+                    value = sub_evaluate(obj)
+                    if not value:
+                        if value is None:
+                            return None
+                        return False
+                return True
+        else:
+            raise UnevaluatableError(
+                "Cannot evaluate clauselist with operator %s" %
+                clause.operator)
+
+        return evaluate
+
+    def visit_binary(self, clause):
+        eval_left, eval_right = list(map(self.process,
+                                         [clause.left, clause.right]))
+        operator = clause.operator
+        if operator is operators.is_:
+            def evaluate(obj):
+                return eval_left(obj) == eval_right(obj)
+        elif operator is operators.isnot:
+            def evaluate(obj):
+                return eval_left(obj) != eval_right(obj)
+        elif operator in _straight_ops:
+            def evaluate(obj):
+                left_val = eval_left(obj)
+                right_val = eval_right(obj)
+                if left_val is None or right_val is None:
+                    # a None operand represents SQL NULL; comparisons
+                    # involving NULL yield NULL rather than True/False
+                    return None
+                return operator(left_val, right_val)
+        else:
+            raise UnevaluatableError(
+                "Cannot evaluate %s with operator %s" %
+                (type(clause).__name__, clause.operator))
+        return evaluate
+
+    def visit_unary(self, clause):
+        eval_inner = self.process(clause.element)
+        if clause.operator is operators.inv:
+            def evaluate(obj):
+                value = eval_inner(obj)
+                if value is None:
+                    return None
+                return not value
+            return evaluate
+        raise UnevaluatableError(
+            "Cannot evaluate %s with operator %s" %
+            (type(clause).__name__, clause.operator))
+
+    def visit_bindparam(self, clause):
+        if clause.callable:
+            val = clause.callable()
+        else:
+            val = clause.value
+        return lambda obj: val
diff --git a/app/lib/sqlalchemy/orm/events.py b/app/lib/sqlalchemy/orm/events.py
new file mode 100644
index 0000000..d2ccec4
--- /dev/null
+++ b/app/lib/sqlalchemy/orm/events.py
@@ -0,0 +1,2187 @@
+# orm/events.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License:
http://www.opensource.org/licenses/mit-license.php + +"""ORM event interfaces. + +""" +from .. import event, exc, util +from .base import _mapper_or_none +import inspect +import weakref +from . import interfaces +from . import mapperlib, instrumentation +from .session import Session, sessionmaker +from .scoping import scoped_session +from .attributes import QueryableAttribute +from .query import Query +from sqlalchemy.util.compat import inspect_getargspec + +class InstrumentationEvents(event.Events): + """Events related to class instrumentation events. + + The listeners here support being established against + any new style class, that is any object that is a subclass + of 'type'. Events will then be fired off for events + against that class. If the "propagate=True" flag is passed + to event.listen(), the event will fire off for subclasses + of that class as well. + + The Python ``type`` builtin is also accepted as a target, + which when used has the effect of events being emitted + for all classes. + + Note the "propagate" flag here is defaulted to ``True``, + unlike the other class level events where it defaults + to ``False``. This means that new subclasses will also + be the subject of these events, when a listener + is established on a superclass. + + .. versionchanged:: 0.8 - events here will emit based + on comparing the incoming class to the type of class + passed to :func:`.event.listen`. Previously, the + event would fire for any class unconditionally regardless + of what class was sent for listening, despite + documentation which stated the contrary. + + """ + + _target_class_doc = "SomeBaseClass" + _dispatch_target = instrumentation.InstrumentationFactory + + @classmethod + def _accept_with(cls, target): + if isinstance(target, type): + return _InstrumentationEventsHold(target) + else: + return None + + @classmethod + def _listen(cls, event_key, propagate=True, **kw): + target, identifier, fn = \ + event_key.dispatch_target, event_key.identifier, \ + event_key._listen_fn + + def listen(target_cls, *arg): + listen_cls = target() + if propagate and issubclass(target_cls, listen_cls): + return fn(target_cls, *arg) + elif not propagate and target_cls is listen_cls: + return fn(target_cls, *arg) + + def remove(ref): + key = event.registry._EventKey( + None, identifier, listen, + instrumentation._instrumentation_factory) + getattr(instrumentation._instrumentation_factory.dispatch, + identifier).remove(key) + + target = weakref.ref(target.class_, remove) + + event_key.\ + with_dispatch_target(instrumentation._instrumentation_factory).\ + with_wrapper(listen).base_listen(**kw) + + @classmethod + def _clear(cls): + super(InstrumentationEvents, cls)._clear() + instrumentation._instrumentation_factory.dispatch._clear() + + def class_instrument(self, cls): + """Called after the given class is instrumented. + + To get at the :class:`.ClassManager`, use + :func:`.manager_of_class`. + + """ + + def class_uninstrument(self, cls): + """Called before the given class is uninstrumented. + + To get at the :class:`.ClassManager`, use + :func:`.manager_of_class`. + + """ + + def attribute_instrument(self, cls, key, inst): + """Called when an attribute is instrumented.""" + + +class _InstrumentationEventsHold(object): + """temporary marker object used to transfer from _accept_with() to + _listen() on the InstrumentationEvents class. 
+ + """ + + def __init__(self, class_): + self.class_ = class_ + + dispatch = event.dispatcher(InstrumentationEvents) + + +class InstanceEvents(event.Events): + """Define events specific to object lifecycle. + + e.g.:: + + from sqlalchemy import event + + def my_load_listener(target, context): + print "on load!" + + event.listen(SomeClass, 'load', my_load_listener) + + Available targets include: + + * mapped classes + * unmapped superclasses of mapped or to-be-mapped classes + (using the ``propagate=True`` flag) + * :class:`.Mapper` objects + * the :class:`.Mapper` class itself and the :func:`.mapper` + function indicate listening for all mappers. + + .. versionchanged:: 0.8.0 instance events can be associated with + unmapped superclasses of mapped classes. + + Instance events are closely related to mapper events, but + are more specific to the instance and its instrumentation, + rather than its system of persistence. + + When using :class:`.InstanceEvents`, several modifiers are + available to the :func:`.event.listen` function. + + :param propagate=False: When True, the event listener should + be applied to all inheriting classes as well as the + class which is the target of this listener. + :param raw=False: When True, the "target" argument passed + to applicable event listener functions will be the + instance's :class:`.InstanceState` management + object, rather than the mapped instance itself. + + """ + + _target_class_doc = "SomeClass" + + _dispatch_target = instrumentation.ClassManager + + @classmethod + def _new_classmanager_instance(cls, class_, classmanager): + _InstanceEventsHold.populate(class_, classmanager) + + @classmethod + @util.dependencies("sqlalchemy.orm") + def _accept_with(cls, orm, target): + if isinstance(target, instrumentation.ClassManager): + return target + elif isinstance(target, mapperlib.Mapper): + return target.class_manager + elif target is orm.mapper: + return instrumentation.ClassManager + elif isinstance(target, type): + if issubclass(target, mapperlib.Mapper): + return instrumentation.ClassManager + else: + manager = instrumentation.manager_of_class(target) + if manager: + return manager + else: + return _InstanceEventsHold(target) + return None + + @classmethod + def _listen(cls, event_key, raw=False, propagate=False, **kw): + target, identifier, fn = \ + event_key.dispatch_target, event_key.identifier, \ + event_key._listen_fn + + if not raw: + def wrap(state, *arg, **kw): + return fn(state.obj(), *arg, **kw) + event_key = event_key.with_wrapper(wrap) + + event_key.base_listen(propagate=propagate, **kw) + + if propagate: + for mgr in target.subclass_managers(True): + event_key.with_dispatch_target(mgr).base_listen( + propagate=True) + + @classmethod + def _clear(cls): + super(InstanceEvents, cls)._clear() + _InstanceEventsHold._clear() + + def first_init(self, manager, cls): + """Called when the first instance of a particular mapping is called. + + This event is called when the ``__init__`` method of a class + is called the first time for that particular class. The event + invokes before ``__init__`` actually proceeds as well as before + the :meth:`.InstanceEvents.init` event is invoked. + + """ + + def init(self, target, args, kwargs): + """Receive an instance when its constructor is called. + + This method is only called during a userland construction of + an object, in conjunction with the object's constructor, e.g. + its ``__init__`` method. 
It is not called when an object is + loaded from the database; see the :meth:`.InstanceEvents.load` + event in order to intercept a database load. + + The event is called before the actual ``__init__`` constructor + of the object is called. The ``kwargs`` dictionary may be + modified in-place in order to affect what is passed to + ``__init__``. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param args: positional arguments passed to the ``__init__`` method. + This is passed as a tuple and is currently immutable. + :param kwargs: keyword arguments passed to the ``__init__`` method. + This structure *can* be altered in place. + + .. seealso:: + + :meth:`.InstanceEvents.init_failure` + + :meth:`.InstanceEvents.load` + + """ + + def init_failure(self, target, args, kwargs): + """Receive an instance when its constructor has been called, + and raised an exception. + + This method is only called during a userland construction of + an object, in conjunction with the object's constructor, e.g. + its ``__init__`` method. It is not called when an object is loaded + from the database. + + The event is invoked after an exception raised by the ``__init__`` + method is caught. After the event + is invoked, the original exception is re-raised outwards, so that + the construction of the object still raises an exception. The + actual exception and stack trace raised should be present in + ``sys.exc_info()``. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param args: positional arguments that were passed to the ``__init__`` + method. + :param kwargs: keyword arguments that were passed to the ``__init__`` + method. + + .. seealso:: + + :meth:`.InstanceEvents.init` + + :meth:`.InstanceEvents.load` + + """ + + def load(self, target, context): + """Receive an object instance after it has been created via + ``__new__``, and after initial attribute population has + occurred. + + This typically occurs when the instance is created based on + incoming result rows, and is only called once for that + instance's lifetime. + + Note that during a result-row load, this method is called upon + the first row received for this instance. Note that some + attributes and collections may or may not be loaded or even + initialized, depending on what's present in the result rows. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param context: the :class:`.QueryContext` corresponding to the + current :class:`.Query` in progress. This argument may be + ``None`` if the load does not correspond to a :class:`.Query`, + such as during :meth:`.Session.merge`. + + .. seealso:: + + :meth:`.InstanceEvents.init` + + :meth:`.InstanceEvents.refresh` + + :meth:`.SessionEvents.loaded_as_persistent` + + """ + + def refresh(self, target, context, attrs): + """Receive an object instance after one or more attributes have + been refreshed from a query. + + Contrast this to the :meth:`.InstanceEvents.load` method, which + is invoked when the object is first loaded from a query. + + :param target: the mapped instance. 
If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param context: the :class:`.QueryContext` corresponding to the + current :class:`.Query` in progress. + :param attrs: sequence of attribute names which + were populated, or None if all column-mapped, non-deferred + attributes were populated. + + .. seealso:: + + :meth:`.InstanceEvents.load` + + """ + + def refresh_flush(self, target, flush_context, attrs): + """Receive an object instance after one or more attributes have + been refreshed within the persistence of the object. + + This event is the same as :meth:`.InstanceEvents.refresh` except + it is invoked within the unit of work flush process, and the values + here typically come from the process of handling an INSERT or + UPDATE, such as via the RETURNING clause or from Python-side default + values. + + .. versionadded:: 1.0.5 + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param flush_context: Internal :class:`.UOWTransaction` object + which handles the details of the flush. + :param attrs: sequence of attribute names which + were populated. + + """ + + def expire(self, target, attrs): + """Receive an object instance after its attributes or some subset + have been expired. + + 'keys' is a list of attribute names. If None, the entire + state was expired. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param attrs: sequence of attribute + names which were expired, or None if all attributes were + expired. + + """ + + def pickle(self, target, state_dict): + """Receive an object instance when its associated state is + being pickled. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param state_dict: the dictionary returned by + :class:`.InstanceState.__getstate__`, containing the state + to be pickled. + + """ + + def unpickle(self, target, state_dict): + """Receive an object instance after its associated state has + been unpickled. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param state_dict: the dictionary sent to + :class:`.InstanceState.__setstate__`, containing the state + dictionary which was pickled. + + """ + + +class _EventsHold(event.RefCollection): + """Hold onto listeners against unmapped, uninstrumented classes. + + Establish _listen() for that class' mapper/instrumentation when + those objects are created for that class. 
+ + """ + + def __init__(self, class_): + self.class_ = class_ + + @classmethod + def _clear(cls): + cls.all_holds.clear() + + class HoldEvents(object): + _dispatch_target = None + + @classmethod + def _listen(cls, event_key, raw=False, propagate=False, **kw): + target, identifier, fn = \ + event_key.dispatch_target, event_key.identifier, event_key.fn + + if target.class_ in target.all_holds: + collection = target.all_holds[target.class_] + else: + collection = target.all_holds[target.class_] = {} + + event.registry._stored_in_collection(event_key, target) + collection[event_key._key] = (event_key, raw, propagate) + + if propagate: + stack = list(target.class_.__subclasses__()) + while stack: + subclass = stack.pop(0) + stack.extend(subclass.__subclasses__()) + subject = target.resolve(subclass) + if subject is not None: + # we are already going through __subclasses__() + # so leave generic propagate flag False + event_key.with_dispatch_target(subject).\ + listen(raw=raw, propagate=False, **kw) + + def remove(self, event_key): + target, identifier, fn = \ + event_key.dispatch_target, event_key.identifier, event_key.fn + + if isinstance(target, _EventsHold): + collection = target.all_holds[target.class_] + del collection[event_key._key] + + @classmethod + def populate(cls, class_, subject): + for subclass in class_.__mro__: + if subclass in cls.all_holds: + collection = cls.all_holds[subclass] + for event_key, raw, propagate in collection.values(): + if propagate or subclass is class_: + # since we can't be sure in what order different + # classes in a hierarchy are triggered with + # populate(), we rely upon _EventsHold for all event + # assignment, instead of using the generic propagate + # flag. + event_key.with_dispatch_target(subject).\ + listen(raw=raw, propagate=False) + + +class _InstanceEventsHold(_EventsHold): + all_holds = weakref.WeakKeyDictionary() + + def resolve(self, class_): + return instrumentation.manager_of_class(class_) + + class HoldInstanceEvents(_EventsHold.HoldEvents, InstanceEvents): + pass + + dispatch = event.dispatcher(HoldInstanceEvents) + + +class MapperEvents(event.Events): + """Define events specific to mappings. + + e.g.:: + + from sqlalchemy import event + + def my_before_insert_listener(mapper, connection, target): + # execute a stored procedure upon INSERT, + # apply the value to the row to be inserted + target.calculated_value = connection.scalar( + "select my_special_function(%d)" + % target.special_number) + + # associate the listener function with SomeClass, + # to execute during the "before_insert" hook + event.listen( + SomeClass, 'before_insert', my_before_insert_listener) + + Available targets include: + + * mapped classes + * unmapped superclasses of mapped or to-be-mapped classes + (using the ``propagate=True`` flag) + * :class:`.Mapper` objects + * the :class:`.Mapper` class itself and the :func:`.mapper` + function indicate listening for all mappers. + + .. versionchanged:: 0.8.0 mapper events can be associated with + unmapped superclasses of mapped classes. + + Mapper events provide hooks into critical sections of the + mapper, including those related to object instrumentation, + object loading, and object persistence. In particular, the + persistence methods :meth:`~.MapperEvents.before_insert`, + and :meth:`~.MapperEvents.before_update` are popular + places to augment the state being persisted - however, these + methods operate with several significant restrictions. 
The + user is encouraged to evaluate the + :meth:`.SessionEvents.before_flush` and + :meth:`.SessionEvents.after_flush` methods as more + flexible and user-friendly hooks in which to apply + additional database state during a flush. + + When using :class:`.MapperEvents`, several modifiers are + available to the :func:`.event.listen` function. + + :param propagate=False: When True, the event listener should + be applied to all inheriting mappers and/or the mappers of + inheriting classes, as well as any + mapper which is the target of this listener. + :param raw=False: When True, the "target" argument passed + to applicable event listener functions will be the + instance's :class:`.InstanceState` management + object, rather than the mapped instance itself. + :param retval=False: when True, the user-defined event function + must have a return value, the purpose of which is either to + control subsequent event propagation, or to otherwise alter + the operation in progress by the mapper. Possible return + values are: + + * ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event + processing normally. + * ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent + event handlers in the chain. + * other values - the return value specified by specific listeners. + + """ + + _target_class_doc = "SomeClass" + _dispatch_target = mapperlib.Mapper + + @classmethod + def _new_mapper_instance(cls, class_, mapper): + _MapperEventsHold.populate(class_, mapper) + + @classmethod + @util.dependencies("sqlalchemy.orm") + def _accept_with(cls, orm, target): + if target is orm.mapper: + return mapperlib.Mapper + elif isinstance(target, type): + if issubclass(target, mapperlib.Mapper): + return target + else: + mapper = _mapper_or_none(target) + if mapper is not None: + return mapper + else: + return _MapperEventsHold(target) + else: + return target + + @classmethod + def _listen( + cls, event_key, raw=False, retval=False, propagate=False, **kw): + target, identifier, fn = \ + event_key.dispatch_target, event_key.identifier, \ + event_key._listen_fn + + if identifier in ("before_configured", "after_configured") and \ + target is not mapperlib.Mapper: + util.warn( + "'before_configured' and 'after_configured' ORM events " + "only invoke with the mapper() function or Mapper class " + "as the target.") + + if not raw or not retval: + if not raw: + meth = getattr(cls, identifier) + try: + target_index = \ + inspect_getargspec(meth)[0].index('target') - 1 + except ValueError: + target_index = None + + def wrap(*arg, **kw): + if not raw and target_index is not None: + arg = list(arg) + arg[target_index] = arg[target_index].obj() + if not retval: + fn(*arg, **kw) + return interfaces.EXT_CONTINUE + else: + return fn(*arg, **kw) + event_key = event_key.with_wrapper(wrap) + + if propagate: + for mapper in target.self_and_descendants: + event_key.with_dispatch_target(mapper).base_listen( + propagate=True, **kw) + else: + event_key.base_listen(**kw) + + @classmethod + def _clear(cls): + super(MapperEvents, cls)._clear() + _MapperEventsHold._clear() + + def instrument_class(self, mapper, class_): + r"""Receive a class when the mapper is first constructed, + before instrumentation is applied to the mapped class. + + This event is the earliest phase of mapper construction. + Most attributes of the mapper are not yet initialized. 
+ + This listener can either be applied to the :class:`.Mapper` + class overall, or to any un-mapped class which serves as a base + for classes that will be mapped (using the ``propagate=True`` flag):: + + Base = declarative_base() + + @event.listens_for(Base, "instrument_class", propagate=True) + def on_new_class(mapper, cls_): + " ... " + + :param mapper: the :class:`.Mapper` which is the target + of this event. + :param class\_: the mapped class. + + """ + + def mapper_configured(self, mapper, class_): + r"""Called when a specific mapper has completed its own configuration + within the scope of the :func:`.configure_mappers` call. + + The :meth:`.MapperEvents.mapper_configured` event is invoked + for each mapper that is encountered when the + :func:`.orm.configure_mappers` function proceeds through the current + list of not-yet-configured mappers. + :func:`.orm.configure_mappers` is typically invoked + automatically as mappings are first used, as well as each time + new mappers have been made available and new mapper use is + detected. + + When the event is called, the mapper should be in its final + state, but **not including backrefs** that may be invoked from + other mappers; they might still be pending within the + configuration operation. Bidirectional relationships that + are instead configured via the + :paramref:`.orm.relationship.back_populates` argument + *will* be fully available, since this style of relationship does not + rely upon other possibly-not-configured mappers to know that they + exist. + + For an event that is guaranteed to have **all** mappers ready + to go including backrefs that are defined only on other + mappings, use the :meth:`.MapperEvents.after_configured` + event; this event invokes only after all known mappings have been + fully configured. + + The :meth:`.MapperEvents.mapper_configured` event, unlike + :meth:`.MapperEvents.before_configured` or + :meth:`.MapperEvents.after_configured`, + is called for each mapper/class individually, and the mapper is + passed to the event itself. It also is called exactly once for + a particular mapper. The event is therefore useful for + configurational steps that benefit from being invoked just once + on a specific mapper basis, which don't require that "backref" + configurations are necessarily ready yet. + + :param mapper: the :class:`.Mapper` which is the target + of this event. + :param class\_: the mapped class. + + .. seealso:: + + :meth:`.MapperEvents.before_configured` + + :meth:`.MapperEvents.after_configured` + + """ + # TODO: need coverage for this event + + def before_configured(self): + """Called before a series of mappers have been configured. + + The :meth:`.MapperEvents.before_configured` event is invoked + each time the :func:`.orm.configure_mappers` function is + invoked, before the function has done any of its work. + :func:`.orm.configure_mappers` is typically invoked + automatically as mappings are first used, as well as each time + new mappers have been made available and new mapper use is + detected. + + This event can **only** be applied to the :class:`.Mapper` class + or :func:`.mapper` function, and not to individual mappings or + mapped classes. It is only invoked for all mappings as a whole:: + + from sqlalchemy.orm import mapper + + @event.listens_for(mapper, "before_configured") + def go(): + # ... 
+
+        Contrast this event to :meth:`.MapperEvents.after_configured`,
+        which is invoked after the series of mappers has been configured,
+        as well as :meth:`.MapperEvents.mapper_configured`, which is invoked
+        on a per-mapper basis as each one is configured to the extent possible.
+
+        Theoretically this event is called once per
+        application, but is actually called any time new mappers
+        are to be affected by a :func:`.orm.configure_mappers`
+        call.  If new mappings are constructed after existing ones have
+        already been used, this event will likely be called again.  To ensure
+        that a particular event is only called once and no further, the
+        ``once=True`` argument (new in 0.9.4) can be applied::
+
+            from sqlalchemy.orm import mapper
+
+            @event.listens_for(mapper, "before_configured", once=True)
+            def go():
+                # ...
+
+
+        .. versionadded:: 0.9.3
+
+
+        .. seealso::
+
+            :meth:`.MapperEvents.mapper_configured`
+
+            :meth:`.MapperEvents.after_configured`
+
+        """
+
+    def after_configured(self):
+        """Called after a series of mappers have been configured.
+
+        The :meth:`.MapperEvents.after_configured` event is invoked
+        each time the :func:`.orm.configure_mappers` function is
+        invoked, after the function has completed its work.
+        :func:`.orm.configure_mappers` is typically invoked
+        automatically as mappings are first used, as well as each time
+        new mappers have been made available and new mapper use is
+        detected.
+
+        Contrast this event to the :meth:`.MapperEvents.mapper_configured`
+        event, which is called on a per-mapper basis while the configuration
+        operation proceeds; unlike that event, when this event is invoked,
+        all cross-configurations (e.g. backrefs) will also have been made
+        available for any mappers that were pending.
+        Also contrast to :meth:`.MapperEvents.before_configured`,
+        which is invoked before the series of mappers has been configured.
+
+        This event can **only** be applied to the :class:`.Mapper` class
+        or :func:`.mapper` function, and not to individual mappings or
+        mapped classes.  It is only invoked for all mappings as a whole::
+
+            from sqlalchemy.orm import mapper
+
+            @event.listens_for(mapper, "after_configured")
+            def go():
+                # ...
+
+        Theoretically this event is called once per
+        application, but is actually called any time new mappers
+        have been affected by a :func:`.orm.configure_mappers`
+        call.  If new mappings are constructed after existing ones have
+        already been used, this event will likely be called again.  To ensure
+        that a particular event is only called once and no further, the
+        ``once=True`` argument (new in 0.9.4) can be applied::
+
+            from sqlalchemy.orm import mapper
+
+            @event.listens_for(mapper, "after_configured", once=True)
+            def go():
+                # ...
+
+        .. seealso::
+
+            :meth:`.MapperEvents.mapper_configured`
+
+            :meth:`.MapperEvents.before_configured`
+
+        """
+
+    def before_insert(self, mapper, connection, target):
+        """Receive an object instance before an INSERT statement
+        is emitted corresponding to that instance.
+
+        This event is used to modify local, non-object related
+        attributes on the instance before an INSERT occurs, as well
+        as to emit additional SQL statements on the given
+        connection.
+
+        The event is often called for a batch of objects of the
+        same class before their INSERT statements are emitted at
+        once in a later step.
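+
+        As an illustration only (``Widget`` being a hypothetical mapped
+        class with a ``created_at`` column), such a listener might stamp
+        a value just before the INSERT::
+
+            import datetime
+
+            from sqlalchemy import event
+
+            @event.listens_for(Widget, "before_insert")
+            def stamp_created(mapper, connection, target):
+                # set a local, column-mapped attribute; the value is
+                # picked up by the INSERT about to be emitted
+                target.created_at = datetime.datetime.utcnow()
+
+        Such a listener runs once per object even when the INSERT
+        statements themselves are batched.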
In the extremely rare case that + this is not desirable, the :func:`.mapper` can be + configured with ``batch=False``, which will cause + batches of instances to be broken up into individual + (and more poorly performing) event->persist->event + steps. + + .. warning:: + + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. + + :param mapper: the :class:`.Mapper` which is the target + of this event. + :param connection: the :class:`.Connection` being used to + emit INSERT statements for this instance. This + provides a handle into the current transaction on the + target database specific to this instance. + :param target: the mapped instance being persisted. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + + """ + + def after_insert(self, mapper, connection, target): + """Receive an object instance after an INSERT statement + is emitted corresponding to that instance. + + This event is used to modify in-Python-only + state on the instance after an INSERT occurs, as well + as to emit additional SQL statements on the given + connection. + + The event is often called for a batch of objects of the + same class after their INSERT statements have been + emitted at once in a previous step. In the extremely + rare case that this is not desirable, the + :func:`.mapper` can be configured with ``batch=False``, + which will cause batches of instances to be broken up + into individual (and more poorly performing) + event->persist->event steps. + + .. warning:: + + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. + + :param mapper: the :class:`.Mapper` which is the target + of this event. + :param connection: the :class:`.Connection` being used to + emit INSERT statements for this instance. This + provides a handle into the current transaction on the + target database specific to this instance. + :param target: the mapped instance being persisted. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + + """ + + def before_update(self, mapper, connection, target): + """Receive an object instance before an UPDATE statement + is emitted corresponding to that instance. + + This event is used to modify local, non-object related + attributes on the instance before an UPDATE occurs, as well + as to emit additional SQL statements on the given + connection. 
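+
+        As an illustration only (``Widget`` being a hypothetical mapped
+        class with an ``updated_at`` column), a listener might refresh a
+        timestamp before the UPDATE is emitted::
+
+            import datetime
+
+            from sqlalchemy import event
+
+            @event.listens_for(Widget, "before_update")
+            def stamp_updated(mapper, connection, target):
+                # modify a local, column-mapped attribute only; the new
+                # value is included in the UPDATE statement
+                target.updated_at = datetime.datetime.utcnow()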
+ + This method is called for all instances that are + marked as "dirty", *even those which have no net changes + to their column-based attributes*. An object is marked + as dirty when any of its column-based attributes have a + "set attribute" operation called or when any of its + collections are modified. If, at update time, no + column-based attributes have any net changes, no UPDATE + statement will be issued. This means that an instance + being sent to :meth:`~.MapperEvents.before_update` is + *not* a guarantee that an UPDATE statement will be + issued, although you can affect the outcome here by + modifying attributes so that a net change in value does + exist. + + To detect if the column-based attributes on the object have net + changes, and will therefore generate an UPDATE statement, use + ``object_session(instance).is_modified(instance, + include_collections=False)``. + + The event is often called for a batch of objects of the + same class before their UPDATE statements are emitted at + once in a later step. In the extremely rare case that + this is not desirable, the :func:`.mapper` can be + configured with ``batch=False``, which will cause + batches of instances to be broken up into individual + (and more poorly performing) event->persist->event + steps. + + .. warning:: + + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. + + :param mapper: the :class:`.Mapper` which is the target + of this event. + :param connection: the :class:`.Connection` being used to + emit UPDATE statements for this instance. This + provides a handle into the current transaction on the + target database specific to this instance. + :param target: the mapped instance being persisted. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + + """ + + def after_update(self, mapper, connection, target): + """Receive an object instance after an UPDATE statement + is emitted corresponding to that instance. + + This event is used to modify in-Python-only + state on the instance after an UPDATE occurs, as well + as to emit additional SQL statements on the given + connection. + + This method is called for all instances that are + marked as "dirty", *even those which have no net changes + to their column-based attributes*, and for which + no UPDATE statement has proceeded. An object is marked + as dirty when any of its column-based attributes have a + "set attribute" operation called or when any of its + collections are modified. If, at update time, no + column-based attributes have any net changes, no UPDATE + statement will be issued. This means that an instance + being sent to :meth:`~.MapperEvents.after_update` is + *not* a guarantee that an UPDATE statement has been + issued. + + To detect if the column-based attributes on the object have net + changes, and therefore resulted in an UPDATE statement, use + ``object_session(instance).is_modified(instance, + include_collections=False)``. 
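+
+        As an illustration only, a sketch of that check inside a listener
+        (``Widget`` again being a hypothetical mapped class)::
+
+            from sqlalchemy import event
+            from sqlalchemy.orm import object_session
+
+            @event.listens_for(Widget, "after_update")
+            def receive_after_update(mapper, connection, target):
+                # True only if column-based attributes had net changes,
+                # i.e. an UPDATE statement was actually generated
+                if object_session(target).is_modified(
+                        target, include_collections=False):
+                    print("net change flushed for %r" % target)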
+ + The event is often called for a batch of objects of the + same class after their UPDATE statements have been emitted at + once in a previous step. In the extremely rare case that + this is not desirable, the :func:`.mapper` can be + configured with ``batch=False``, which will cause + batches of instances to be broken up into individual + (and more poorly performing) event->persist->event + steps. + + .. warning:: + + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. + + :param mapper: the :class:`.Mapper` which is the target + of this event. + :param connection: the :class:`.Connection` being used to + emit UPDATE statements for this instance. This + provides a handle into the current transaction on the + target database specific to this instance. + :param target: the mapped instance being persisted. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + + """ + + def before_delete(self, mapper, connection, target): + """Receive an object instance before a DELETE statement + is emitted corresponding to that instance. + + This event is used to emit additional SQL statements on + the given connection as well as to perform application + specific bookkeeping related to a deletion event. + + The event is often called for a batch of objects of the + same class before their DELETE statements are emitted at + once in a later step. + + .. warning:: + + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. + + :param mapper: the :class:`.Mapper` which is the target + of this event. + :param connection: the :class:`.Connection` being used to + emit DELETE statements for this instance. This + provides a handle into the current transaction on the + target database specific to this instance. + :param target: the mapped instance being deleted. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + + """ + + def after_delete(self, mapper, connection, target): + """Receive an object instance after a DELETE statement + has been emitted corresponding to that instance. + + This event is used to emit additional SQL statements on + the given connection as well as to perform application + specific bookkeeping related to a deletion event. + + The event is often called for a batch of objects of the + same class after their DELETE statements have been emitted at + once in a previous step. + + .. 
warning:: + + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. + + :param mapper: the :class:`.Mapper` which is the target + of this event. + :param connection: the :class:`.Connection` being used to + emit DELETE statements for this instance. This + provides a handle into the current transaction on the + target database specific to this instance. + :param target: the mapped instance being deleted. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + + """ + + +class _MapperEventsHold(_EventsHold): + all_holds = weakref.WeakKeyDictionary() + + def resolve(self, class_): + return _mapper_or_none(class_) + + class HoldMapperEvents(_EventsHold.HoldEvents, MapperEvents): + pass + + dispatch = event.dispatcher(HoldMapperEvents) + + +class SessionEvents(event.Events): + """Define events specific to :class:`.Session` lifecycle. + + e.g.:: + + from sqlalchemy import event + from sqlalchemy.orm import sessionmaker + + def my_before_commit(session): + print "before commit!" + + Session = sessionmaker() + + event.listen(Session, "before_commit", my_before_commit) + + The :func:`~.event.listen` function will accept + :class:`.Session` objects as well as the return result + of :class:`~.sessionmaker()` and :class:`~.scoped_session()`. + + Additionally, it accepts the :class:`.Session` class which + will apply listeners to all :class:`.Session` instances + globally. + + """ + + _target_class_doc = "SomeSessionOrFactory" + + _dispatch_target = Session + + @classmethod + def _accept_with(cls, target): + if isinstance(target, scoped_session): + + target = target.session_factory + if not isinstance(target, sessionmaker) and \ + ( + not isinstance(target, type) or + not issubclass(target, Session) + ): + raise exc.ArgumentError( + "Session event listen on a scoped_session " + "requires that its creation callable " + "is associated with the Session class.") + + if isinstance(target, sessionmaker): + return target.class_ + elif isinstance(target, type): + if issubclass(target, scoped_session): + return Session + elif issubclass(target, Session): + return target + elif isinstance(target, Session): + return target + else: + return None + + def after_transaction_create(self, session, transaction): + """Execute when a new :class:`.SessionTransaction` is created. + + This event differs from :meth:`~.SessionEvents.after_begin` + in that it occurs for each :class:`.SessionTransaction` + overall, as opposed to when transactions are begun + on individual database connections. It is also invoked + for nested transactions and subtransactions, and is always + matched by a corresponding + :meth:`~.SessionEvents.after_transaction_end` event + (assuming normal operation of the :class:`.Session`). + + :param session: the target :class:`.Session`. + :param transaction: the target :class:`.SessionTransaction`. 
+
+        To detect if this is the outermost
+        :class:`.SessionTransaction`, as opposed to a "subtransaction" or a
+        SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
+        is ``None``::
+
+            @event.listens_for(session, "after_transaction_create")
+            def after_transaction_create(session, transaction):
+                if transaction.parent is None:
+                    # work with top-level transaction
+
+        To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
+        :attr:`.SessionTransaction.nested` attribute::
+
+            @event.listens_for(session, "after_transaction_create")
+            def after_transaction_create(session, transaction):
+                if transaction.nested:
+                    # work with SAVEPOINT transaction
+
+
+        .. seealso::
+
+            :class:`.SessionTransaction`
+
+            :meth:`~.SessionEvents.after_transaction_end`
+
+        """
+
+    def after_transaction_end(self, session, transaction):
+        """Execute when the span of a :class:`.SessionTransaction` ends.
+
+        This event differs from :meth:`~.SessionEvents.after_commit`
+        in that it corresponds to all :class:`.SessionTransaction`
+        objects in use, including those for nested transactions
+        and subtransactions, and is always matched by a corresponding
+        :meth:`~.SessionEvents.after_transaction_create` event.
+
+        :param session: the target :class:`.Session`.
+        :param transaction: the target :class:`.SessionTransaction`.
+
+        To detect if this is the outermost
+        :class:`.SessionTransaction`, as opposed to a "subtransaction" or a
+        SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
+        is ``None``::
+
+            @event.listens_for(session, "after_transaction_end")
+            def after_transaction_end(session, transaction):
+                if transaction.parent is None:
+                    # work with top-level transaction
+
+        To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
+        :attr:`.SessionTransaction.nested` attribute::
+
+            @event.listens_for(session, "after_transaction_end")
+            def after_transaction_end(session, transaction):
+                if transaction.nested:
+                    # work with SAVEPOINT transaction
+
+
+        .. seealso::
+
+            :class:`.SessionTransaction`
+
+            :meth:`~.SessionEvents.after_transaction_create`
+
+        """
+
+    def before_commit(self, session):
+        """Execute before commit is called.
+
+        .. note::
+
+            The :meth:`~.SessionEvents.before_commit` hook is *not* per-flush,
+            that is, the :class:`.Session` can emit SQL to the database
+            many times within the scope of a transaction.
+            For interception of these events, use the
+            :meth:`~.SessionEvents.before_flush`,
+            :meth:`~.SessionEvents.after_flush`, or
+            :meth:`~.SessionEvents.after_flush_postexec`
+            events.
+
+        :param session: The target :class:`.Session`.
+
+        .. seealso::
+
+            :meth:`~.SessionEvents.after_commit`
+
+            :meth:`~.SessionEvents.after_begin`
+
+            :meth:`~.SessionEvents.after_transaction_create`
+
+            :meth:`~.SessionEvents.after_transaction_end`
+
+        """
+
+    def after_commit(self, session):
+        """Execute after a commit has occurred.
+
+        .. note::
+
+            The :meth:`~.SessionEvents.after_commit` hook is *not* per-flush,
+            that is, the :class:`.Session` can emit SQL to the database
+            many times within the scope of a transaction.
+            For interception of these events, use the
+            :meth:`~.SessionEvents.before_flush`,
+            :meth:`~.SessionEvents.after_flush`, or
+            :meth:`~.SessionEvents.after_flush_postexec`
+            events.
+
+        .. note::
+
+            The :class:`.Session` is not in an active transaction
+            when the :meth:`~.SessionEvents.after_commit` event is invoked,
+            and therefore can not emit SQL.
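+
+            As an illustration only, a sketch of Python-side-only work
+            from this hook, assuming ``Session`` is a session factory as
+            in the example at the top of this class and
+            ``invalidate_cache()`` is a hypothetical application helper::
+
+                from sqlalchemy import event
+
+                @event.listens_for(Session, "after_commit")
+                def receive_after_commit(session):
+                    # no SQL may be emitted here; perform Python-side
+                    # cleanup only
+                    invalidate_cache()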
To emit SQL corresponding to + every transaction, use the :meth:`~.SessionEvents.before_commit` + event. + + :param session: The target :class:`.Session`. + + .. seealso:: + + :meth:`~.SessionEvents.before_commit` + + :meth:`~.SessionEvents.after_begin` + + :meth:`~.SessionEvents.after_transaction_create` + + :meth:`~.SessionEvents.after_transaction_end` + + """ + + def after_rollback(self, session): + """Execute after a real DBAPI rollback has occurred. + + Note that this event only fires when the *actual* rollback against + the database occurs - it does *not* fire each time the + :meth:`.Session.rollback` method is called, if the underlying + DBAPI transaction has already been rolled back. In many + cases, the :class:`.Session` will not be in + an "active" state during this event, as the current + transaction is not valid. To acquire a :class:`.Session` + which is active after the outermost rollback has proceeded, + use the :meth:`.SessionEvents.after_soft_rollback` event, checking the + :attr:`.Session.is_active` flag. + + :param session: The target :class:`.Session`. + + """ + + def after_soft_rollback(self, session, previous_transaction): + """Execute after any rollback has occurred, including "soft" + rollbacks that don't actually emit at the DBAPI level. + + This corresponds to both nested and outer rollbacks, i.e. + the innermost rollback that calls the DBAPI's + rollback() method, as well as the enclosing rollback + calls that only pop themselves from the transaction stack. + + The given :class:`.Session` can be used to invoke SQL and + :meth:`.Session.query` operations after an outermost rollback + by first checking the :attr:`.Session.is_active` flag:: + + @event.listens_for(Session, "after_soft_rollback") + def do_something(session, previous_transaction): + if session.is_active: + session.execute("select * from some_table") + + :param session: The target :class:`.Session`. + :param previous_transaction: The :class:`.SessionTransaction` + transactional marker object which was just closed. The current + :class:`.SessionTransaction` for the given :class:`.Session` is + available via the :attr:`.Session.transaction` attribute. + + .. versionadded:: 0.7.3 + + """ + + def before_flush(self, session, flush_context, instances): + """Execute before flush process has started. + + :param session: The target :class:`.Session`. + :param flush_context: Internal :class:`.UOWTransaction` object + which handles the details of the flush. + :param instances: Usually ``None``, this is the collection of + objects which can be passed to the :meth:`.Session.flush` method + (note this usage is deprecated). + + .. seealso:: + + :meth:`~.SessionEvents.after_flush` + + :meth:`~.SessionEvents.after_flush_postexec` + + :ref:`session_persistence_events` + + """ + + def after_flush(self, session, flush_context): + """Execute after flush has completed, but before commit has been + called. + + Note that the session's state is still in pre-flush, i.e. 'new', + 'dirty', and 'deleted' lists still show pre-flush state as well + as the history settings on instance attributes. + + :param session: The target :class:`.Session`. + :param flush_context: Internal :class:`.UOWTransaction` object + which handles the details of the flush. + + .. 
seealso::
+
+            :meth:`~.SessionEvents.before_flush`
+
+            :meth:`~.SessionEvents.after_flush_postexec`
+
+            :ref:`session_persistence_events`
+
+        """
+
+    def after_flush_postexec(self, session, flush_context):
+        """Execute after flush has completed, and after the post-exec
+        state occurs.
+
+        This will be when the 'new', 'dirty', and 'deleted' lists are in
+        their final state.  An actual commit() may or may not have
+        occurred, depending on whether or not the flush started its own
+        transaction or participated in a larger transaction.
+
+        :param session: The target :class:`.Session`.
+        :param flush_context: Internal :class:`.UOWTransaction` object
+         which handles the details of the flush.
+
+
+        .. seealso::
+
+            :meth:`~.SessionEvents.before_flush`
+
+            :meth:`~.SessionEvents.after_flush`
+
+            :ref:`session_persistence_events`
+
+        """
+
+    def after_begin(self, session, transaction, connection):
+        """Execute after a transaction is begun on a connection.
+
+        :param session: The target :class:`.Session`.
+        :param transaction: The :class:`.SessionTransaction`.
+        :param connection: The :class:`~.engine.Connection` object
+         which will be used for SQL statements.
+
+        .. seealso::
+
+            :meth:`~.SessionEvents.before_commit`
+
+            :meth:`~.SessionEvents.after_commit`
+
+            :meth:`~.SessionEvents.after_transaction_create`
+
+            :meth:`~.SessionEvents.after_transaction_end`
+
+        """
+
+    def before_attach(self, session, instance):
+        """Execute before an instance is attached to a session.
+
+        This is called before an add, delete or merge causes
+        the object to be part of the session.
+
+        .. versionadded:: 0.8.  Note that :meth:`~.SessionEvents.after_attach`
+           now fires off after the item is part of the session.
+           :meth:`.before_attach` is provided for those cases where
+           the item should not yet be part of the session state.
+
+        .. seealso::
+
+            :meth:`~.SessionEvents.after_attach`
+
+            :ref:`session_lifecycle_events`
+
+        """
+
+    def after_attach(self, session, instance):
+        """Execute after an instance is attached to a session.
+
+        This is called after an add, delete or merge.
+
+        .. note::
+
+           As of 0.8, this event fires off *after* the item
+           has been fully associated with the session, which is
+           different than previous releases.  For event
+           handlers that require the object not yet
+           be part of session state (such as handlers which
+           may autoflush while the target object is not
+           yet complete) consider the
+           new :meth:`.before_attach` event.
+
+        .. seealso::
+
+            :meth:`~.SessionEvents.before_attach`
+
+            :ref:`session_lifecycle_events`
+
+        """
+
+    @event._legacy_signature("0.9",
+                             ["session", "query", "query_context", "result"],
+                             lambda update_context: (
+                                 update_context.session,
+                                 update_context.query,
+                                 update_context.context,
+                                 update_context.result))
+    def after_bulk_update(self, update_context):
+        """Execute after a bulk update operation to the session.
+
+        This is called as a result of the :meth:`.Query.update` method.
+
+        :param update_context: an "update context" object which contains
+         details about the update, including these attributes:
+
+            * ``session`` - the :class:`.Session` involved
+            * ``query`` - the :class:`.Query` object that this update
+              operation was called upon.
+            * ``context`` - the :class:`.QueryContext` object, corresponding
+              to the invocation of an ORM query.
+            * ``result`` - the :class:`.ResultProxy` returned as a result of
+              the bulk UPDATE operation.
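+
+        As an illustration only, a sketch of a listener that reports the
+        number of rows matched, via the standard ``rowcount`` attribute
+        of the :class:`.ResultProxy`::
+
+            from sqlalchemy import event
+            from sqlalchemy.orm import Session
+
+            @event.listens_for(Session, "after_bulk_update")
+            def receive_after_bulk_update(update_context):
+                # rowcount reflects the rows matched by the bulk UPDATE
+                print("bulk update matched %d rows" %
+                      update_context.result.rowcount)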
+ + + """ + + @event._legacy_signature("0.9", + ["session", "query", "query_context", "result"], + lambda delete_context: ( + delete_context.session, + delete_context.query, + delete_context.context, + delete_context.result)) + def after_bulk_delete(self, delete_context): + """Execute after a bulk delete operation to the session. + + This is called as a result of the :meth:`.Query.delete` method. + + :param delete_context: a "delete context" object which contains + details about the update, including these attributes: + + * ``session`` - the :class:`.Session` involved + * ``query`` -the :class:`.Query` object that this update operation + was called upon. + * ``context`` The :class:`.QueryContext` object, corresponding + to the invocation of an ORM query. + * ``result`` the :class:`.ResultProxy` returned as a result of the + bulk DELETE operation. + + + """ + + def transient_to_pending(self, session, instance): + """Intercept the "transient to pending" transition for a specific object. + + This event is a specialization of the + :meth:`.SessionEvents.after_attach` event which is only invoked + for this specific transition. It is invoked typically during the + :meth:`.Session.add` call. + + :param session: target :class:`.Session` + + :param instance: the ORM-mapped instance being operated upon. + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`session_lifecycle_events` + + """ + + def pending_to_transient(self, session, instance): + """Intercept the "pending to transient" transition for a specific object. + + This less common transition occurs when an pending object that has + not been flushed is evicted from the session; this can occur + when the :meth:`.Session.rollback` method rolls back the transaction, + or when the :meth:`.Session.expunge` method is used. + + :param session: target :class:`.Session` + + :param instance: the ORM-mapped instance being operated upon. + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`session_lifecycle_events` + + """ + + def persistent_to_transient(self, session, instance): + """Intercept the "persistent to transient" transition for a specific object. + + This less common transition occurs when an pending object that has + has been flushed is evicted from the session; this can occur + when the :meth:`.Session.rollback` method rolls back the transaction. + + :param session: target :class:`.Session` + + :param instance: the ORM-mapped instance being operated upon. + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`session_lifecycle_events` + + """ + + def pending_to_persistent(self, session, instance): + """Intercept the "pending to persistent"" transition for a specific object. + + This event is invoked within the flush process, and is + similar to scanning the :attr:`.Session.new` collection within + the :meth:`.SessionEvents.after_flush` event. However, in this + case the object has already been moved to the persistent state + when the event is called. + + :param session: target :class:`.Session` + + :param instance: the ORM-mapped instance being operated upon. + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`session_lifecycle_events` + + """ + + def detached_to_persistent(self, session, instance): + """Intercept the "detached to persistent" transition for a specific object. + + This event is a specialization of the + :meth:`.SessionEvents.after_attach` event which is only invoked + for this specific transition. 
+
+        :param session: target :class:`.Session`
+
+        :param instance: the ORM-mapped instance being operated upon.
+
+        .. versionadded:: 1.1
+
+        .. seealso::
+
+            :ref:`session_lifecycle_events`
+
+        """
+
+    def detached_to_persistent(self, session, instance):
+        """Intercept the "detached to persistent" transition for a specific object.
+
+        This event is a specialization of the
+        :meth:`.SessionEvents.after_attach` event which is only invoked
+        for this specific transition.  It is invoked typically during the
+        :meth:`.Session.add` call, as well as during the
+        :meth:`.Session.delete` call if the object was not previously
+        associated with the
+        :class:`.Session` (note that an object marked as "deleted" remains
+        in the "persistent" state until the flush proceeds).
+
+        .. note::
+
+            If the object becomes persistent as part of a call to
+            :meth:`.Session.delete`, the object is **not** yet marked as
+            deleted when this event is called.  To detect deleted objects,
+            check the ``deleted`` flag sent to the
+            :meth:`.SessionEvents.persistent_to_detached` event after the
+            flush proceeds, or check the :attr:`.Session.deleted` collection
+            within the :meth:`.SessionEvents.before_flush` event if deleted
+            objects need to be intercepted before the flush.
+
+        :param session: target :class:`.Session`
+
+        :param instance: the ORM-mapped instance being operated upon.
+
+        .. versionadded:: 1.1
+
+        .. seealso::
+
+            :ref:`session_lifecycle_events`
+
+        """
+
+    def loaded_as_persistent(self, session, instance):
+        """Intercept the "loaded as persistent" transition for a specific object.
+
+        This event is invoked within the ORM loading process, and is invoked
+        very similarly to the :meth:`.InstanceEvents.load` event.  However,
+        the event here is linkable to a :class:`.Session` class or instance,
+        rather than to a mapper or class hierarchy, and integrates
+        with the other session lifecycle events smoothly.  The object
+        is guaranteed to be present in the session's identity map when
+        this event is called.
+
+
+        :param session: target :class:`.Session`
+
+        :param instance: the ORM-mapped instance being operated upon.
+
+        .. versionadded:: 1.1
+
+        .. seealso::
+
+            :ref:`session_lifecycle_events`
+
+        """
+
+    def persistent_to_deleted(self, session, instance):
+        """Intercept the "persistent to deleted" transition for a specific object.
+
+        This event is invoked when a persistent object's identity
+        is deleted from the database within a flush, however the object
+        still remains associated with the :class:`.Session` until the
+        transaction completes.
+
+        If the transaction is rolled back, the object moves again
+        to the persistent state, and the
+        :meth:`.SessionEvents.deleted_to_persistent` event is called.
+        If the transaction is committed, the object becomes detached,
+        which will emit the :meth:`.SessionEvents.deleted_to_detached`
+        event.
+
+        Note that while the :meth:`.Session.delete` method is the primary
+        public interface to mark an object as deleted, many objects
+        get deleted due to cascade rules, which are not always determined
+        until flush time.  Therefore, there's no way to catch
+        every object that will be deleted until the flush has proceeded.
+        The :meth:`.SessionEvents.persistent_to_deleted` event is therefore
+        invoked at the end of a flush.
+
+        .. versionadded:: 1.1
+
+        .. seealso::
+
+            :ref:`session_lifecycle_events`
+
+        """
+
+    def deleted_to_persistent(self, session, instance):
+        """Intercept the "deleted to persistent" transition for a specific object.
+
+        This transition occurs only when an object that's been deleted
+        successfully in a flush is restored due to a call to
+        :meth:`.Session.rollback`.  The event is not called under
+        any other circumstances.
+
+        .. versionadded:: 1.1
+
+        .. seealso::
+
+            :ref:`session_lifecycle_events`
+
+        """
+
+    def deleted_to_detached(self, session, instance):
+        """Intercept the "deleted to detached" transition for a specific object.
+
+        This event is invoked when a deleted object is evicted
+        from the session.
+        The typical case when this occurs is when
+        the transaction for a :class:`.Session` in which the object
+        was deleted is committed; the object moves from the deleted
+        state to the detached state.
+
+        It is also invoked for objects that were deleted in a flush
+        when the :meth:`.Session.expunge_all` or :meth:`.Session.close`
+        events are called, as well as if the object is individually
+        expunged from its deleted state via :meth:`.Session.expunge`.
+
+        .. versionadded:: 1.1
+
+        .. seealso::
+
+            :ref:`session_lifecycle_events`
+
+        """
+
+    def persistent_to_detached(self, session, instance):
+        """Intercept the "persistent to detached" transition for a specific object.
+
+        This event is invoked when a persistent object is evicted
+        from the session.  There are many conditions that cause this
+        to happen, including:
+
+        * using a method such as :meth:`.Session.expunge`
+          or :meth:`.Session.close`
+
+        * calling the :meth:`.Session.rollback` method, when the object
+          was part of an INSERT statement for that session's transaction
+
+
+        :param session: target :class:`.Session`
+
+        :param instance: the ORM-mapped instance being operated upon.
+
+        :param deleted: boolean.  If True, indicates this object moved
+         to the detached state because it was marked as deleted and flushed.
+
+
+        .. versionadded:: 1.1
+
+        .. seealso::
+
+            :ref:`session_lifecycle_events`
+
+        """
+
+
+class AttributeEvents(event.Events):
+    """Define events for object attributes.
+
+    These are typically defined on the class-bound descriptor for the
+    target class.
+
+    e.g.::
+
+        from sqlalchemy import event
+
+        def my_append_listener(target, value, initiator):
+            print("received append event for target: %s" % target)
+
+        event.listen(MyClass.collection, 'append', my_append_listener)
+
+    Listeners have the option to return a possibly modified version
+    of the value, when the ``retval=True`` flag is passed
+    to :func:`~.event.listen`::
+
+        def validate_phone(target, value, oldvalue, initiator):
+            "Strip non-numeric characters from a phone number"
+
+            return re.sub(r'\D', '', value)
+
+        # setup listener on UserContact.phone attribute, instructing
+        # it to use the return value
+        event.listen(UserContact.phone, 'set', validate_phone, retval=True)
+
+    A validation function like the above can also raise an exception
+    such as :exc:`ValueError` to halt the operation.
+
+    Several modifiers are available to the :func:`~.event.listen` function.
+
+    :param active_history=False: When True, indicates that the
+      "set" event would like to receive the "old" value being
+      replaced unconditionally, even if this requires firing off
+      database loads.  Note that ``active_history`` can also be
+      set directly via :func:`.column_property` and
+      :func:`.relationship`.
+
+    :param propagate=False: When True, the listener function will
+      be established not just for the class attribute given, but
+      for attributes of the same name on all current subclasses
+      of that class, as well as all future subclasses of that
+      class, using an additional listener that listens for
+      instrumentation events.
+    :param raw=False: When True, the "target" argument to the
+      event will be the :class:`.InstanceState` management
+      object, rather than the mapped instance itself.
+    :param retval=False: when True, the user-defined event
+      listening function must return the "value" argument from the
+      function.  This gives the listening function the opportunity
+      to change the value that is ultimately used for a "set"
+      or "append" event.
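+
+    As an illustrative sketch only, the ``active_history`` modifier can be
+    combined with a "set" listener to observe the prior value
+    (``MyClass.some_attribute`` is a placeholder mapped attribute)::
+
+        from sqlalchemy import event
+
+        @event.listens_for(MyClass.some_attribute, 'set', active_history=True)
+        def receive_set(target, value, oldvalue, initiator):
+            # oldvalue is loaded from the database if it was expired
+            print("replacing %r with %r" % (oldvalue, value))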
+ + """ + + _target_class_doc = "SomeClass.some_attribute" + _dispatch_target = QueryableAttribute + + @staticmethod + def _set_dispatch(cls, dispatch_cls): + dispatch = event.Events._set_dispatch(cls, dispatch_cls) + dispatch_cls._active_history = False + return dispatch + + @classmethod + def _accept_with(cls, target): + # TODO: coverage + if isinstance(target, interfaces.MapperProperty): + return getattr(target.parent.class_, target.key) + else: + return target + + @classmethod + def _listen(cls, event_key, active_history=False, + raw=False, retval=False, + propagate=False): + + target, identifier, fn = \ + event_key.dispatch_target, event_key.identifier, \ + event_key._listen_fn + + if active_history: + target.dispatch._active_history = True + + if not raw or not retval: + def wrap(target, value, *arg): + if not raw: + target = target.obj() + if not retval: + fn(target, value, *arg) + return value + else: + return fn(target, value, *arg) + event_key = event_key.with_wrapper(wrap) + + event_key.base_listen(propagate=propagate) + + if propagate: + manager = instrumentation.manager_of_class(target.class_) + + for mgr in manager.subclass_managers(True): + event_key.with_dispatch_target( + mgr[target.key]).base_listen(propagate=True) + + def append(self, target, value, initiator): + """Receive a collection append event. + + :param target: the object instance receiving the event. + If the listener is registered with ``raw=True``, this will + be the :class:`.InstanceState` object. + :param value: the value being appended. If this listener + is registered with ``retval=True``, the listener + function must return this value, or a new value which + replaces it. + :param initiator: An instance of :class:`.attributes.Event` + representing the initiation of the event. May be modified + from its original value by backref handlers in order to control + chained event propagation. + + .. versionchanged:: 0.9.0 the ``initiator`` argument is now + passed as a :class:`.attributes.Event` object, and may be + modified by backref handlers within a chain of backref-linked + events. + + :return: if the event was registered with ``retval=True``, + the given value, or a new effective value, should be returned. + + """ + + def remove(self, target, value, initiator): + """Receive a collection remove event. + + :param target: the object instance receiving the event. + If the listener is registered with ``raw=True``, this will + be the :class:`.InstanceState` object. + :param value: the value being removed. + :param initiator: An instance of :class:`.attributes.Event` + representing the initiation of the event. May be modified + from its original value by backref handlers in order to control + chained event propagation. + + .. versionchanged:: 0.9.0 the ``initiator`` argument is now + passed as a :class:`.attributes.Event` object, and may be + modified by backref handlers within a chain of backref-linked + events. + + :return: No return value is defined for this event. + """ + + def set(self, target, value, oldvalue, initiator): + """Receive a scalar set event. + + :param target: the object instance receiving the event. + If the listener is registered with ``raw=True``, this will + be the :class:`.InstanceState` object. + :param value: the value being set. If this listener + is registered with ``retval=True``, the listener + function must return this value, or a new value which + replaces it. + :param oldvalue: the previous value being replaced. This + may also be the symbol ``NEVER_SET`` or ``NO_VALUE``. 
+         If the listener is registered with ``active_history=True``,
+         the previous value of the attribute will be loaded from
+         the database if the existing value is currently unloaded
+         or expired.
+        :param initiator: An instance of :class:`.attributes.Event`
+          representing the initiation of the event.  May be modified
+          from its original value by backref handlers in order to control
+          chained event propagation.
+
+          .. versionchanged:: 0.9.0 the ``initiator`` argument is now
+             passed as a :class:`.attributes.Event` object, and may be
+             modified by backref handlers within a chain of backref-linked
+             events.
+
+        :return: if the event was registered with ``retval=True``,
+         the given value, or a new effective value, should be returned.
+
+        """
+
+    def init_scalar(self, target, value, dict_):
+        """Receive a scalar "init" event.
+
+        This event is invoked when an uninitialized, unpersisted scalar
+        attribute is accessed.  A value of ``None`` is typically returned
+        in this case; no changes are made to the object's state.
+
+        The event handler can alter this behavior in two ways.
+        One is that a value other than ``None`` may be returned.  The other
+        is that the value may be established as part of the object's state,
+        which will also have the effect that it is persisted.
+
+        Typical use is to establish a specific default value of an attribute
+        upon access::
+
+            SOME_CONSTANT = 3.1415926
+
+            @event.listens_for(
+                MyClass.some_attribute, "init_scalar",
+                retval=True, propagate=True)
+            def _init_some_attribute(target, value, dict_):
+                dict_['some_attribute'] = SOME_CONSTANT
+                return SOME_CONSTANT
+
+        Above, we initialize the attribute ``MyClass.some_attribute`` to the
+        value of ``SOME_CONSTANT``.  The above code includes the following
+        features:
+
+        * By setting the value ``SOME_CONSTANT`` in the given ``dict_``,
+          we indicate that the value is to be persisted to the database.
+          **The given value is only persisted to the database if we
+          explicitly associate it with the object**.  The ``dict_`` given
+          is the ``__dict__`` element of the mapped object, assuming the
+          default attribute instrumentation system is in place.
+
+        * By establishing the ``retval=True`` flag, the value we return
+          from the function will be returned by the attribute getter.
+          Without this flag, the event is assumed to be a passive observer
+          and the return value of our function is ignored.
+
+        * The ``propagate=True`` flag is significant if the mapped class
+          includes inheriting subclasses, which would also make use of this
+          event listener.  Without this flag, an inheriting subclass will
+          not use our event handler.
+
+        When we establish the value in the given dictionary, the value will
+        be used in the INSERT statement established by the unit of work.
+        Normally, the default returned value of ``None`` is not established as
+        part of the object, to avoid the issue of mutations occurring to the
+        object in response to a normally passive "get" operation, and also
+        sidesteps the issue of whether or not the :meth:`.AttributeEvents.set`
+        event should be awkwardly fired off during an attribute access
+        operation.  This does not impact the INSERT operation since the
+        ``None`` value matches the value of ``NULL`` that goes into the
+        database in any case; note that ``None`` is skipped during the INSERT
+        to ensure that column and SQL-level default functions can fire off.
+
+        The attribute set event :meth:`.AttributeEvents.set` as well as the
+        related validation feature provided by :obj:`.orm.validates` is
+        **not** invoked when we apply our value to the given ``dict_``.  To
+        have these events invoke in response to our newly generated
+        value, apply the value to the given object as a normal attribute
+        set operation::
+
+            SOME_CONSTANT = 3.1415926
+
+            @event.listens_for(
+                MyClass.some_attribute, "init_scalar",
+                retval=True, propagate=True)
+            def _init_some_attribute(target, value, dict_):
+                # will also fire off attribute set events
+                target.some_attribute = SOME_CONSTANT
+                return SOME_CONSTANT
+
+        When multiple listeners are set up, the generation of the value
+        is "chained" from one listener to the next by passing the value
+        returned by the previous listener that specifies ``retval=True``
+        as the ``value`` argument of the next listener.
+
+        The :meth:`.AttributeEvents.init_scalar` event may be used to
+        extract values from the default values and/or callables established on
+        mapped :class:`.Column` objects.  See the "active column defaults"
+        example in :ref:`examples_instrumentation` for an example of this.
+
+        .. versionadded:: 1.1
+
+        :param target: the object instance receiving the event.
+         If the listener is registered with ``raw=True``, this will
+         be the :class:`.InstanceState` object.
+        :param value: the value that is to be returned before this event
+         listener was invoked.  This value begins as the value ``None``,
+         however it will be the return value of the previous event handler
+         function if multiple listeners are present.
+        :param dict_: the attribute dictionary of this mapped object.
+         This is normally the ``__dict__`` of the object, but in all cases
+         represents the destination that the attribute system uses to get
+         at the actual value of this attribute.  Placing the value in this
+         dictionary has the effect that the value will be used in the
+         INSERT statement generated by the unit of work.
+
+
+        .. seealso::
+
+            :ref:`examples_instrumentation` - see the
+            ``active_column_defaults.py`` example.
+
+        """
+
+    def init_collection(self, target, collection, collection_adapter):
+        """Receive a 'collection init' event.
+
+        This event is triggered for a collection-based attribute, when
+        the initial "empty collection" is first generated for a blank
+        attribute, as well as for when the collection is replaced with
+        a new one, such as via a set event.
+
+        E.g., given that ``User.addresses`` is a relationship-based
+        collection, the event is triggered here::
+
+            u1 = User()
+            u1.addresses.append(a1)   #  <- new collection
+
+        and also during replace operations::
+
+            u1.addresses = [a2, a3]  #  <- new collection
+
+        :param target: the object instance receiving the event.
+         If the listener is registered with ``raw=True``, this will
+         be the :class:`.InstanceState` object.
+        :param collection: the new collection.  This will always be generated
+         from what was specified as
+         :paramref:`.RelationshipProperty.collection_class`, and will always
+         be empty.
+        :param collection_adapter: the :class:`.CollectionAdapter` that will
+         mediate internal access to the collection.
+
+        .. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
+           and :meth:`.AttributeEvents.dispose_collection` events supersede
+           the :class:`.collection.linker` hook.
+
+        """
+
+    def dispose_collection(self, target, collection, collection_adapter):
+        """Receive a 'collection dispose' event.
+
+        This event is triggered for a collection-based attribute when
+        a collection is replaced, that is::
+
+            u1.addresses.append(a1)
+
+            u1.addresses = [a2, a3]  # <- old collection is disposed
+
+        The mechanics of the event will typically include that the given
+        collection is empty, even if it stored objects while being replaced.
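+
+        A minimal listener sketch (illustrative only; ``User.addresses``
+        stands in for any relationship-based collection attribute)::
+
+            from sqlalchemy import event
+
+            @event.listens_for(User.addresses, "dispose_collection")
+            def on_dispose(target, collection, collection_adapter):
+                print("collection replaced on %r" % target)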
+
+        .. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
+           and :meth:`.AttributeEvents.dispose_collection` events supersede
+           the :class:`.collection.linker` hook.
+
+        """
+
+
+class QueryEvents(event.Events):
+    """Represent events within the construction of a :class:`.Query` object.
+
+    The events here are intended to be used with an as-yet-unreleased
+    inspection system for :class:`.Query`.   Some very basic operations
+    are possible now, however the inspection system is intended to allow
+    complex query manipulations to be automated.
+
+    .. versionadded:: 1.0.0
+
+    """
+
+    _target_class_doc = "SomeQuery"
+    _dispatch_target = Query
+
+    def before_compile(self, query):
+        """Receive the :class:`.Query` object before it is composed into a
+        core :class:`.Select` object.
+
+        This event is intended to allow changes to the query given::
+
+            @event.listens_for(Query, "before_compile", retval=True)
+            def no_deleted(query):
+                for desc in query.column_descriptions:
+                    if desc['type'] is User:
+                        entity = desc['entity']
+                        query = query.filter(entity.deleted == False)
+                return query
+
+        The event should normally be listened to with the ``retval=True``
+        parameter set, so that the modified query may be returned.
+
+
+        """
+
+    @classmethod
+    def _listen(
+            cls, event_key, retval=False, **kw):
+        fn = event_key._listen_fn
+
+        if not retval:
+            def wrap(*arg, **kw):
+                if not retval:
+                    query = arg[0]
+                    fn(*arg, **kw)
+                    return query
+                else:
+                    return fn(*arg, **kw)
+            event_key = event_key.with_wrapper(wrap)
+
+        event_key.base_listen(**kw)
diff --git a/app/lib/sqlalchemy/orm/exc.py b/app/lib/sqlalchemy/orm/exc.py
new file mode 100644
index 0000000..c13bb67
--- /dev/null
+++ b/app/lib/sqlalchemy/orm/exc.py
@@ -0,0 +1,165 @@
+# orm/exc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""SQLAlchemy ORM exceptions."""
+from .. import exc as sa_exc, util
+
+NO_STATE = (AttributeError, KeyError)
+"""Exception types that may be raised by instrumentation implementations."""
+
+
+class StaleDataError(sa_exc.SQLAlchemyError):
+    """An operation encountered database state that is unaccounted for.
+
+    Conditions which cause this to happen include:
+
+    * A flush may have attempted to update or delete rows
+      and an unexpected number of rows were matched during
+      the UPDATE or DELETE statement.  Note that when
+      version_id_col is used, rows in UPDATE or DELETE statements
+      are also matched against the current known version
+      identifier.
+
+    * A mapped object with version_id_col was refreshed,
+      and the version number coming back from the database does
+      not match that of the object itself.
+
+    * An object is detached from its parent object, however
+      the object was previously attached to a different parent
+      identity which was garbage collected, and a decision
+      cannot be made if the new parent was really the most
+      recent "parent".
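+
+    For example, a typical handling sketch (illustrative only)::
+
+        from sqlalchemy.orm.exc import StaleDataError
+
+        try:
+            session.commit()
+        except StaleDataError:
+            # another transaction changed the row; discard our changes
+            session.rollback()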
+
+    .. versionadded:: 0.7.4
+
+    """
+
+ConcurrentModificationError = StaleDataError
+
+
+class FlushError(sa_exc.SQLAlchemyError):
+    """An invalid condition was detected during flush()."""
+
+
+class UnmappedError(sa_exc.InvalidRequestError):
+    """Base for exceptions that involve expected mappings not present."""
+
+
+class ObjectDereferencedError(sa_exc.SQLAlchemyError):
+    """An operation cannot complete due to an object being garbage
+    collected.
+
+    """
+
+
+class DetachedInstanceError(sa_exc.SQLAlchemyError):
+    """An attempt to access unloaded attributes on a
+    mapped instance that is detached."""
+
+
+class UnmappedInstanceError(UnmappedError):
+    """A mapping operation was requested for an unknown instance."""
+
+    @util.dependencies("sqlalchemy.orm.base")
+    def __init__(self, base, obj, msg=None):
+        if not msg:
+            try:
+                base.class_mapper(type(obj))
+                name = _safe_cls_name(type(obj))
+                msg = ("Class %r is mapped, but this instance lacks "
+                       "instrumentation.  This occurs when the instance "
+                       "is created before sqlalchemy.orm.mapper(%s) "
+                       "was called." % (name, name))
+            except UnmappedClassError:
+                msg = _default_unmapped(type(obj))
+                if isinstance(obj, type):
+                    msg += (
+                        '; was a class (%s) supplied where an instance was '
+                        'required?' % _safe_cls_name(obj))
+        UnmappedError.__init__(self, msg)
+
+    def __reduce__(self):
+        return self.__class__, (None, self.args[0])
+
+
+class UnmappedClassError(UnmappedError):
+    """A mapping operation was requested for an unknown class."""
+
+    def __init__(self, cls, msg=None):
+        if not msg:
+            msg = _default_unmapped(cls)
+        UnmappedError.__init__(self, msg)
+
+    def __reduce__(self):
+        return self.__class__, (None, self.args[0])
+
+
+class ObjectDeletedError(sa_exc.InvalidRequestError):
+    """A refresh operation failed to retrieve the database
+    row corresponding to an object's known primary key identity.
+
+    A refresh operation proceeds when an expired attribute is
+    accessed on an object, or when :meth:`.Query.get` is
+    used to retrieve an object which is, upon retrieval, detected
+    as expired.  A SELECT is emitted for the target row
+    based on primary key; if no row is returned, this
+    exception is raised.
+
+    The true meaning of this exception is simply that
+    no row exists for the primary key identifier associated
+    with a persistent object.   The row may have been
+    deleted, or in some cases the primary key updated
+    to a new value, outside of the ORM's management of the target
+    object.
+
+    """
+    @util.dependencies("sqlalchemy.orm.base")
+    def __init__(self, base, state, msg=None):
+        if not msg:
+            msg = "Instance '%s' has been deleted, or its "\
+                "row is otherwise not present."
% base.state_str(state) + + sa_exc.InvalidRequestError.__init__(self, msg) + + def __reduce__(self): + return self.__class__, (None, self.args[0]) + + +class UnmappedColumnError(sa_exc.InvalidRequestError): + """Mapping operation was requested on an unknown column.""" + + +class NoResultFound(sa_exc.InvalidRequestError): + """A database result was required but none was found.""" + + +class MultipleResultsFound(sa_exc.InvalidRequestError): + """A single database result was required but more than one were found.""" + + +def _safe_cls_name(cls): + try: + cls_name = '.'.join((cls.__module__, cls.__name__)) + except AttributeError: + cls_name = getattr(cls, '__name__', None) + if cls_name is None: + cls_name = repr(cls) + return cls_name + + +@util.dependencies("sqlalchemy.orm.base") +def _default_unmapped(base, cls): + try: + mappers = base.manager_of_class(cls).mappers + except NO_STATE: + mappers = {} + except TypeError: + mappers = {} + name = _safe_cls_name(cls) + + if not mappers: + return "Class '%s' is not mapped" % name diff --git a/app/lib/sqlalchemy/orm/identity.py b/app/lib/sqlalchemy/orm/identity.py new file mode 100644 index 0000000..ca87fa2 --- /dev/null +++ b/app/lib/sqlalchemy/orm/identity.py @@ -0,0 +1,321 @@ +# orm/identity.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +import weakref +from . import attributes +from .. import util +from .. import exc as sa_exc +from . import util as orm_util + +class IdentityMap(object): + def __init__(self): + self._dict = {} + self._modified = set() + self._wr = weakref.ref(self) + + def keys(self): + return self._dict.keys() + + def replace(self, state): + raise NotImplementedError() + + def add(self, state): + raise NotImplementedError() + + def _add_unpresent(self, state, key): + """optional inlined form of add() which can assume item isn't present + in the map""" + self.add(state) + + def update(self, dict): + raise NotImplementedError("IdentityMap uses add() to insert data") + + def clear(self): + raise NotImplementedError("IdentityMap uses remove() to remove data") + + def _manage_incoming_state(self, state): + state._instance_dict = self._wr + + if state.modified: + self._modified.add(state) + + def _manage_removed_state(self, state): + del state._instance_dict + if state.modified: + self._modified.discard(state) + + def _dirty_states(self): + return self._modified + + def check_modified(self): + """return True if any InstanceStates present have been marked + as 'modified'. 
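+
+        An illustrative sketch only - within a running application this
+        collection is normally reached via ``Session.identity_map``::
+
+            if session.identity_map.check_modified():
+                # at least one persistent object has pending attribute
+                # changes that a flush would write out
+                session.flush()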
+ + """ + return bool(self._modified) + + def has_key(self, key): + return key in self + + def popitem(self): + raise NotImplementedError("IdentityMap uses remove() to remove data") + + def pop(self, key, *args): + raise NotImplementedError("IdentityMap uses remove() to remove data") + + def setdefault(self, key, default=None): + raise NotImplementedError("IdentityMap uses add() to insert data") + + def __len__(self): + return len(self._dict) + + def copy(self): + raise NotImplementedError() + + def __setitem__(self, key, value): + raise NotImplementedError("IdentityMap uses add() to insert data") + + def __delitem__(self, key): + raise NotImplementedError("IdentityMap uses remove() to remove data") + + +class WeakInstanceDict(IdentityMap): + + def __getitem__(self, key): + state = self._dict[key] + o = state.obj() + if o is None: + raise KeyError(key) + return o + + def __contains__(self, key): + try: + if key in self._dict: + state = self._dict[key] + o = state.obj() + else: + return False + except KeyError: + return False + else: + return o is not None + + def contains_state(self, state): + return state.key in self._dict and self._dict[state.key] is state + + def replace(self, state): + if state.key in self._dict: + existing = self._dict[state.key] + if existing is not state: + self._manage_removed_state(existing) + else: + return + + self._dict[state.key] = state + self._manage_incoming_state(state) + + def add(self, state): + key = state.key + # inline of self.__contains__ + if key in self._dict: + try: + existing_state = self._dict[key] + if existing_state is not state: + o = existing_state.obj() + if o is not None: + raise sa_exc.InvalidRequestError( + "Can't attach instance " + "%s; another instance with key %s is already " + "present in this session." % ( + orm_util.state_str(state), state.key)) + else: + return False + except KeyError: + pass + self._dict[key] = state + self._manage_incoming_state(state) + return True + + def _add_unpresent(self, state, key): + # inlined form of add() called by loading.py + self._dict[key] = state + state._instance_dict = self._wr + + def get(self, key, default=None): + if key not in self._dict: + return default + state = self._dict[key] + o = state.obj() + if o is None: + return default + return o + + def items(self): + values = self.all_states() + result = [] + for state in values: + value = state.obj() + if value is not None: + result.append((state.key, value)) + return result + + def values(self): + values = self.all_states() + result = [] + for state in values: + value = state.obj() + if value is not None: + result.append(value) + + return result + + def __iter__(self): + return iter(self.keys()) + + if util.py2k: + + def iteritems(self): + return iter(self.items()) + + def itervalues(self): + return iter(self.values()) + + def all_states(self): + if util.py2k: + return self._dict.values() + else: + return list(self._dict.values()) + + def _fast_discard(self, state): + self._dict.pop(state.key, None) + + def discard(self, state): + st = self._dict.pop(state.key, None) + if st: + assert st is state + self._manage_removed_state(state) + + def safe_discard(self, state): + if state.key in self._dict: + st = self._dict[state.key] + if st is state: + self._dict.pop(state.key, None) + self._manage_removed_state(state) + + def prune(self): + return 0 + + +class StrongInstanceDict(IdentityMap): + """A 'strong-referencing' version of the identity map. + + .. deprecated 1.1:: + The strong + reference identity map is legacy. 
+        See the recipe at :ref:`session_referencing_behavior` for
+        an event-based approach to maintaining strong identity
+        references.
+
+
+    """
+
+    if util.py2k:
+        def itervalues(self):
+            return self._dict.itervalues()
+
+        def iteritems(self):
+            return self._dict.iteritems()
+
+    def __iter__(self):
+        return iter(self._dict)
+
+    def __getitem__(self, key):
+        return self._dict[key]
+
+    def __contains__(self, key):
+        return key in self._dict
+
+    def get(self, key, default=None):
+        return self._dict.get(key, default)
+
+    def values(self):
+        return self._dict.values()
+
+    def items(self):
+        return self._dict.items()
+
+    def all_states(self):
+        return [attributes.instance_state(o) for o in self.values()]
+
+    def contains_state(self, state):
+        return (
+            state.key in self and
+            attributes.instance_state(self[state.key]) is state)
+
+    def replace(self, state):
+        if state.key in self._dict:
+            existing = self._dict[state.key]
+            existing = attributes.instance_state(existing)
+            if existing is not state:
+                self._manage_removed_state(existing)
+            else:
+                return
+
+        self._dict[state.key] = state.obj()
+        self._manage_incoming_state(state)
+
+    def add(self, state):
+        if state.key in self:
+            if attributes.instance_state(self._dict[state.key]) is not state:
+                raise sa_exc.InvalidRequestError(
+                    "Can't attach instance "
+                    "%s; another instance with key %s is already "
+                    "present in this session." % (
+                        orm_util.state_str(state), state.key))
+            return False
+        else:
+            self._dict[state.key] = state.obj()
+            self._manage_incoming_state(state)
+            return True
+
+    def _add_unpresent(self, state, key):
+        # inlined form of add() called by loading.py
+        self._dict[key] = state.obj()
+        state._instance_dict = self._wr
+
+    def _fast_discard(self, state):
+        self._dict.pop(state.key, None)
+
+    def discard(self, state):
+        obj = self._dict.pop(state.key, None)
+        if obj is not None:
+            self._manage_removed_state(state)
+            st = attributes.instance_state(obj)
+            assert st is state
+
+    def safe_discard(self, state):
+        if state.key in self._dict:
+            obj = self._dict[state.key]
+            st = attributes.instance_state(obj)
+            if st is state:
+                self._dict.pop(state.key, None)
+                self._manage_removed_state(state)
+
+    def prune(self):
+        """prune unreferenced, non-dirty states."""
+
+        ref_count = len(self)
+        dirty = [s.obj() for s in self.all_states() if s.modified]
+
+        # work around http://bugs.python.org/issue6149
+        keepers = weakref.WeakValueDictionary()
+        keepers.update(self)
+
+        self._dict.clear()
+        self._dict.update(keepers)
+        self.modified = bool(dirty)
+        return ref_count - len(self)
diff --git a/app/lib/sqlalchemy/orm/instrumentation.py b/app/lib/sqlalchemy/orm/instrumentation.py
new file mode 100644
index 0000000..4ac9b13
--- /dev/null
+++ b/app/lib/sqlalchemy/orm/instrumentation.py
@@ -0,0 +1,528 @@
+# orm/instrumentation.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""Defines SQLAlchemy's system of class instrumentation.
+
+This module is usually not directly visible to user applications, but
+defines a large part of the ORM's interactivity.
+
+instrumentation.py deals with registration of end-user classes
+for state tracking.   It interacts closely with state.py
+and attributes.py which establish per-instance and per-class-attribute
+instrumentation, respectively.
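+
+A brief sketch of the public entry points defined at the bottom of this
+module (illustrative only; ``Foo`` is a placeholder class)::
+
+    from sqlalchemy.orm.instrumentation import (
+        register_class, manager_of_class)
+
+    class Foo(object):
+        pass
+
+    manager = register_class(Foo)   # installs a ClassManager on Foo
+    assert manager_of_class(Foo) is manager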
+
+The class instrumentation system can be customized on a per-class
+or global basis using the :mod:`sqlalchemy.ext.instrumentation`
+module, which provides the means to build and specify
+alternate instrumentation forms.
+
+.. versionchanged:: 0.8
+   The instrumentation extension system was moved out of the
+   ORM and into the external :mod:`sqlalchemy.ext.instrumentation`
+   package.  When that package is imported, it installs
+   itself within sqlalchemy.orm so that its more comprehensive
+   resolution mechanics take effect.
+
+"""
+
+
+from . import exc, collections, interfaces, state
+from .. import util
+from . import base
+
+
+_memoized_key_collection = util.group_expirable_memoized_property()
+
+
+class ClassManager(dict):
+    """Tracks state information at the class level."""
+
+    MANAGER_ATTR = base.DEFAULT_MANAGER_ATTR
+    STATE_ATTR = base.DEFAULT_STATE_ATTR
+
+    _state_setter = staticmethod(util.attrsetter(STATE_ATTR))
+
+    deferred_scalar_loader = None
+
+    original_init = object.__init__
+
+    factory = None
+
+    def __init__(self, class_):
+        self.class_ = class_
+        self.info = {}
+        self.new_init = None
+        self.local_attrs = {}
+        self.originals = {}
+
+        self._bases = [mgr for mgr in [
+            manager_of_class(base)
+            for base in self.class_.__bases__
+            if isinstance(base, type)
+        ] if mgr is not None]
+
+        for base in self._bases:
+            self.update(base)
+
+        self.dispatch._events._new_classmanager_instance(class_, self)
+        # events._InstanceEventsHold.populate(class_, self)
+
+        for basecls in class_.__mro__:
+            mgr = manager_of_class(basecls)
+            if mgr is not None:
+                self.dispatch._update(mgr.dispatch)
+        self.manage()
+        self._instrument_init()
+
+        if '__del__' in class_.__dict__:
+            util.warn("__del__() method on class %s will "
+                      "cause unreachable cycles and memory leaks, "
+                      "as SQLAlchemy instrumentation often creates "
+                      "reference cycles.  Please remove this method." %
+                      class_)
+
+    def __hash__(self):
+        return id(self)
+
+    def __eq__(self, other):
+        return other is self
+
+    @property
+    def is_mapped(self):
+        return 'mapper' in self.__dict__
+
+    @_memoized_key_collection
+    def _all_key_set(self):
+        return frozenset(self)
+
+    @_memoized_key_collection
+    def _collection_impl_keys(self):
+        return frozenset([
+            attr.key for attr in self.values() if attr.impl.collection])
+
+    @_memoized_key_collection
+    def _scalar_loader_impls(self):
+        return frozenset([
+            attr.impl for attr in
+            self.values() if attr.impl.accepts_scalar_loader])
+
+    @util.memoized_property
+    def mapper(self):
+        # raises unless self.mapper has been assigned
+        raise exc.UnmappedClassError(self.class_)
+
+    def _all_sqla_attributes(self, exclude=None):
+        """return an iterator of all classbound attributes that
+        implement :class:`.InspectionAttr`.
+
+        This includes :class:`.QueryableAttribute` as well as extension
+        types such as :class:`.hybrid_property` and
+        :class:`.AssociationProxy`.
+
+        """
+        if exclude is None:
+            exclude = set()
+        for supercls in self.class_.__mro__:
+            for key in set(supercls.__dict__).difference(exclude):
+                exclude.add(key)
+                val = supercls.__dict__[key]
+                if isinstance(val, interfaces.InspectionAttr):
+                    yield key, val
+
+    def _attr_has_impl(self, key):
+        """Return True if the given attribute is fully initialized.
+
+        i.e. has an impl.
+        """
+
+        return key in self and self[key].impl is not None
+
+    def _subclass_manager(self, cls):
+        """Create a new ClassManager for a subclass of this ClassManager's
+        class.
+
+        This is called automatically when attributes are instrumented so that
+        the attributes can be propagated to subclasses against their own
+        class-local manager, without the need for mappers etc. to have already
+        pre-configured managers for the full class hierarchy.   Mappers
+        can post-configure the auto-generated ClassManager when needed.
+
+        """
+        manager = manager_of_class(cls)
+        if manager is None:
+            manager = _instrumentation_factory.create_manager_for_cls(cls)
+        return manager
+
+    def _instrument_init(self):
+        # TODO: self.class_.__init__ is often the already-instrumented
+        # __init__ from an instrumented superclass.  We still need to make
+        # our own wrapper, but it would
+        # be nice to wrap the original __init__ and not our existing wrapper
+        # of such, since this adds method overhead.
+        self.original_init = self.class_.__init__
+        self.new_init = _generate_init(self.class_, self)
+        self.install_member('__init__', self.new_init)
+
+    def _uninstrument_init(self):
+        if self.new_init:
+            self.uninstall_member('__init__')
+            self.new_init = None
+
+    @util.memoized_property
+    def _state_constructor(self):
+        self.dispatch.first_init(self, self.class_)
+        return state.InstanceState
+
+    def manage(self):
+        """Mark this instance as the manager for its class."""
+
+        setattr(self.class_, self.MANAGER_ATTR, self)
+
+    def dispose(self):
+        """Disassociate this manager from its class."""
+
+        delattr(self.class_, self.MANAGER_ATTR)
+
+    @util.hybridmethod
+    def manager_getter(self):
+        return _default_manager_getter
+
+    @util.hybridmethod
+    def state_getter(self):
+        """Return an (instance) -> InstanceState callable.
+
+        "state getter" callables should raise either KeyError or
+        AttributeError if no InstanceState could be found for the
+        instance.
+        """
+
+        return _default_state_getter
+
+    @util.hybridmethod
+    def dict_getter(self):
+        return _default_dict_getter
+
+    def instrument_attribute(self, key, inst, propagated=False):
+        if propagated:
+            if key in self.local_attrs:
+                return  # don't override local attr with inherited attr
+        else:
+            self.local_attrs[key] = inst
+            self.install_descriptor(key, inst)
+        _memoized_key_collection.expire_instance(self)
+        self[key] = inst
+
+        for cls in self.class_.__subclasses__():
+            manager = self._subclass_manager(cls)
+            manager.instrument_attribute(key, inst, True)
+
+    def subclass_managers(self, recursive):
+        for cls in self.class_.__subclasses__():
+            mgr = manager_of_class(cls)
+            if mgr is not None and mgr is not self:
+                yield mgr
+                if recursive:
+                    for m in mgr.subclass_managers(True):
+                        yield m
+
+    def post_configure_attribute(self, key):
+        _instrumentation_factory.dispatch.\
+            attribute_instrument(self.class_, key, self[key])
+
+    def uninstrument_attribute(self, key, propagated=False):
+        if key not in self:
+            return
+        if propagated:
+            if key in self.local_attrs:
+                return  # don't get rid of local attr
+        else:
+            del self.local_attrs[key]
+            self.uninstall_descriptor(key)
+        _memoized_key_collection.expire_instance(self)
+        del self[key]
+        for cls in self.class_.__subclasses__():
+            manager = manager_of_class(cls)
+            if manager:
+                manager.uninstrument_attribute(key, True)
+
+    def unregister(self):
+        """remove all instrumentation established by this ClassManager."""
+
+        self._uninstrument_init()
+
+        self.mapper = self.dispatch = None
+        self.info.clear()
+
+        for key in list(self):
+            if key in self.local_attrs:
+                self.uninstrument_attribute(key)
+
+    def install_descriptor(self, key, inst):
+        if key in (self.STATE_ATTR, self.MANAGER_ATTR):
+            raise KeyError("%r: requested 
attribute name conflicts with " + "instrumentation attribute of the same name." % + key) + setattr(self.class_, key, inst) + + def uninstall_descriptor(self, key): + delattr(self.class_, key) + + def install_member(self, key, implementation): + if key in (self.STATE_ATTR, self.MANAGER_ATTR): + raise KeyError("%r: requested attribute name conflicts with " + "instrumentation attribute of the same name." % + key) + self.originals.setdefault(key, getattr(self.class_, key, None)) + setattr(self.class_, key, implementation) + + def uninstall_member(self, key): + original = self.originals.pop(key, None) + if original is not None: + setattr(self.class_, key, original) + + def instrument_collection_class(self, key, collection_class): + return collections.prepare_instrumentation(collection_class) + + def initialize_collection(self, key, state, factory): + user_data = factory() + adapter = collections.CollectionAdapter( + self.get_impl(key), state, user_data) + return adapter, user_data + + def is_instrumented(self, key, search=False): + if search: + return key in self + else: + return key in self.local_attrs + + def get_impl(self, key): + return self[key].impl + + @property + def attributes(self): + return iter(self.values()) + + # InstanceState management + + def new_instance(self, state=None): + instance = self.class_.__new__(self.class_) + if state is None: + state = self._state_constructor(instance, self) + self._state_setter(instance, state) + return instance + + def setup_instance(self, instance, state=None): + if state is None: + state = self._state_constructor(instance, self) + self._state_setter(instance, state) + + def teardown_instance(self, instance): + delattr(instance, self.STATE_ATTR) + + def _serialize(self, state, state_dict): + return _SerializeManager(state, state_dict) + + def _new_state_if_none(self, instance): + """Install a default InstanceState if none is present. + + A private convenience method used by the __init__ decorator. + + """ + if hasattr(instance, self.STATE_ATTR): + return False + elif self.class_ is not instance.__class__ and \ + self.is_mapped: + # this will create a new ClassManager for the + # subclass, without a mapper. This is likely a + # user error situation but allow the object + # to be constructed, so that it is usable + # in a non-ORM context at least. + return self._subclass_manager(instance.__class__).\ + _new_state_if_none(instance) + else: + state = self._state_constructor(instance, self) + self._state_setter(instance, state) + return state + + def has_state(self, instance): + return hasattr(instance, self.STATE_ATTR) + + def has_parent(self, state, key, optimistic=False): + """TODO""" + return self.get_impl(key).hasparent(state, optimistic=optimistic) + + def __bool__(self): + """All ClassManagers are non-zero regardless of attribute state.""" + return True + + __nonzero__ = __bool__ + + def __repr__(self): + return '<%s of %r at %x>' % ( + self.__class__.__name__, self.class_, id(self)) + + +class _SerializeManager(object): + """Provide serialization of a :class:`.ClassManager`. + + The :class:`.InstanceState` uses ``__init__()`` on serialize + and ``__call__()`` on deserialize. 
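+
+    An illustrative sketch only - within a running application this path
+    is exercised simply by pickling a mapped instance
+    (``some_mapped_object`` is a placeholder for any mapped instance)::
+
+        import pickle
+
+        # round-trips via _SerializeManager under the hood
+        raw = pickle.dumps(some_mapped_object)
+        restored = pickle.loads(raw)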
+ + """ + + def __init__(self, state, d): + self.class_ = state.class_ + manager = state.manager + manager.dispatch.pickle(state, d) + + def __call__(self, state, inst, state_dict): + state.manager = manager = manager_of_class(self.class_) + if manager is None: + raise exc.UnmappedInstanceError( + inst, + "Cannot deserialize object of type %r - " + "no mapper() has " + "been configured for this class within the current " + "Python process!" % + self.class_) + elif manager.is_mapped and not manager.mapper.configured: + manager.mapper._configure_all() + + # setup _sa_instance_state ahead of time so that + # unpickle events can access the object normally. + # see [ticket:2362] + if inst is not None: + manager.setup_instance(inst, state) + manager.dispatch.unpickle(state, state_dict) + + +class InstrumentationFactory(object): + """Factory for new ClassManager instances.""" + + def create_manager_for_cls(self, class_): + assert class_ is not None + assert manager_of_class(class_) is None + + # give a more complicated subclass + # a chance to do what it wants here + manager, factory = self._locate_extended_factory(class_) + + if factory is None: + factory = ClassManager + manager = factory(class_) + + self._check_conflicts(class_, factory) + + manager.factory = factory + + self.dispatch.class_instrument(class_) + return manager + + def _locate_extended_factory(self, class_): + """Overridden by a subclass to do an extended lookup.""" + return None, None + + def _check_conflicts(self, class_, factory): + """Overridden by a subclass to test for conflicting factories.""" + return + + def unregister(self, class_): + manager = manager_of_class(class_) + manager.unregister() + manager.dispose() + self.dispatch.class_uninstrument(class_) + if ClassManager.MANAGER_ATTR in class_.__dict__: + delattr(class_, ClassManager.MANAGER_ATTR) + +# this attribute is replaced by sqlalchemy.ext.instrumentation +# when importred. +_instrumentation_factory = InstrumentationFactory() + +# these attributes are replaced by sqlalchemy.ext.instrumentation +# when a non-standard InstrumentationManager class is first +# used to instrument a class. +instance_state = _default_state_getter = base.instance_state + +instance_dict = _default_dict_getter = base.instance_dict + +manager_of_class = _default_manager_getter = base.manager_of_class + + +def register_class(class_): + """Register class instrumentation. + + Returns the existing or newly created class manager. + + """ + + manager = manager_of_class(class_) + if manager is None: + manager = _instrumentation_factory.create_manager_for_cls(class_) + return manager + + +def unregister_class(class_): + """Unregister class instrumentation.""" + + _instrumentation_factory.unregister(class_) + + +def is_instrumented(instance, key): + """Return True if the given attribute on the given instance is + instrumented by the attributes package. + + This function may be used regardless of instrumentation + applied directly to the class, i.e. no descriptors are required. + + """ + return manager_of_class(instance.__class__).\ + is_instrumented(key, search=True) + + +def _generate_init(class_, class_manager): + """Build an __init__ decorator that triggers ClassManager events.""" + + # TODO: we should use the ClassManager's notion of the + # original '__init__' method, once ClassManager is fixed + # to always reference that. 
+ original__init__ = class_.__init__ + assert original__init__ + + # Go through some effort here and don't change the user's __init__ + # calling signature, including the unlikely case that it has + # a return value. + # FIXME: need to juggle local names to avoid constructor argument + # clashes. + func_body = """\ +def __init__(%(apply_pos)s): + new_state = class_manager._new_state_if_none(%(self_arg)s) + if new_state: + return new_state._initialize_instance(%(apply_kw)s) + else: + return original__init__(%(apply_kw)s) +""" + func_vars = util.format_argspec_init(original__init__, grouped=False) + func_text = func_body % func_vars + + if util.py2k: + func = getattr(original__init__, 'im_func', original__init__) + func_defaults = getattr(func, 'func_defaults', None) + else: + func_defaults = getattr(original__init__, '__defaults__', None) + func_kw_defaults = getattr(original__init__, '__kwdefaults__', None) + + env = locals().copy() + exec(func_text, env) + __init__ = env['__init__'] + __init__.__doc__ = original__init__.__doc__ + + if func_defaults: + __init__.__defaults__ = func_defaults + if not util.py2k and func_kw_defaults: + __init__.__kwdefaults__ = func_kw_defaults + + return __init__ diff --git a/app/lib/sqlalchemy/orm/interfaces.py b/app/lib/sqlalchemy/orm/interfaces.py new file mode 100644 index 0000000..fbe8f50 --- /dev/null +++ b/app/lib/sqlalchemy/orm/interfaces.py @@ -0,0 +1,655 @@ +# orm/interfaces.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" + +Contains various base classes used throughout the ORM. + +Defines some key base classes prominent within the internals, +as well as the now-deprecated ORM extension classes. + +Other than the deprecated extensions, this module and the +classes within are mostly private, though some attributes +are exposed when inspecting mappings. + +""" + +from __future__ import absolute_import + +from .. import util +from ..sql import operators +from .base import (ONETOMANY, MANYTOONE, MANYTOMANY, + EXT_CONTINUE, EXT_STOP, NOT_EXTENSION) +from .base import (InspectionAttr, InspectionAttr, + InspectionAttrInfo, _MappedAttribute) +import collections +from .. import inspect +from . import path_registry + +# imported later +MapperExtension = SessionExtension = AttributeExtension = None + +__all__ = ( + 'AttributeExtension', + 'EXT_CONTINUE', + 'EXT_STOP', + 'ONETOMANY', + 'MANYTOMANY', + 'MANYTOONE', + 'NOT_EXTENSION', + 'LoaderStrategy', + 'MapperExtension', + 'MapperOption', + 'MapperProperty', + 'PropComparator', + 'SessionExtension', + 'StrategizedProperty', +) + + +class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots): + """Represent a particular class attribute mapped by :class:`.Mapper`. + + The most common occurrences of :class:`.MapperProperty` are the + mapped :class:`.Column`, which is represented in a mapping as + an instance of :class:`.ColumnProperty`, + and a reference to another class produced by :func:`.relationship`, + represented in the mapping as an instance of + :class:`.RelationshipProperty`. + + """ + + __slots__ = ( + '_configure_started', '_configure_finished', 'parent', 'key', + 'info' + ) + + cascade = frozenset() + """The set of 'cascade' attribute names. + + This collection is checked before the 'cascade_iterator' method is called. + + The collection typically only applies to a RelationshipProperty. 
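+
+    An illustrative sketch only - e.g., checking the cascade set of a
+    hypothetical ``User.addresses`` relationship::
+
+        from sqlalchemy import inspect
+
+        prop = inspect(User).relationships["addresses"]
+        assert "save-update" in prop.cascade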
+ + """ + + is_property = True + """Part of the InspectionAttr interface; states this object is a + mapper property. + + """ + + def _memoized_attr_info(self): + """Info dictionary associated with the object, allowing user-defined + data to be associated with this :class:`.InspectionAttr`. + + The dictionary is generated when first accessed. Alternatively, + it can be specified as a constructor argument to the + :func:`.column_property`, :func:`.relationship`, or :func:`.composite` + functions. + + .. versionadded:: 0.8 Added support for .info to all + :class:`.MapperProperty` subclasses. + + .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also + available on extension types via the + :attr:`.InspectionAttrInfo.info` attribute, so that it can apply + to a wider variety of ORM and extension constructs. + + .. seealso:: + + :attr:`.QueryableAttribute.info` + + :attr:`.SchemaItem.info` + + """ + return {} + + def setup(self, context, entity, path, adapter, **kwargs): + """Called by Query for the purposes of constructing a SQL statement. + + Each MapperProperty associated with the target mapper processes the + statement referenced by the query context, adding columns and/or + criterion as appropriate. + + """ + + def create_row_processor(self, context, path, + mapper, result, adapter, populators): + """Produce row processing functions and append to the given + set of populators lists. + + """ + + def cascade_iterator(self, type_, state, visited_instances=None, + halt_on=None): + """Iterate through instances related to the given instance for + a particular 'cascade', starting with this MapperProperty. + + Return an iterator3-tuples (instance, mapper, state). + + Note that the 'cascade' collection on this MapperProperty is + checked first for the given type before cascade_iterator is called. + + This method typically only applies to RelationshipProperty. + + """ + + return iter(()) + + def set_parent(self, parent, init): + """Set the parent mapper that references this MapperProperty. + + This method is overridden by some subclasses to perform extra + setup when the mapper is first known. + + """ + self.parent = parent + + def instrument_class(self, mapper): + """Hook called by the Mapper to the property to initiate + instrumentation of the class attribute managed by this + MapperProperty. + + The MapperProperty here will typically call out to the + attributes module to set up an InstrumentedAttribute. + + This step is the first of two steps to set up an InstrumentedAttribute, + and is called early in the mapper setup process. + + The second step is typically the init_class_attribute step, + called from StrategizedProperty via the post_instrument_class() + hook. This step assigns additional state to the InstrumentedAttribute + (specifically the "impl") which has been determined after the + MapperProperty has determined what kind of persistence + management it needs to do (e.g. scalar, object, collection, etc). + + """ + + def __init__(self): + self._configure_started = False + self._configure_finished = False + + def init(self): + """Called after all mappers are created to assemble + relationships between mappers and perform other post-mapper-creation + initialization steps. + + """ + self._configure_started = True + self.do_init() + self._configure_finished = True + + @property + def class_attribute(self): + """Return the class-bound descriptor corresponding to this + :class:`.MapperProperty`. 
+ + This is basically a ``getattr()`` call:: + + return getattr(self.parent.class_, self.key) + + I.e. if this :class:`.MapperProperty` were named ``addresses``, + and the class to which it is mapped is ``User``, this sequence + is possible:: + + >>> from sqlalchemy import inspect + >>> mapper = inspect(User) + >>> addresses_property = mapper.attrs.addresses + >>> addresses_property.class_attribute is User.addresses + True + >>> User.addresses.property is addresses_property + True + + + """ + + return getattr(self.parent.class_, self.key) + + def do_init(self): + """Perform subclass-specific initialization post-mapper-creation + steps. + + This is a template method called by the ``MapperProperty`` + object's init() method. + + """ + + def post_instrument_class(self, mapper): + """Perform instrumentation adjustments that need to occur + after init() has completed. + + The given Mapper is the Mapper invoking the operation, which + may not be the same Mapper as self.parent in an inheritance + scenario; however, Mapper will always at least be a sub-mapper of + self.parent. + + This method is typically used by StrategizedProperty, which delegates + it to LoaderStrategy.init_class_attribute() to perform final setup + on the class-bound InstrumentedAttribute. + + """ + + def merge(self, session, source_state, source_dict, dest_state, + dest_dict, load, _recursive, _resolve_conflict_map): + """Merge the attribute represented by this ``MapperProperty`` + from source to destination object. + + """ + + def __repr__(self): + return '<%s at 0x%x; %s>' % ( + self.__class__.__name__, + id(self), getattr(self, 'key', 'no key')) + + +class PropComparator(operators.ColumnOperators): + r"""Defines SQL operators for :class:`.MapperProperty` objects. + + SQLAlchemy allows for operators to + be redefined at both the Core and ORM level. :class:`.PropComparator` + is the base class of operator redefinition for ORM-level operations, + including those of :class:`.ColumnProperty`, + :class:`.RelationshipProperty`, and :class:`.CompositeProperty`. + + .. note:: With the advent of Hybrid properties introduced in SQLAlchemy + 0.7, as well as Core-level operator redefinition in + SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator` + instances is extremely rare. See :ref:`hybrids_toplevel` as well + as :ref:`types_operators`. + + User-defined subclasses of :class:`.PropComparator` may be created. The + built-in Python comparison and math operator methods, such as + :meth:`.operators.ColumnOperators.__eq__`, + :meth:`.operators.ColumnOperators.__lt__`, and + :meth:`.operators.ColumnOperators.__add__`, can be overridden to provide + new operator behavior. The custom :class:`.PropComparator` is passed to + the :class:`.MapperProperty` instance via the ``comparator_factory`` + argument. In each case, + the appropriate subclass of :class:`.PropComparator` should be used:: + + # definition of custom PropComparator subclasses + + from sqlalchemy.orm.properties import \ + ColumnProperty,\ + CompositeProperty,\ + RelationshipProperty + + class MyColumnComparator(ColumnProperty.Comparator): + def __eq__(self, other): + return self.__clause_element__() == other + + class MyRelationshipComparator(RelationshipProperty.Comparator): + def any(self, expression): + "define the 'any' operation" + # ... 
+ + class MyCompositeComparator(CompositeProperty.Comparator): + def __gt__(self, other): + "redefine the 'greater than' operation" + + return sql.and_(*[a>b for a, b in + zip(self.__clause_element__().clauses, + other.__composite_values__())]) + + + # application of custom PropComparator subclasses + + from sqlalchemy.orm import column_property, relationship, composite + from sqlalchemy import Column, String + + class SomeMappedClass(Base): + some_column = column_property(Column("some_column", String), + comparator_factory=MyColumnComparator) + + some_relationship = relationship(SomeOtherClass, + comparator_factory=MyRelationshipComparator) + + some_composite = composite( + Column("a", String), Column("b", String), + comparator_factory=MyCompositeComparator + ) + + Note that for column-level operator redefinition, it's usually + simpler to define the operators at the Core level, using the + :attr:`.TypeEngine.comparator_factory` attribute. See + :ref:`types_operators` for more detail. + + See also: + + :class:`.ColumnProperty.Comparator` + + :class:`.RelationshipProperty.Comparator` + + :class:`.CompositeProperty.Comparator` + + :class:`.ColumnOperators` + + :ref:`types_operators` + + :attr:`.TypeEngine.comparator_factory` + + """ + + __slots__ = 'prop', 'property', '_parententity', '_adapt_to_entity' + + def __init__(self, prop, parentmapper, adapt_to_entity=None): + self.prop = self.property = prop + self._parententity = adapt_to_entity or parentmapper + self._adapt_to_entity = adapt_to_entity + + def __clause_element__(self): + raise NotImplementedError("%r" % self) + + def _query_clause_element(self): + return self.__clause_element__() + + def adapt_to_entity(self, adapt_to_entity): + """Return a copy of this PropComparator which will use the given + :class:`.AliasedInsp` to produce corresponding expressions. + """ + return self.__class__(self.prop, self._parententity, adapt_to_entity) + + @property + def _parentmapper(self): + """legacy; this is renamed to _parententity to be + compatible with QueryableAttribute.""" + return inspect(self._parententity).mapper + + @property + def adapter(self): + """Produce a callable that adapts column expressions + to suit an aliased version of this comparator. + + """ + if self._adapt_to_entity is None: + return None + else: + return self._adapt_to_entity._adapt_element + + @property + def info(self): + return self.property.info + + @staticmethod + def any_op(a, b, **kwargs): + return a.any(b, **kwargs) + + @staticmethod + def has_op(a, b, **kwargs): + return a.has(b, **kwargs) + + @staticmethod + def of_type_op(a, class_): + return a.of_type(class_) + + def of_type(self, class_): + r"""Redefine this object in terms of a polymorphic subclass. + + Returns a new PropComparator from which further criterion can be + evaluated. + + e.g.:: + + query.join(Company.employees.of_type(Engineer)).\ + filter(Engineer.name=='foo') + + :param \class_: a class or mapper indicating that criterion will be + against this specific subclass. + + + """ + + return self.operate(PropComparator.of_type_op, class_) + + def any(self, criterion=None, **kwargs): + r"""Return true if this collection contains any member that meets the + given criterion. + + The usual implementation of ``any()`` is + :meth:`.RelationshipProperty.Comparator.any`. + + :param criterion: an optional ClauseElement formulated against the + member class' table or attributes. 
+ + :param \**kwargs: key/value pairs corresponding to member class + attribute names which will be compared via equality to the + corresponding values. + + """ + + return self.operate(PropComparator.any_op, criterion, **kwargs) + + def has(self, criterion=None, **kwargs): + r"""Return true if this element references a member which meets the + given criterion. + + The usual implementation of ``has()`` is + :meth:`.RelationshipProperty.Comparator.has`. + + :param criterion: an optional ClauseElement formulated against the + member class' table or attributes. + + :param \**kwargs: key/value pairs corresponding to member class + attribute names which will be compared via equality to the + corresponding values. + + """ + + return self.operate(PropComparator.has_op, criterion, **kwargs) + + +class StrategizedProperty(MapperProperty): + """A MapperProperty which uses selectable strategies to affect + loading behavior. + + There is a single strategy selected by default. Alternate + strategies can be selected at Query time through the usage of + ``StrategizedOption`` objects via the Query.options() method. + + The mechanics of StrategizedProperty are used for every Query + invocation for every mapped attribute participating in that Query, + to determine first how the attribute will be rendered in SQL + and secondly how the attribute will retrieve a value from a result + row and apply it to a mapped object. The routines here are very + performance-critical. + + """ + + __slots__ = ( + '_strategies', 'strategy', + '_wildcard_token', '_default_path_loader_key' + ) + + strategy_wildcard_key = None + + def _memoized_attr__wildcard_token(self): + return ("%s:%s" % ( + self.strategy_wildcard_key, path_registry._WILDCARD_TOKEN), ) + + def _memoized_attr__default_path_loader_key(self): + return ( + "loader", + ("%s:%s" % ( + self.strategy_wildcard_key, path_registry._DEFAULT_TOKEN), ) + ) + + def _get_context_loader(self, context, path): + load = None + + # use EntityRegistry.__getitem__()->PropRegistry here so + # that the path is stated in terms of our base + search_path = dict.__getitem__(path, self) + + # search among: exact match, "attr.*", "default" strategy + # if any. 
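+        # (illustratively: for a hypothetical ``User.addresses``
+        # relationship, these would be roughly a ('loader', <path>) key
+        # for the exact attribute, a "relationship:*" wildcard key, and
+        # a default-token key; the exact tuple shapes are internal to
+        # PathRegistry and may differ)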
+ for path_key in ( + search_path._loader_key, + search_path._wildcard_path_loader_key, + search_path._default_path_loader_key + ): + if path_key in context.attributes: + load = context.attributes[path_key] + break + + return load + + def _get_strategy(self, key): + try: + return self._strategies[key] + except KeyError: + cls = self._strategy_lookup(*key) + self._strategies[key] = self._strategies[ + cls] = strategy = cls(self, key) + return strategy + + def setup( + self, context, entity, path, adapter, **kwargs): + loader = self._get_context_loader(context, path) + if loader and loader.strategy: + strat = self._get_strategy(loader.strategy) + else: + strat = self.strategy + strat.setup_query(context, entity, path, loader, adapter, **kwargs) + + def create_row_processor( + self, context, path, mapper, + result, adapter, populators): + loader = self._get_context_loader(context, path) + if loader and loader.strategy: + strat = self._get_strategy(loader.strategy) + else: + strat = self.strategy + strat.create_row_processor( + context, path, loader, + mapper, result, adapter, populators) + + def do_init(self): + self._strategies = {} + self.strategy = self._get_strategy(self.strategy_key) + + def post_instrument_class(self, mapper): + if not self.parent.non_primary and \ + not mapper.class_manager._attr_has_impl(self.key): + self.strategy.init_class_attribute(mapper) + + _all_strategies = collections.defaultdict(dict) + + @classmethod + def strategy_for(cls, **kw): + def decorate(dec_cls): + # ensure each subclass of the strategy has its + # own _strategy_keys collection + if '_strategy_keys' not in dec_cls.__dict__: + dec_cls._strategy_keys = [] + key = tuple(sorted(kw.items())) + cls._all_strategies[cls][key] = dec_cls + dec_cls._strategy_keys.append(key) + return dec_cls + return decorate + + @classmethod + def _strategy_lookup(cls, *key): + for prop_cls in cls.__mro__: + if prop_cls in cls._all_strategies: + strategies = cls._all_strategies[prop_cls] + try: + return strategies[key] + except KeyError: + pass + raise Exception("can't locate strategy for %s %s" % (cls, key)) + + +class MapperOption(object): + """Describe a modification to a Query.""" + + propagate_to_loaders = False + """if True, indicate this option should be carried along + to "secondary" Query objects produced during lazy loads + or refresh operations. + + """ + + def process_query(self, query): + """Apply a modification to the given :class:`.Query`.""" + + def process_query_conditionally(self, query): + """same as process_query(), except that this option may not + apply to the given query. + + This is typically used during a lazy load or scalar refresh + operation to propagate options stated in the original Query to the + new Query being used for the load. It occurs for those options that + specify propagate_to_loaders=True. + + """ + + self.process_query(query) + + +class LoaderStrategy(object): + """Describe the loading behavior of a StrategizedProperty object. + + The ``LoaderStrategy`` interacts with the querying process in three + ways: + + * it controls the configuration of the ``InstrumentedAttribute`` + placed on a class to handle the behavior of the attribute. this + may involve setting up class-level callable functions to fire + off a select operation when the attribute is first accessed + (i.e. a lazy load) + + * it processes the ``QueryContext`` at statement construction time, + where it can modify the SQL statement that is being produced. 
For example, simple column attributes will add their represented
+      column to the list of selected columns, a joined eager loader
+      may establish join clauses to add to the statement.
+
+    * It produces "row processor" functions at result fetching time.
+      These "row processor" functions populate a particular attribute
+      on a particular mapped instance.
+
+    """
+
+    __slots__ = 'parent_property', 'is_class_level', 'parent', 'key', \
+        'strategy_key', 'strategy_opts'
+
+    def __init__(self, parent, strategy_key):
+        self.parent_property = parent
+        self.is_class_level = False
+        self.parent = self.parent_property.parent
+        self.key = self.parent_property.key
+        self.strategy_key = strategy_key
+        self.strategy_opts = dict(strategy_key)
+
+    def init_class_attribute(self, mapper):
+        pass
+
+    def setup_query(self, context, entity, path, loadopt, adapter, **kwargs):
+        """Establish column and other state for a given QueryContext.
+
+        This method fulfills the contract specified by MapperProperty.setup().
+
+        StrategizedProperty delegates its setup() method
+        directly to this method.
+
+        """
+
+    def create_row_processor(self, context, path, loadopt, mapper,
+                             result, adapter, populators):
+        """Establish row processing functions for a given QueryContext.
+
+        This method fulfills the contract specified by
+        MapperProperty.create_row_processor().
+
+        StrategizedProperty delegates its create_row_processor() method
+        directly to this method.
+
+        """
+
+    def __str__(self):
+        return str(self.parent_property)
diff --git a/app/lib/sqlalchemy/orm/loading.py b/app/lib/sqlalchemy/orm/loading.py
new file mode 100644
index 0000000..f749cdd
--- /dev/null
+++ b/app/lib/sqlalchemy/orm/loading.py
@@ -0,0 +1,703 @@
+# orm/loading.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""private module containing functions used to convert database
+rows into object instances and associated state.
+
+the functions here are called primarily by Query, Mapper,
+as well as some of the attribute loading strategies.
+
+"""
+from __future__ import absolute_import
+
+from .. import util
+from . import attributes, exc as orm_exc
+from ..sql import util as sql_util
+from . import strategy_options
+
+from .util import _none_set, state_str
+from .base import _SET_DEFERRED_EXPIRED, _DEFER_FOR_STATE
+from ..
import exc as sa_exc +import collections + +_new_runid = util.counter() + + +def instances(query, cursor, context): + """Return an ORM result as an iterator.""" + + context.runid = _new_runid() + + filtered = query._has_mapper_entities + + single_entity = len(query._entities) == 1 and \ + query._entities[0].supports_single_entity + + if filtered: + if single_entity: + filter_fn = id + else: + def filter_fn(row): + return tuple( + id(item) + if ent.use_id_for_hash + else item + for ent, item in zip(query._entities, row) + ) + + try: + (process, labels) = \ + list(zip(*[ + query_entity.row_processor(query, + context, cursor) + for query_entity in query._entities + ])) + + if not single_entity: + keyed_tuple = util.lightweight_named_tuple('result', labels) + + while True: + context.partials = {} + + if query._yield_per: + fetch = cursor.fetchmany(query._yield_per) + if not fetch: + break + else: + fetch = cursor.fetchall() + + if single_entity: + proc = process[0] + rows = [proc(row) for row in fetch] + else: + rows = [keyed_tuple([proc(row) for proc in process]) + for row in fetch] + + if filtered: + rows = util.unique_list(rows, filter_fn) + + for row in rows: + yield row + + if not query._yield_per: + break + except Exception as err: + cursor.close() + util.raise_from_cause(err) + + +@util.dependencies("sqlalchemy.orm.query") +def merge_result(querylib, query, iterator, load=True): + """Merge a result into this :class:`.Query` object's Session.""" + + session = query.session + if load: + # flush current contents if we expect to load data + session._autoflush() + + autoflush = session.autoflush + try: + session.autoflush = False + single_entity = len(query._entities) == 1 + if single_entity: + if isinstance(query._entities[0], querylib._MapperEntity): + result = [session._merge( + attributes.instance_state(instance), + attributes.instance_dict(instance), + load=load, _recursive={}, _resolve_conflict_map={}) + for instance in iterator] + else: + result = list(iterator) + else: + mapped_entities = [i for i, e in enumerate(query._entities) + if isinstance(e, querylib._MapperEntity)] + result = [] + keys = [ent._label_name for ent in query._entities] + keyed_tuple = util.lightweight_named_tuple('result', keys) + for row in iterator: + newrow = list(row) + for i in mapped_entities: + if newrow[i] is not None: + newrow[i] = session._merge( + attributes.instance_state(newrow[i]), + attributes.instance_dict(newrow[i]), + load=load, _recursive={}, _resolve_conflict_map={}) + result.append(keyed_tuple(newrow)) + + return iter(result) + finally: + session.autoflush = autoflush + + +def get_from_identity(session, key, passive): + """Look up the given key in the given session's identity map, + check the object for expired state if found. 
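+
+    In rough terms this is the in-memory fast path consulted before any
+    SQL is emitted; a minimal sketch of the lookup it performs
+    (illustrative only; ``session`` and a mapped ``User`` class are
+    assumed to exist)::
+
+        from sqlalchemy import inspect
+
+        key = inspect(User).identity_key_from_primary_key((5,))
+        obj = session.identity_map.get(key)  # None -> fall through to SQL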
+ + """ + instance = session.identity_map.get(key) + if instance is not None: + + state = attributes.instance_state(instance) + + # expired - ensure it still exists + if state.expired: + if not passive & attributes.SQL_OK: + # TODO: no coverage here + return attributes.PASSIVE_NO_RESULT + elif not passive & attributes.RELATED_OBJECT_OK: + # this mode is used within a flush and the instance's + # expired state will be checked soon enough, if necessary + return instance + try: + state._load_expired(state, passive) + except orm_exc.ObjectDeletedError: + session._remove_newly_deleted([state]) + return None + return instance + else: + return None + + +def load_on_ident(query, key, + refresh_state=None, lockmode=None, + only_load_props=None): + """Load the given identity key from the database.""" + + if key is not None: + ident = key[1] + else: + ident = None + + if refresh_state is None: + q = query._clone() + q._get_condition() + else: + q = query._clone() + + if ident is not None: + mapper = query._mapper_zero() + + (_get_clause, _get_params) = mapper._get_clause + + # None present in ident - turn those comparisons + # into "IS NULL" + if None in ident: + nones = set([ + _get_params[col].key for col, value in + zip(mapper.primary_key, ident) if value is None + ]) + _get_clause = sql_util.adapt_criterion_to_null( + _get_clause, nones) + + _get_clause = q._adapt_clause(_get_clause, True, False) + q._criterion = _get_clause + + params = dict([ + (_get_params[primary_key].key, id_val) + for id_val, primary_key in zip(ident, mapper.primary_key) + ]) + + q._params = params + + if lockmode is not None: + version_check = True + q = q.with_lockmode(lockmode) + elif query._for_update_arg is not None: + version_check = True + q._for_update_arg = query._for_update_arg + else: + version_check = False + + q._get_options( + populate_existing=bool(refresh_state), + version_check=version_check, + only_load_props=only_load_props, + refresh_state=refresh_state) + q._order_by = None + + try: + return q.one() + except orm_exc.NoResultFound: + return None + + +def _setup_entity_query( + context, mapper, query_entity, + path, adapter, column_collection, + with_polymorphic=None, only_load_props=None, + polymorphic_discriminator=None, **kw): + + if with_polymorphic: + poly_properties = mapper._iterate_polymorphic_properties( + with_polymorphic) + else: + poly_properties = mapper._polymorphic_properties + + quick_populators = {} + + path.set( + context.attributes, + "memoized_setups", + quick_populators) + + for value in poly_properties: + if only_load_props and \ + value.key not in only_load_props: + continue + value.setup( + context, + query_entity, + path, + adapter, + only_load_props=only_load_props, + column_collection=column_collection, + memoized_populators=quick_populators, + **kw + ) + + if polymorphic_discriminator is not None and \ + polymorphic_discriminator \ + is not mapper.polymorphic_on: + + if adapter: + pd = adapter.columns[polymorphic_discriminator] + else: + pd = polymorphic_discriminator + column_collection.append(pd) + + +def _instance_processor( + mapper, context, result, path, adapter, + only_load_props=None, refresh_state=None, + polymorphic_discriminator=None, + _polymorphic_from=None): + """Produce a mapper level row processor callable + which processes rows into mapped instances.""" + + # note that this method, most of which exists in a closure + # called _instance(), resists being broken out, as + # attempts to do so tend to add significant function + # call overhead. 
_instance() is the most + # performance-critical section in the whole ORM. + + pk_cols = mapper.primary_key + + if adapter: + pk_cols = [adapter.columns[c] for c in pk_cols] + + identity_class = mapper._identity_class + + populators = collections.defaultdict(list) + + props = mapper._prop_set + if only_load_props is not None: + props = props.intersection( + mapper._props[k] for k in only_load_props) + + quick_populators = path.get( + context.attributes, "memoized_setups", _none_set) + + for prop in props: + if prop in quick_populators: + # this is an inlined path just for column-based attributes. + col = quick_populators[prop] + if col is _DEFER_FOR_STATE: + populators["new"].append( + (prop.key, prop._deferred_column_loader)) + elif col is _SET_DEFERRED_EXPIRED: + # note that in this path, we are no longer + # searching in the result to see if the column might + # be present in some unexpected way. + populators["expire"].append((prop.key, False)) + else: + if adapter: + col = adapter.columns[col] + getter = result._getter(col, False) + if getter: + populators["quick"].append((prop.key, getter)) + else: + # fall back to the ColumnProperty itself, which + # will iterate through all of its columns + # to see if one fits + prop.create_row_processor( + context, path, mapper, result, adapter, populators) + else: + prop.create_row_processor( + context, path, mapper, result, adapter, populators) + + propagate_options = context.propagate_options + load_path = context.query._current_path + path \ + if context.query._current_path.path else path + + session_identity_map = context.session.identity_map + + populate_existing = context.populate_existing or mapper.always_refresh + load_evt = bool(mapper.class_manager.dispatch.load) + refresh_evt = bool(mapper.class_manager.dispatch.refresh) + persistent_evt = bool(context.session.dispatch.loaded_as_persistent) + if persistent_evt: + loaded_as_persistent = context.session.dispatch.loaded_as_persistent + instance_state = attributes.instance_state + instance_dict = attributes.instance_dict + session_id = context.session.hash_key + version_check = context.version_check + runid = context.runid + + if refresh_state: + refresh_identity_key = refresh_state.key + if refresh_identity_key is None: + # super-rare condition; a refresh is being called + # on a non-instance-key instance; this is meant to only + # occur within a flush() + refresh_identity_key = \ + mapper._identity_key_from_state(refresh_state) + else: + refresh_identity_key = None + + if mapper.allow_partial_pks: + is_not_primary_key = _none_set.issuperset + else: + is_not_primary_key = _none_set.intersection + + def _instance(row): + + # determine the state that we'll be populating + if refresh_identity_key: + # fixed state that we're refreshing + state = refresh_state + instance = state.obj() + dict_ = instance_dict(instance) + isnew = state.runid != runid + currentload = True + loaded_instance = False + else: + # look at the row, see if that identity is in the + # session, or we have to create a new one + identitykey = ( + identity_class, + tuple([row[column] for column in pk_cols]) + ) + + instance = session_identity_map.get(identitykey) + + if instance is not None: + # existing instance + state = instance_state(instance) + dict_ = instance_dict(instance) + + isnew = state.runid != runid + currentload = not isnew + loaded_instance = False + + if version_check and not currentload: + _validate_version_id(mapper, state, dict_, row, adapter) + + else: + # create a new instance + + # check for non-NULL 
values in the primary key columns, + # else no entity is returned for the row + if is_not_primary_key(identitykey[1]): + return None + + isnew = True + currentload = True + loaded_instance = True + + instance = mapper.class_manager.new_instance() + + dict_ = instance_dict(instance) + state = instance_state(instance) + state.key = identitykey + + # attach instance to session. + state.session_id = session_id + session_identity_map._add_unpresent(state, identitykey) + + # populate. this looks at whether this state is new + # for this load or was existing, and whether or not this + # row is the first row with this identity. + if currentload or populate_existing: + # full population routines. Objects here are either + # just created, or we are doing a populate_existing + + # be conservative about setting load_path when populate_existing + # is in effect; want to maintain options from the original + # load. see test_expire->test_refresh_maintains_deferred_options + if isnew and (propagate_options or not populate_existing): + state.load_options = propagate_options + state.load_path = load_path + + _populate_full( + context, row, state, dict_, isnew, load_path, + loaded_instance, populate_existing, populators) + + if isnew: + if loaded_instance: + if load_evt: + state.manager.dispatch.load(state, context) + if persistent_evt: + loaded_as_persistent(context.session, state.obj()) + elif refresh_evt: + state.manager.dispatch.refresh( + state, context, only_load_props) + + if populate_existing or state.modified: + if refresh_state and only_load_props: + state._commit(dict_, only_load_props) + else: + state._commit_all(dict_, session_identity_map) + + else: + # partial population routines, for objects that were already + # in the Session, but a row matches them; apply eager loaders + # on existing objects, etc. + unloaded = state.unloaded + isnew = state not in context.partials + + if not isnew or unloaded or populators["eager"]: + # state is having a partial set of its attributes + # refreshed. Populate those attributes, + # and add to the "context.partials" collection. + + to_load = _populate_partial( + context, row, state, dict_, isnew, load_path, + unloaded, populators) + + if isnew: + if refresh_evt: + state.manager.dispatch.refresh( + state, context, to_load) + + state._commit(dict_, to_load) + + return instance + + if mapper.polymorphic_map and not _polymorphic_from and not refresh_state: + # if we are doing polymorphic, dispatch to a different _instance() + # method specific to the subclass mapper + _instance = _decorate_polymorphic_switch( + _instance, context, mapper, result, path, + polymorphic_discriminator, adapter) + + return _instance + + +def _populate_full( + context, row, state, dict_, isnew, load_path, + loaded_instance, populate_existing, populators): + if isnew: + # first time we are seeing a row with this identity. + state.runid = context.runid + + for key, getter in populators["quick"]: + dict_[key] = getter(row) + if populate_existing: + for key, set_callable in populators["expire"]: + dict_.pop(key, None) + if set_callable: + state.expired_attributes.add(key) + else: + for key, set_callable in populators["expire"]: + if set_callable: + state.expired_attributes.add(key) + for key, populator in populators["new"]: + populator(state, dict_, row) + for key, populator in populators["delayed"]: + populator(state, dict_, row) + elif load_path != state.load_path: + # new load path, e.g. 
object is present in more than one + # column position in a series of rows + state.load_path = load_path + + # if we have data, and the data isn't in the dict, OK, let's put + # it in. + for key, getter in populators["quick"]: + if key not in dict_: + dict_[key] = getter(row) + + # otherwise treat like an "already seen" row + for key, populator in populators["existing"]: + populator(state, dict_, row) + # TODO: allow "existing" populator to know this is + # a new path for the state: + # populator(state, dict_, row, new_path=True) + + else: + # have already seen rows with this identity in this same path. + for key, populator in populators["existing"]: + populator(state, dict_, row) + + # TODO: same path + # populator(state, dict_, row, new_path=False) + + +def _populate_partial( + context, row, state, dict_, isnew, load_path, + unloaded, populators): + + if not isnew: + to_load = context.partials[state] + for key, populator in populators["existing"]: + if key in to_load: + populator(state, dict_, row) + else: + to_load = unloaded + context.partials[state] = to_load + + for key, getter in populators["quick"]: + if key in to_load: + dict_[key] = getter(row) + for key, set_callable in populators["expire"]: + if key in to_load: + dict_.pop(key, None) + if set_callable: + state.expired_attributes.add(key) + for key, populator in populators["new"]: + if key in to_load: + populator(state, dict_, row) + for key, populator in populators["delayed"]: + if key in to_load: + populator(state, dict_, row) + for key, populator in populators["eager"]: + if key not in unloaded: + populator(state, dict_, row) + + return to_load + + +def _validate_version_id(mapper, state, dict_, row, adapter): + + version_id_col = mapper.version_id_col + + if version_id_col is None: + return + + if adapter: + version_id_col = adapter.columns[version_id_col] + + if mapper._get_state_attr_by_column( + state, dict_, mapper.version_id_col) != row[version_id_col]: + raise orm_exc.StaleDataError( + "Instance '%s' has version id '%s' which " + "does not match database-loaded version id '%s'." 
+ % (state_str(state), mapper._get_state_attr_by_column( + state, dict_, mapper.version_id_col), + row[version_id_col])) + + +def _decorate_polymorphic_switch( + instance_fn, context, mapper, result, path, + polymorphic_discriminator, adapter): + if polymorphic_discriminator is not None: + polymorphic_on = polymorphic_discriminator + else: + polymorphic_on = mapper.polymorphic_on + if polymorphic_on is None: + return instance_fn + + if adapter: + polymorphic_on = adapter.columns[polymorphic_on] + + def configure_subclass_mapper(discriminator): + try: + sub_mapper = mapper.polymorphic_map[discriminator] + except KeyError: + raise AssertionError( + "No such polymorphic_identity %r is defined" % + discriminator) + else: + if sub_mapper is mapper: + return None + + return _instance_processor( + sub_mapper, context, result, + path, adapter, _polymorphic_from=mapper) + + polymorphic_instances = util.PopulateDict( + configure_subclass_mapper + ) + + def polymorphic_instance(row): + discriminator = row[polymorphic_on] + if discriminator is not None: + _instance = polymorphic_instances[discriminator] + if _instance: + return _instance(row) + return instance_fn(row) + return polymorphic_instance + + +def load_scalar_attributes(mapper, state, attribute_names): + """initiate a column-based attribute refresh operation.""" + + # assert mapper is _state_mapper(state) + session = state.session + if not session: + raise orm_exc.DetachedInstanceError( + "Instance %s is not bound to a Session; " + "attribute refresh operation cannot proceed" % + (state_str(state))) + + has_key = bool(state.key) + + result = False + + if mapper.inherits and not mapper.concrete: + # because we are using Core to produce a select() that we + # pass to the Query, we aren't calling setup() for mapped + # attributes; in 1.0 this means deferred attrs won't get loaded + # by default + statement = mapper._optimized_get_statement(state, attribute_names) + if statement is not None: + result = load_on_ident( + session.query(mapper). + options( + strategy_options.Load(mapper).undefer("*") + ).from_statement(statement), + None, + only_load_props=attribute_names, + refresh_state=state + ) + + if result is False: + if has_key: + identity_key = state.key + else: + # this codepath is rare - only valid when inside a flush, and the + # object is becoming persistent but hasn't yet been assigned + # an identity_key. + # check here to ensure we have the attrs we need. + pk_attrs = [mapper._columntoproperty[col].key + for col in mapper.primary_key] + if state.expired_attributes.intersection(pk_attrs): + raise sa_exc.InvalidRequestError( + "Instance %s cannot be refreshed - it's not " + " persistent and does not " + "contain a full primary key." 
% state_str(state))
+            identity_key = mapper._identity_key_from_state(state)
+
+        if (_none_set.issubset(identity_key) and
+                not mapper.allow_partial_pks) or \
+                _none_set.issuperset(identity_key):
+            util.warn_limited(
+                "Instance %s to be refreshed doesn't "
+                "contain a full primary key - can't be refreshed "
+                "(and shouldn't be expired, either).",
+                state_str(state))
+            return
+
+        result = load_on_ident(
+            session.query(mapper),
+            identity_key,
+            refresh_state=state,
+            only_load_props=attribute_names)
+
+    # if instance is pending, a refresh operation
+    # may not complete (even if PK attributes are assigned)
+    if has_key and result is None:
+        raise orm_exc.ObjectDeletedError(state)
diff --git a/app/lib/sqlalchemy/orm/mapper.py b/app/lib/sqlalchemy/orm/mapper.py
new file mode 100644
index 0000000..962486d
--- /dev/null
+++ b/app/lib/sqlalchemy/orm/mapper.py
@@ -0,0 +1,3007 @@
+# orm/mapper.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""Logic to map Python classes to and from selectables.
+
+Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central
+configurational unit which associates a class with a database table.
+
+This is a semi-private module; the main configurational API of the ORM is
+available in :class:`~sqlalchemy.orm.`.
+
+"""
+from __future__ import absolute_import
+
+import types
+import weakref
+from itertools import chain
+from collections import deque
+
+from .. import sql, util, log, exc as sa_exc, event, schema, inspection
+from ..sql import expression, visitors, operators, util as sql_util
+from . import instrumentation, attributes, exc as orm_exc, loading
+from . import properties
+from . import util as orm_util
+from .interfaces import MapperProperty, InspectionAttr, _MappedAttribute
+
+from .base import _class_to_mapper, _state_mapper, class_mapper, \
+    state_str, _INSTRUMENTOR
+from .path_registry import PathRegistry
+
+import sys
+
+
+_mapper_registry = weakref.WeakKeyDictionary()
+_already_compiling = False
+
+_memoized_configured_property = util.group_expirable_memoized_property()
+
+
+# a constant returned by _get_attr_by_column to indicate
+# this mapper is not handling an attribute for a particular
+# column
+NO_ATTRIBUTE = util.symbol('NO_ATTRIBUTE')
+
+# lock used to synchronize the "mapper configure" step
+_CONFIGURE_MUTEX = util.threading.RLock()
+
+
+@inspection._self_inspects
+@log.class_logger
+class Mapper(InspectionAttr):
+    """Define the correlation of class attributes to database table
+    columns.
+
+    The :class:`.Mapper` object is instantiated using the
+    :func:`~sqlalchemy.orm.mapper` function.  For information
+    about instantiating new :class:`.Mapper` objects, see
+    that function's documentation.
+
+
+    When :func:`.mapper` is used
+    explicitly to link a user defined class with table
+    metadata, this is referred to as *classical mapping*.
+    Modern SQLAlchemy usage tends to favor the
+    :mod:`sqlalchemy.ext.declarative` extension for class
+    configuration, which
+    makes usage of :func:`.mapper` behind the scenes.
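+
+    For instance, a minimal declarative mapping along these lines (an
+    illustrative sketch) invokes :func:`.mapper` implicitly::
+
+        from sqlalchemy import Column, Integer, String
+        from sqlalchemy.ext.declarative import declarative_base
+
+        Base = declarative_base()
+
+        class MyClass(Base):
+            __tablename__ = 'my_table'
+
+            id = Column(Integer, primary_key=True)
+            name = Column(String(50))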
+ + Given a particular class known to be mapped by the ORM, + the :class:`.Mapper` which maintains it can be acquired + using the :func:`.inspect` function:: + + from sqlalchemy import inspect + + mapper = inspect(MyClass) + + A class which was mapped by the :mod:`sqlalchemy.ext.declarative` + extension will also have its mapper available via the ``__mapper__`` + attribute. + + + """ + + _new_mappers = False + + def __init__(self, + class_, + local_table=None, + properties=None, + primary_key=None, + non_primary=False, + inherits=None, + inherit_condition=None, + inherit_foreign_keys=None, + extension=None, + order_by=False, + always_refresh=False, + version_id_col=None, + version_id_generator=None, + polymorphic_on=None, + _polymorphic_map=None, + polymorphic_identity=None, + concrete=False, + with_polymorphic=None, + allow_partial_pks=True, + batch=True, + column_prefix=None, + include_properties=None, + exclude_properties=None, + passive_updates=True, + passive_deletes=False, + confirm_deleted_rows=True, + eager_defaults=False, + legacy_is_orphan=False, + _compiled_cache_size=100, + ): + r"""Return a new :class:`~.Mapper` object. + + This function is typically used behind the scenes + via the Declarative extension. When using Declarative, + many of the usual :func:`.mapper` arguments are handled + by the Declarative extension itself, including ``class_``, + ``local_table``, ``properties``, and ``inherits``. + Other options are passed to :func:`.mapper` using + the ``__mapper_args__`` class variable:: + + class MyClass(Base): + __tablename__ = 'my_table' + id = Column(Integer, primary_key=True) + type = Column(String(50)) + alt = Column("some_alt", Integer) + + __mapper_args__ = { + 'polymorphic_on' : type + } + + + Explicit use of :func:`.mapper` + is often referred to as *classical mapping*. The above + declarative example is equivalent in classical form to:: + + my_table = Table("my_table", metadata, + Column('id', Integer, primary_key=True), + Column('type', String(50)), + Column("some_alt", Integer) + ) + + class MyClass(object): + pass + + mapper(MyClass, my_table, + polymorphic_on=my_table.c.type, + properties={ + 'alt':my_table.c.some_alt + }) + + .. seealso:: + + :ref:`classical_mapping` - discussion of direct usage of + :func:`.mapper` + + :param class\_: The class to be mapped. When using Declarative, + this argument is automatically passed as the declared class + itself. + + :param local_table: The :class:`.Table` or other selectable + to which the class is mapped. May be ``None`` if + this mapper inherits from another mapper using single-table + inheritance. When using Declarative, this argument is + automatically passed by the extension, based on what + is configured via the ``__table__`` argument or via the + :class:`.Table` produced as a result of the ``__tablename__`` + and :class:`.Column` arguments present. + + :param always_refresh: If True, all query operations for this mapped + class will overwrite all data within object instances that already + exist within the session, erasing any in-memory changes with + whatever information was loaded from the database. Usage of this + flag is highly discouraged; as an alternative, see the method + :meth:`.Query.populate_existing`. + + :param allow_partial_pks: Defaults to True. Indicates that a + composite primary key with some NULL values should be considered as + possibly existing within the database. 
This affects whether a
+          mapper will assign an incoming row to an existing identity, as well
+          as if :meth:`.Session.merge` will check the database first for a
+          particular primary key value. A "partial primary key" can occur if
+          one has mapped to an OUTER JOIN, for example.
+
+        :param batch: Defaults to ``True``, indicating that save operations
+          of multiple entities can be batched together for efficiency.
+          Setting to False indicates
+          that an instance will be fully saved before saving the next
+          instance.  This is used in the extremely rare case that a
+          :class:`.MapperEvents` listener requires being called
+          in between individual row persistence operations.
+
+        :param column_prefix: A string which will be prepended
+          to the mapped attribute name when :class:`.Column`
+          objects are automatically assigned as attributes to the
+          mapped class.  Does not affect explicitly specified
+          column-based properties.
+
+          See the section :ref:`column_prefix` for an example.
+
+        :param concrete: If True, indicates this mapper should use concrete
+          table inheritance with its parent mapper.
+
+          See the section :ref:`concrete_inheritance` for an example.
+
+        :param confirm_deleted_rows: defaults to True; when a DELETE occurs
+          of one or more rows based on specific primary keys, a warning is
+          emitted when the number of rows matched does not equal the number
+          of rows expected.  This parameter may be set to False to handle the
+          case where database ON DELETE CASCADE rules may be deleting some of
+          those rows automatically.  The warning may be changed to an
+          exception in a future release.
+
+          .. versionadded:: 0.9.4 - added
+             :paramref:`.mapper.confirm_deleted_rows` as well as conditional
+             matched row checking on delete.
+
+        :param eager_defaults: if True, the ORM will immediately fetch the
+          value of server-generated default values after an INSERT or UPDATE,
+          rather than leaving them as expired to be fetched on next access.
+          This can be used for event schemes where the server-generated values
+          are needed immediately before the flush completes.  By default,
+          this scheme will emit an individual ``SELECT`` statement per row
+          inserted or updated, which can add significant performance
+          overhead.  However, if the
+          target database supports :term:`RETURNING`, the default values will
+          be returned inline with the INSERT or UPDATE statement, which can
+          greatly enhance performance for an application that needs frequent
+          access to just-generated server defaults.
+
+          .. versionchanged:: 0.9.0 The ``eager_defaults`` option can now
+             make use of :term:`RETURNING` for backends which support it.
+
+        :param exclude_properties: A list or set of string column names to
+          be excluded from mapping.
+
+          See :ref:`include_exclude_cols` for an example.
+
+        :param extension: A :class:`.MapperExtension` instance or
+          list of :class:`.MapperExtension` instances which will be applied
+          to all operations by this :class:`.Mapper`.  **Deprecated.**
+          Please see :class:`.MapperEvents`.
+
+        :param include_properties: An inclusive list or set of string column
+          names to map.
+
+          See :ref:`include_exclude_cols` for an example.
+
+        :param inherits: A mapped class or the corresponding :class:`.Mapper`
+          of one indicating a superclass from which this :class:`.Mapper`
+          should *inherit*.  The mapped class here must be a subclass
+          of the other mapper's class.  When using Declarative, this argument
+          is passed automatically as a result of the natural class
+          hierarchy of the declared classes.
+
+          ..
seealso:: + + :ref:`inheritance_toplevel` + + :param inherit_condition: For joined table inheritance, a SQL + expression which will + define how the two tables are joined; defaults to a natural join + between the two tables. + + :param inherit_foreign_keys: When ``inherit_condition`` is used and + the columns present are missing a :class:`.ForeignKey` + configuration, this parameter can be used to specify which columns + are "foreign". In most cases can be left as ``None``. + + :param legacy_is_orphan: Boolean, defaults to ``False``. + When ``True``, specifies that "legacy" orphan consideration + is to be applied to objects mapped by this mapper, which means + that a pending (that is, not persistent) object is auto-expunged + from an owning :class:`.Session` only when it is de-associated + from *all* parents that specify a ``delete-orphan`` cascade towards + this mapper. The new default behavior is that the object is + auto-expunged when it is de-associated with *any* of its parents + that specify ``delete-orphan`` cascade. This behavior is more + consistent with that of a persistent object, and allows behavior to + be consistent in more scenarios independently of whether or not an + orphanable object has been flushed yet or not. + + See the change note and example at :ref:`legacy_is_orphan_addition` + for more detail on this change. + + .. versionadded:: 0.8 - the consideration of a pending object as + an "orphan" has been modified to more closely match the + behavior as that of persistent objects, which is that the object + is expunged from the :class:`.Session` as soon as it is + de-associated from any of its orphan-enabled parents. Previously, + the pending object would be expunged only if de-associated + from all of its orphan-enabled parents. The new flag + ``legacy_is_orphan`` is added to :func:`.orm.mapper` which + re-establishes the legacy behavior. + + :param non_primary: Specify that this :class:`.Mapper` is in addition + to the "primary" mapper, that is, the one used for persistence. + The :class:`.Mapper` created here may be used for ad-hoc + mapping of the class to an alternate selectable, for loading + only. + + :paramref:`.Mapper.non_primary` is not an often used option, but + is useful in some specific :func:`.relationship` cases. + + .. seealso:: + + :ref:`relationship_non_primary_mapper` + + :param order_by: A single :class:`.Column` or list of :class:`.Column` + objects for which selection operations should use as the default + ordering for entities. By default mappers have no pre-defined + ordering. + + .. deprecated:: 1.1 The :paramref:`.Mapper.order_by` parameter + is deprecated. Use :meth:`.Query.order_by` to determine the + ordering of a result set. + + :param passive_deletes: Indicates DELETE behavior of foreign key + columns when a joined-table inheritance entity is being deleted. + Defaults to ``False`` for a base mapper; for an inheriting mapper, + defaults to ``False`` unless the value is set to ``True`` + on the superclass mapper. + + When ``True``, it is assumed that ON DELETE CASCADE is configured + on the foreign key relationships that link this mapper's table + to its superclass table, so that when the unit of work attempts + to delete the entity, it need only emit a DELETE statement for the + superclass table, and not this table. + + When ``False``, a DELETE statement is emitted for this mapper's + table individually. 
If the primary key attributes local to this + table are unloaded, then a SELECT must be emitted in order to + validate these attributes; note that the primary key columns + of a joined-table subclass are not part of the "primary key" of + the object as a whole. + + Note that a value of ``True`` is **always** forced onto the + subclass mappers; that is, it's not possible for a superclass + to specify passive_deletes without this taking effect for + all subclass mappers. + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`passive_deletes` - description of similar feature as + used with :func:`.relationship` + + :paramref:`.mapper.passive_updates` - supporting ON UPDATE + CASCADE for joined-table inheritance mappers + + :param passive_updates: Indicates UPDATE behavior of foreign key + columns when a primary key column changes on a joined-table + inheritance mapping. Defaults to ``True``. + + When True, it is assumed that ON UPDATE CASCADE is configured on + the foreign key in the database, and that the database will handle + propagation of an UPDATE from a source column to dependent columns + on joined-table rows. + + When False, it is assumed that the database does not enforce + referential integrity and will not be issuing its own CASCADE + operation for an update. The unit of work process will + emit an UPDATE statement for the dependent columns during a + primary key change. + + .. seealso:: + + :ref:`passive_updates` - description of a similar feature as + used with :func:`.relationship` + + :paramref:`.mapper.passive_deletes` - supporting ON DELETE + CASCADE for joined-table inheritance mappers + + :param polymorphic_on: Specifies the column, attribute, or + SQL expression used to determine the target class for an + incoming row, when inheriting classes are present. + + This value is commonly a :class:`.Column` object that's + present in the mapped :class:`.Table`:: + + class Employee(Base): + __tablename__ = 'employee' + + id = Column(Integer, primary_key=True) + discriminator = Column(String(50)) + + __mapper_args__ = { + "polymorphic_on":discriminator, + "polymorphic_identity":"employee" + } + + It may also be specified + as a SQL expression, as in this example where we + use the :func:`.case` construct to provide a conditional + approach:: + + class Employee(Base): + __tablename__ = 'employee' + + id = Column(Integer, primary_key=True) + discriminator = Column(String(50)) + + __mapper_args__ = { + "polymorphic_on":case([ + (discriminator == "EN", "engineer"), + (discriminator == "MA", "manager"), + ], else_="employee"), + "polymorphic_identity":"employee" + } + + It may also refer to any attribute + configured with :func:`.column_property`, or to the + string name of one:: + + class Employee(Base): + __tablename__ = 'employee' + + id = Column(Integer, primary_key=True) + discriminator = Column(String(50)) + employee_type = column_property( + case([ + (discriminator == "EN", "engineer"), + (discriminator == "MA", "manager"), + ], else_="employee") + ) + + __mapper_args__ = { + "polymorphic_on":employee_type, + "polymorphic_identity":"employee" + } + + .. versionchanged:: 0.7.4 + ``polymorphic_on`` may be specified as a SQL expression, + or refer to any attribute configured with + :func:`.column_property`, or to the string name of one. 
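+
+          The string form (an illustrative sketch; ``employee_type`` names
+          a :func:`.column_property` like the one above) simply refers to
+          such a configured attribute by name::
+
+            __mapper_args__ = {
+                "polymorphic_on": "employee_type",
+                "polymorphic_identity": "employee"
+            }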
+
+          When setting ``polymorphic_on`` to reference an
+          attribute or expression that's not present in the
+          locally mapped :class:`.Table`, yet the value
+          of the discriminator should be persisted to the database,
+          the value of the
+          discriminator is not automatically set on new
+          instances; this must be handled by the user,
+          either through manual means or via event listeners.
+          A typical approach to establishing such a listener
+          looks like::
+
+              from sqlalchemy import event
+              from sqlalchemy.orm import object_mapper
+
+              @event.listens_for(Employee, "init", propagate=True)
+              def set_identity(instance, *arg, **kw):
+                  mapper = object_mapper(instance)
+                  instance.discriminator = mapper.polymorphic_identity
+
+          Where above, we assign the value of ``polymorphic_identity``
+          for the mapped class to the ``discriminator`` attribute,
+          thus persisting the value to the ``discriminator`` column
+          in the database.
+
+          .. warning::
+
+             Currently, **only one discriminator column may be set**, typically
+             on the base-most class in the hierarchy. "Cascading" polymorphic
+             columns are not yet supported.
+
+          .. seealso::
+
+            :ref:`inheritance_toplevel`
+
+        :param polymorphic_identity: Specifies the value which
+          identifies this particular class as returned by the
+          column expression referred to by the ``polymorphic_on``
+          setting.  As rows are received, the value corresponding
+          to the ``polymorphic_on`` column expression is compared
+          to this value, indicating which subclass should
+          be used for the newly reconstructed object.
+
+        :param properties: A dictionary mapping the string names of object
+          attributes to :class:`.MapperProperty` instances, which define the
+          persistence behavior of that attribute.  Note that :class:`.Column`
+          objects present in
+          the mapped :class:`.Table` are automatically placed into
+          ``ColumnProperty`` instances upon mapping, unless overridden.
+          When using Declarative, this argument is passed automatically,
+          based on all those :class:`.MapperProperty` instances declared
+          in the declared class body.
+
+        :param primary_key: A list of :class:`.Column` objects which define
+          the primary key to be used against this mapper's selectable unit.
+          This is normally simply the primary key of the ``local_table``, but
+          can be overridden here.
+
+        :param version_id_col: A :class:`.Column`
+          that will be used to keep a running version id of rows
+          in the table.  This is used to detect concurrent updates or
+          the presence of stale data in a flush.  The methodology is to
+          detect when an UPDATE statement does not match the last known
+          version id; when this occurs, a
+          :class:`~sqlalchemy.orm.exc.StaleDataError` exception is
+          thrown.
+          By default, the column must be of :class:`.Integer` type,
+          unless ``version_id_generator`` specifies an alternative version
+          generator.
+
+          .. seealso::
+
+            :ref:`mapper_version_counter` - discussion of version counting
+            and rationale.
+
+        :param version_id_generator: Define how new version ids should
+          be generated.  Defaults to ``None``, which indicates that
+          a simple integer counting scheme be employed.  To provide a custom
+          versioning scheme, provide a callable function of the form::
+
+              def generate_version(version):
+                  return next_version
+
+          Alternatively, server-side versioning functions such as triggers,
+          or programmatic versioning schemes outside of the version id
+          generator may be used, by specifying the value ``False``.
+          Please see :ref:`server_side_version_counter` for a discussion
+          of important points when using this option.
+
+          .. versionadded:: 0.9.0 ``version_id_generator`` supports
+             server-side version number generation.
+
+          .. seealso::
+
+             :ref:`custom_version_counter`
+
+             :ref:`server_side_version_counter`
+
+
+        :param with_polymorphic: A tuple in the form ``(<classes>,
+            <selectable>)`` indicating the default style of "polymorphic"
+            loading, that is, which tables are queried at once. <classes> is
+            any single or list of mappers and/or classes indicating the
+            inherited classes that should be loaded at once. The special value
+            ``'*'`` may be used to indicate all descending classes should be
+            loaded immediately. The second tuple argument <selectable>
+            indicates a selectable that will be used to query for multiple
+            classes.
+
+          .. seealso::
+
+            :ref:`with_polymorphic` - discussion of polymorphic querying
+            techniques.
+
+        """
+
+        self.class_ = util.assert_arg_type(class_, type, 'class_')
+
+        self.class_manager = None
+
+        self._primary_key_argument = util.to_list(primary_key)
+        self.non_primary = non_primary
+
+        if order_by is not False:
+            self.order_by = util.to_list(order_by)
+            util.warn_deprecated(
+                "Mapper.order_by is deprecated. "
+                "Use Query.order_by() in order to affect the ordering of ORM "
+                "result sets.")
+
+        else:
+            self.order_by = order_by
+
+        self.always_refresh = always_refresh
+
+        if isinstance(version_id_col, MapperProperty):
+            self.version_id_prop = version_id_col
+            self.version_id_col = None
+        else:
+            self.version_id_col = version_id_col
+        if version_id_generator is False:
+            self.version_id_generator = False
+        elif version_id_generator is None:
+            self.version_id_generator = lambda x: (x or 0) + 1
+        else:
+            self.version_id_generator = version_id_generator
+
+        self.concrete = concrete
+        self.single = False
+        self.inherits = inherits
+        self.local_table = local_table
+        self.inherit_condition = inherit_condition
+        self.inherit_foreign_keys = inherit_foreign_keys
+        self._init_properties = properties or {}
+        self._delete_orphans = []
+        self.batch = batch
+        self.eager_defaults = eager_defaults
+        self.column_prefix = column_prefix
+        self.polymorphic_on = expression._clause_element_as_expr(
+            polymorphic_on)
+        self._dependency_processors = []
+        self.validators = util.immutabledict()
+        self.passive_updates = passive_updates
+        self.passive_deletes = passive_deletes
+        self.legacy_is_orphan = legacy_is_orphan
+        self._clause_adapter = None
+        self._requires_row_aliasing = False
+        self._inherits_equated_pairs = None
+        self._memoized_values = {}
+        self._compiled_cache_size = _compiled_cache_size
+        self._reconstructor = None
+        self._deprecated_extensions = util.to_list(extension or [])
+        self.allow_partial_pks = allow_partial_pks
+
+        if self.inherits and not self.concrete:
+            self.confirm_deleted_rows = False
+        else:
+            self.confirm_deleted_rows = confirm_deleted_rows
+
+        self._set_with_polymorphic(with_polymorphic)
+
+        if isinstance(self.local_table, expression.SelectBase):
+            raise sa_exc.InvalidRequestError(
+                "When mapping against a select() construct, map against "
+                "an alias() of the construct instead. "
+                "This is because several databases don't allow a "
+                "SELECT from a subquery that does not have an alias."
+            )
+
+        if self.with_polymorphic and \
+                isinstance(self.with_polymorphic[1],
+                           expression.SelectBase):
+            self.with_polymorphic = (self.with_polymorphic[0],
+                                     self.with_polymorphic[1].alias())
+
+        # our 'polymorphic identity', a string name that when located in a
+        # result set row indicates this Mapper should be used to construct
+        # the object instance for that row.
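+        # (e.g. polymorphic_identity="engineer" on a hypothetical Engineer
+        # subclass of a base whose discriminator column yields that string
+        # for engineer rows)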
+ self.polymorphic_identity = polymorphic_identity + + # a dictionary of 'polymorphic identity' names, associating those + # names with Mappers that will be used to construct object instances + # upon a select operation. + if _polymorphic_map is None: + self.polymorphic_map = {} + else: + self.polymorphic_map = _polymorphic_map + + if include_properties is not None: + self.include_properties = util.to_set(include_properties) + else: + self.include_properties = None + if exclude_properties: + self.exclude_properties = util.to_set(exclude_properties) + else: + self.exclude_properties = None + + self.configured = False + + # prevent this mapper from being constructed + # while a configure_mappers() is occurring (and defer a + # configure_mappers() until construction succeeds) + _CONFIGURE_MUTEX.acquire() + try: + self.dispatch._events._new_mapper_instance(class_, self) + self._configure_inheritance() + self._configure_legacy_instrument_class() + self._configure_class_instrumentation() + self._configure_listeners() + self._configure_properties() + self._configure_polymorphic_setter() + self._configure_pks() + Mapper._new_mappers = True + self._log("constructed") + self._expire_memoizations() + finally: + _CONFIGURE_MUTEX.release() + + # major attributes initialized at the classlevel so that + # they can be Sphinx-documented. + + is_mapper = True + """Part of the inspection API.""" + + @property + def mapper(self): + """Part of the inspection API. + + Returns self. + + """ + return self + + @property + def entity(self): + r"""Part of the inspection API. + + Returns self.class\_. + + """ + return self.class_ + + local_table = None + """The :class:`.Selectable` which this :class:`.Mapper` manages. + + Typically is an instance of :class:`.Table` or :class:`.Alias`. + May also be ``None``. + + The "local" table is the + selectable that the :class:`.Mapper` is directly responsible for + managing from an attribute access and flush perspective. For + non-inheriting mappers, the local table is the same as the + "mapped" table. For joined-table inheritance mappers, local_table + will be the particular sub-table of the overall "join" which + this :class:`.Mapper` represents. If this mapper is a + single-table inheriting mapper, local_table will be ``None``. + + .. seealso:: + + :attr:`~.Mapper.mapped_table`. + + """ + + mapped_table = None + """The :class:`.Selectable` to which this :class:`.Mapper` is mapped. + + Typically an instance of :class:`.Table`, :class:`.Join`, or + :class:`.Alias`. + + The "mapped" table is the selectable that + the mapper selects from during queries. For non-inheriting + mappers, the mapped table is the same as the "local" table. + For joined-table inheritance mappers, mapped_table references the + full :class:`.Join` representing full rows for this particular + subclass. For single-table inheritance mappers, mapped_table + references the base table. + + .. seealso:: + + :attr:`~.Mapper.local_table`. + + """ + + inherits = None + """References the :class:`.Mapper` which this :class:`.Mapper` + inherits from, if any. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. + + """ + + configured = None + """Represent ``True`` if this :class:`.Mapper` has been configured. + + This is a *read only* attribute determined during mapper construction. + Behavior is undefined if directly modified. + + .. seealso:: + + :func:`.configure_mappers`. 
+
+    """
+
+    concrete = None
+    """Represent ``True`` if this :class:`.Mapper` is a concrete
+    inheritance mapper.
+
+    This is a *read only* attribute determined during mapper construction.
+    Behavior is undefined if directly modified.
+
+    """
+
+    tables = None
+    """An iterable containing the collection of :class:`.Table` objects
+    which this :class:`.Mapper` is aware of.
+
+    If the mapper is mapped to a :class:`.Join`, or an :class:`.Alias`
+    representing a :class:`.Select`, the individual :class:`.Table`
+    objects that comprise the full construct will be represented here.
+
+    This is a *read only* attribute determined during mapper construction.
+    Behavior is undefined if directly modified.
+
+    """
+
+    primary_key = None
+    """An iterable containing the collection of :class:`.Column` objects
+    which comprise the 'primary key' of the mapped table, from the
+    perspective of this :class:`.Mapper`.
+
+    This list is against the selectable in :attr:`~.Mapper.mapped_table`. In
+    the case of inheriting mappers, some columns may be managed by a
+    superclass mapper.  For example, in the case of a :class:`.Join`, the
+    primary key is determined by all of the primary key columns across all
+    tables referenced by the :class:`.Join`.
+
+    The list is also not necessarily the same as the primary key column
+    collection associated with the underlying tables; the :class:`.Mapper`
+    features a ``primary_key`` argument that can override what the
+    :class:`.Mapper` considers as primary key columns.
+
+    This is a *read only* attribute determined during mapper construction.
+    Behavior is undefined if directly modified.
+
+    """
+
+    class_ = None
+    """The Python class which this :class:`.Mapper` maps.
+
+    This is a *read only* attribute determined during mapper construction.
+    Behavior is undefined if directly modified.
+
+    """
+
+    class_manager = None
+    """The :class:`.ClassManager` which maintains event listeners
+    and class-bound descriptors for this :class:`.Mapper`.
+
+    This is a *read only* attribute determined during mapper construction.
+    Behavior is undefined if directly modified.
+
+    """
+
+    single = None
+    """Represent ``True`` if this :class:`.Mapper` is a single table
+    inheritance mapper.
+
+    :attr:`~.Mapper.local_table` will be ``None`` if this flag is set.
+
+    This is a *read only* attribute determined during mapper construction.
+    Behavior is undefined if directly modified.
+
+    """
+
+    non_primary = None
+    """Represent ``True`` if this :class:`.Mapper` is a "non-primary"
+    mapper, e.g. a mapper that is used only to select rows but not for
+    persistence management.
+
+    This is a *read only* attribute determined during mapper construction.
+    Behavior is undefined if directly modified.
+
+    """
+
+    polymorphic_on = None
+    """The :class:`.Column` or SQL expression specified as the
+    ``polymorphic_on`` argument
+    for this :class:`.Mapper`, within an inheritance scenario.
+
+    This attribute is normally a :class:`.Column` instance but
+    may also be an expression, such as one derived from
+    :func:`.cast`.
+
+    This is a *read only* attribute determined during mapper construction.
+    Behavior is undefined if directly modified.
+
+    """
+
+    polymorphic_map = None
+    """A mapping of "polymorphic identity" identifiers mapped to
+    :class:`.Mapper` instances, within an inheritance scenario.
+
+    The identifiers can be of any type which is comparable to the
+    type of column represented by :attr:`~.Mapper.polymorphic_on`.
+
+    An inheritance chain of mappers will all reference the same
+    polymorphic map object. 
The object is used to correlate incoming
+    result rows to target mappers.
+
+    This is a *read only* attribute determined during mapper construction.
+    Behavior is undefined if directly modified.
+
+    """
+
+    polymorphic_identity = None
+    """Represent an identifier which is matched against the
+    :attr:`~.Mapper.polymorphic_on` column during result row loading.
+
+    Used only with inheritance, this object can be of any type which is
+    comparable to the type of column represented by
+    :attr:`~.Mapper.polymorphic_on`.
+
+    This is a *read only* attribute determined during mapper construction.
+    Behavior is undefined if directly modified.
+
+    """
+
+    base_mapper = None
+    """The base-most :class:`.Mapper` in an inheritance chain.
+
+    In a non-inheriting scenario, this attribute will always be this
+    :class:`.Mapper`.   In an inheritance scenario, it references
+    the :class:`.Mapper` which is parent to all other :class:`.Mapper`
+    objects in the inheritance chain.
+
+    This is a *read only* attribute determined during mapper construction.
+    Behavior is undefined if directly modified.
+
+    """
+
+    columns = None
+    """A collection of :class:`.Column` or other scalar expression
+    objects maintained by this :class:`.Mapper`.
+
+    The collection behaves the same as that of the ``c`` attribute on
+    any :class:`.Table` object, except that only those columns included in
+    this mapping are present, and are keyed based on the attribute name
+    defined in the mapping, not necessarily the ``key`` attribute of the
+    :class:`.Column` itself.   Additionally, scalar expressions mapped
+    by :func:`.column_property` are also present here.
+
+    This is a *read only* attribute determined during mapper construction.
+    Behavior is undefined if directly modified.
+
+    """
+
+    validators = None
+    """An immutable dictionary of attributes which have been decorated
+    using the :func:`~.orm.validates` decorator.
+
+    The dictionary contains string attribute names as keys
+    mapped to the actual validation method.
+
+    """
+
+    c = None
+    """A synonym for :attr:`~.Mapper.columns`."""
+
+    @util.memoized_property
+    def _path_registry(self):
+        return PathRegistry.per_mapper(self)
+
+    def _configure_inheritance(self):
+        """Configure settings related to inheriting and/or inherited mappers
+        being present."""
+
+        # a set of all mappers which inherit from this one.
+        self._inheriting_mappers = util.WeakSequence()
+
+        if self.inherits:
+            if isinstance(self.inherits, type):
+                self.inherits = class_mapper(self.inherits, configure=False)
+            if not issubclass(self.class_, self.inherits.class_):
+                raise sa_exc.ArgumentError(
+                    "Class '%s' does not inherit from '%s'" %
+                    (self.class_.__name__, self.inherits.class_.__name__))
+            if self.non_primary != self.inherits.non_primary:
+                np = not self.non_primary and "primary" or "non-primary"
+                raise sa_exc.ArgumentError(
+                    "Inheritance of %s mapper for class '%s' is "
+                    "only allowed from a %s mapper" %
+                    (np, self.class_.__name__, np))
+            # inherit_condition is optional. 
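+            # the branches below pick the inheritance style (illustrative
+            # names): a Manager subclass with no table of its own
+            # (local_table is None) is single-table inheritance; one with
+            # its own table joins it to the parent's, deriving
+            # inherit_condition via join_condition() when not given;
+            # a concrete Manager maps its local table standalone.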
+ if self.local_table is None: + self.local_table = self.inherits.local_table + self.mapped_table = self.inherits.mapped_table + self.single = True + elif self.local_table is not self.inherits.local_table: + if self.concrete: + self.mapped_table = self.local_table + for mapper in self.iterate_to_root(): + if mapper.polymorphic_on is not None: + mapper._requires_row_aliasing = True + else: + if self.inherit_condition is None: + # figure out inherit condition from our table to the + # immediate table of the inherited mapper, not its + # full table which could pull in other stuff we don't + # want (allows test/inheritance.InheritTest4 to pass) + self.inherit_condition = sql_util.join_condition( + self.inherits.local_table, + self.local_table) + self.mapped_table = sql.join( + self.inherits.mapped_table, + self.local_table, + self.inherit_condition) + + fks = util.to_set(self.inherit_foreign_keys) + self._inherits_equated_pairs = \ + sql_util.criterion_as_pairs( + self.mapped_table.onclause, + consider_as_foreign_keys=fks) + else: + self.mapped_table = self.local_table + + if self.polymorphic_identity is not None and not self.concrete: + self._identity_class = self.inherits._identity_class + else: + self._identity_class = self.class_ + + if self.version_id_col is None: + self.version_id_col = self.inherits.version_id_col + self.version_id_generator = self.inherits.version_id_generator + elif self.inherits.version_id_col is not None and \ + self.version_id_col is not self.inherits.version_id_col: + util.warn( + "Inheriting version_id_col '%s' does not match inherited " + "version_id_col '%s' and will not automatically populate " + "the inherited versioning column. " + "version_id_col should only be specified on " + "the base-most mapper that includes versioning." % + (self.version_id_col.description, + self.inherits.version_id_col.description) + ) + + if self.order_by is False and \ + not self.concrete and \ + self.inherits.order_by is not False: + self.order_by = self.inherits.order_by + + self.polymorphic_map = self.inherits.polymorphic_map + self.batch = self.inherits.batch + self.inherits._inheriting_mappers.append(self) + self.base_mapper = self.inherits.base_mapper + self.passive_updates = self.inherits.passive_updates + self.passive_deletes = self.inherits.passive_deletes or \ + self.passive_deletes + self._all_tables = self.inherits._all_tables + + if self.polymorphic_identity is not None: + if self.polymorphic_identity in self.polymorphic_map: + util.warn( + "Reassigning polymorphic association for identity %r " + "from %r to %r: Check for duplicate use of %r as " + "value for polymorphic_identity." % + (self.polymorphic_identity, + self.polymorphic_map[self.polymorphic_identity], + self, self.polymorphic_identity) + ) + self.polymorphic_map[self.polymorphic_identity] = self + + else: + self._all_tables = set() + self.base_mapper = self + self.mapped_table = self.local_table + if self.polymorphic_identity is not None: + self.polymorphic_map[self.polymorphic_identity] = self + self._identity_class = self.class_ + + if self.mapped_table is None: + raise sa_exc.ArgumentError( + "Mapper '%s' does not have a mapped_table specified." 
+                % self)
+
+    def _set_with_polymorphic(self, with_polymorphic):
+        if with_polymorphic == '*':
+            self.with_polymorphic = ('*', None)
+        elif isinstance(with_polymorphic, (tuple, list)):
+            if isinstance(
+                    with_polymorphic[0], util.string_types + (tuple, list)):
+                self.with_polymorphic = with_polymorphic
+            else:
+                self.with_polymorphic = (with_polymorphic, None)
+        elif with_polymorphic is not None:
+            raise sa_exc.ArgumentError("Invalid setting for with_polymorphic")
+        else:
+            self.with_polymorphic = None
+
+        if isinstance(self.local_table, expression.SelectBase):
+            raise sa_exc.InvalidRequestError(
+                "When mapping against a select() construct, map against "
+                "an alias() of the construct instead. "
+                "This is because several databases don't allow a "
+                "SELECT from a subquery that does not have an alias."
+            )
+
+        if self.with_polymorphic and \
+                isinstance(self.with_polymorphic[1],
+                           expression.SelectBase):
+            self.with_polymorphic = (self.with_polymorphic[0],
+                                     self.with_polymorphic[1].alias())
+        if self.configured:
+            self._expire_memoizations()
+
+    def _set_concrete_base(self, mapper):
+        """Set the given :class:`.Mapper` as the 'inherits' for this
+        :class:`.Mapper`, assuming this :class:`.Mapper` is concrete
+        and does not already have an inherits."""
+
+        assert self.concrete
+        assert not self.inherits
+        assert isinstance(mapper, Mapper)
+        self.inherits = mapper
+        self.inherits.polymorphic_map.update(self.polymorphic_map)
+        self.polymorphic_map = self.inherits.polymorphic_map
+        for mapper in self.iterate_to_root():
+            if mapper.polymorphic_on is not None:
+                mapper._requires_row_aliasing = True
+        self.batch = self.inherits.batch
+        for mp in self.self_and_descendants:
+            mp.base_mapper = self.inherits.base_mapper
+        self.inherits._inheriting_mappers.append(self)
+        self.passive_updates = self.inherits.passive_updates
+        self._all_tables = self.inherits._all_tables
+        for key, prop in mapper._props.items():
+            if key not in self._props and \
+                    not self._should_exclude(key, key, local=False,
+                                             column=None):
+                self._adapt_inherited_property(key, prop, False)
+
+    def _set_polymorphic_on(self, polymorphic_on):
+        self.polymorphic_on = polymorphic_on
+        self._configure_polymorphic_setter(True)
+
+    def _configure_legacy_instrument_class(self):
+
+        if self.inherits:
+            self.dispatch._update(self.inherits.dispatch)
+            super_extensions = set(
+                chain(*[m._deprecated_extensions
+                        for m in self.inherits.iterate_to_root()]))
+        else:
+            super_extensions = set()
+
+        for ext in self._deprecated_extensions:
+            if ext not in super_extensions:
+                ext._adapt_instrument_class(self, ext)
+
+    def _configure_listeners(self):
+        if self.inherits:
+            super_extensions = set(
+                chain(*[m._deprecated_extensions
+                        for m in self.inherits.iterate_to_root()]))
+        else:
+            super_extensions = set()
+
+        for ext in self._deprecated_extensions:
+            if ext not in super_extensions:
+                ext._adapt_listener(self, ext)
+
+    def _configure_class_instrumentation(self):
+        """If this mapper is to be a primary mapper (i.e. the
+        non_primary flag is not set), associate this Mapper with the
+        given class_ and entity name.
+
+        Subsequent calls to ``class_mapper()`` for the class_/entity
+        name combination will return this mapper.  Also decorate the
+        `__init__` method on the mapped class to include optional
+        auto-session attachment logic.
+
+        """
+
+        manager = attributes.manager_of_class(self.class_)
+
+        if self.non_primary:
+            if not manager or not manager.is_mapped:
+                raise sa_exc.InvalidRequestError(
+                    "Class %s has no primary mapper configured. 
Configure " + "a primary mapper first before setting up a non primary " + "Mapper." % self.class_) + self.class_manager = manager + self._identity_class = manager.mapper._identity_class + _mapper_registry[self] = True + return + + if manager is not None: + assert manager.class_ is self.class_ + if manager.is_mapped: + raise sa_exc.ArgumentError( + "Class '%s' already has a primary mapper defined. " + "Use non_primary=True to " + "create a non primary Mapper. clear_mappers() will " + "remove *all* current mappers from all classes." % + self.class_) + # else: + # a ClassManager may already exist as + # ClassManager.instrument_attribute() creates + # new managers for each subclass if they don't yet exist. + + _mapper_registry[self] = True + + # note: this *must be called before instrumentation.register_class* + # to maintain the documented behavior of instrument_class + self.dispatch.instrument_class(self, self.class_) + + if manager is None: + manager = instrumentation.register_class(self.class_) + + self.class_manager = manager + + manager.mapper = self + manager.deferred_scalar_loader = util.partial( + loading.load_scalar_attributes, self) + + # The remaining members can be added by any mapper, + # e_name None or not. + if manager.info.get(_INSTRUMENTOR, False): + return + + event.listen(manager, 'first_init', _event_on_first_init, raw=True) + event.listen(manager, 'init', _event_on_init, raw=True) + + for key, method in util.iterate_attributes(self.class_): + if isinstance(method, types.FunctionType): + if hasattr(method, '__sa_reconstructor__'): + self._reconstructor = method + event.listen(manager, 'load', _event_on_load, raw=True) + elif hasattr(method, '__sa_validators__'): + validation_opts = method.__sa_validation_opts__ + for name in method.__sa_validators__: + if name in self.validators: + raise sa_exc.InvalidRequestError( + "A validation function for mapped " + "attribute %r on mapper %s already exists." % + (name, self)) + self.validators = self.validators.union( + {name: (method, validation_opts)} + ) + + manager.info[_INSTRUMENTOR] = self + + @classmethod + def _configure_all(cls): + """Class-level path to the :func:`.configure_mappers` call. + """ + configure_mappers() + + def dispose(self): + # Disable any attribute-based compilation. + self.configured = True + + if hasattr(self, '_configure_failed'): + del self._configure_failed + + if not self.non_primary and \ + self.class_manager is not None and \ + self.class_manager.is_mapped and \ + self.class_manager.mapper is self: + instrumentation.unregister_class(self.class_) + + def _configure_pks(self): + self.tables = sql_util.find_tables(self.mapped_table) + + self._pks_by_table = {} + self._cols_by_table = {} + + all_cols = util.column_set(chain(*[ + col.proxy_set for col in + self._columntoproperty])) + + pk_cols = util.column_set(c for c in all_cols if c.primary_key) + + # identify primary key columns which are also mapped by this mapper. 
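+        # e.g. (illustrative) for a joined-table Employee/Engineer mapping,
+        # the loop below fills in per-table collections along the lines of:
+        #   _pks_by_table = {employee_table: {employee_table.c.id},
+        #                    engineer_table: {engineer_table.c.id}}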
+ tables = set(self.tables + [self.mapped_table]) + self._all_tables.update(tables) + for t in tables: + if t.primary_key and pk_cols.issuperset(t.primary_key): + # ordering is important since it determines the ordering of + # mapper.primary_key (and therefore query.get()) + self._pks_by_table[t] = \ + util.ordered_column_set(t.primary_key).\ + intersection(pk_cols) + self._cols_by_table[t] = \ + util.ordered_column_set(t.c).\ + intersection(all_cols) + + # if explicit PK argument sent, add those columns to the + # primary key mappings + if self._primary_key_argument: + for k in self._primary_key_argument: + if k.table not in self._pks_by_table: + self._pks_by_table[k.table] = util.OrderedSet() + self._pks_by_table[k.table].add(k) + + # otherwise, see that we got a full PK for the mapped table + elif self.mapped_table not in self._pks_by_table or \ + len(self._pks_by_table[self.mapped_table]) == 0: + raise sa_exc.ArgumentError( + "Mapper %s could not assemble any primary " + "key columns for mapped table '%s'" % + (self, self.mapped_table.description)) + elif self.local_table not in self._pks_by_table and \ + isinstance(self.local_table, schema.Table): + util.warn("Could not assemble any primary " + "keys for locally mapped table '%s' - " + "no rows will be persisted in this Table." + % self.local_table.description) + + if self.inherits and \ + not self.concrete and \ + not self._primary_key_argument: + # if inheriting, the "primary key" for this mapper is + # that of the inheriting (unless concrete or explicit) + self.primary_key = self.inherits.primary_key + else: + # determine primary key from argument or mapped_table pks - + # reduce to the minimal set of columns + if self._primary_key_argument: + primary_key = sql_util.reduce_columns( + [self.mapped_table.corresponding_column(c) for c in + self._primary_key_argument], + ignore_nonexistent_tables=True) + else: + primary_key = sql_util.reduce_columns( + self._pks_by_table[self.mapped_table], + ignore_nonexistent_tables=True) + + if len(primary_key) == 0: + raise sa_exc.ArgumentError( + "Mapper %s could not assemble any primary " + "key columns for mapped table '%s'" % + (self, self.mapped_table.description)) + + self.primary_key = tuple(primary_key) + self._log("Identified primary key columns: %s", primary_key) + + # determine cols that aren't expressed within our tables; mark these + # as "read only" properties which are refreshed upon INSERT/UPDATE + self._readonly_props = set( + self._columntoproperty[col] + for col in self._columntoproperty + if self._columntoproperty[col] not in self._identity_key_props and + (not hasattr(col, 'table') or + col.table not in self._cols_by_table)) + + def _configure_properties(self): + # Column and other ClauseElement objects which are mapped + self.columns = self.c = util.OrderedProperties() + + # object attribute names mapped to MapperProperty objects + self._props = util.OrderedDict() + + # table columns mapped to lists of MapperProperty objects + # using a list allows a single column to be defined as + # populating multiple object attributes + self._columntoproperty = _ColumnMapping(self) + + # load custom properties + if self._init_properties: + for key, prop in self._init_properties.items(): + self._configure_property(key, prop, False) + + # pull properties from the inherited mapper if any. 
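+        # e.g. (illustrative) a 'name' ColumnProperty defined on a base
+        # Employee mapper is re-applied under the same key to an inheriting
+        # Engineer mapper here, unless excluded or overridden locally.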
+ if self.inherits: + for key, prop in self.inherits._props.items(): + if key not in self._props and \ + not self._should_exclude(key, key, local=False, + column=None): + self._adapt_inherited_property(key, prop, False) + + # create properties for each column in the mapped table, + # for those columns which don't already map to a property + for column in self.mapped_table.columns: + if column in self._columntoproperty: + continue + + column_key = (self.column_prefix or '') + column.key + + if self._should_exclude( + column.key, column_key, + local=self.local_table.c.contains_column(column), + column=column + ): + continue + + # adjust the "key" used for this column to that + # of the inheriting mapper + for mapper in self.iterate_to_root(): + if column in mapper._columntoproperty: + column_key = mapper._columntoproperty[column].key + + self._configure_property(column_key, + column, + init=False, + setparent=True) + + def _configure_polymorphic_setter(self, init=False): + """Configure an attribute on the mapper representing the + 'polymorphic_on' column, if applicable, and not + already generated by _configure_properties (which is typical). + + Also create a setter function which will assign this + attribute to the value of the 'polymorphic_identity' + upon instance construction, also if applicable. This + routine will run when an instance is created. + + """ + setter = False + + if self.polymorphic_on is not None: + setter = True + + if isinstance(self.polymorphic_on, util.string_types): + # polymorphic_on specified as a string - link + # it to mapped ColumnProperty + try: + self.polymorphic_on = self._props[self.polymorphic_on] + except KeyError: + raise sa_exc.ArgumentError( + "Can't determine polymorphic_on " + "value '%s' - no attribute is " + "mapped to this name." % self.polymorphic_on) + + if self.polymorphic_on in self._columntoproperty: + # polymorphic_on is a column that is already mapped + # to a ColumnProperty + prop = self._columntoproperty[self.polymorphic_on] + elif isinstance(self.polymorphic_on, MapperProperty): + # polymorphic_on is directly a MapperProperty, + # ensure it's a ColumnProperty + if not isinstance(self.polymorphic_on, + properties.ColumnProperty): + raise sa_exc.ArgumentError( + "Only direct column-mapped " + "property or SQL expression " + "can be passed for polymorphic_on") + prop = self.polymorphic_on + elif not expression._is_column(self.polymorphic_on): + # polymorphic_on is not a Column and not a ColumnProperty; + # not supported right now. + raise sa_exc.ArgumentError( + "Only direct column-mapped " + "property or SQL expression " + "can be passed for polymorphic_on" + ) + else: + # polymorphic_on is a Column or SQL expression and + # doesn't appear to be mapped. this means it can be 1. + # only present in the with_polymorphic selectable or + # 2. a totally standalone SQL expression which we'd + # hope is compatible with this mapper's mapped_table + col = self.mapped_table.corresponding_column( + self.polymorphic_on) + if col is None: + # polymorphic_on doesn't derive from any + # column/expression isn't present in the mapped + # table. we will make a "hidden" ColumnProperty + # for it. Just check that if it's directly a + # schema.Column and we have with_polymorphic, it's + # likely a user error if the schema.Column isn't + # represented somehow in either mapped_table or + # with_polymorphic. Otherwise as of 0.7.4 we + # just go with it and assume the user wants it + # that way (i.e. 
a CASE statement)
+                    setter = False
+                    instrument = False
+                    col = self.polymorphic_on
+                    if isinstance(col, schema.Column) and (
+                            self.with_polymorphic is None or
+                            self.with_polymorphic[1].
+                            corresponding_column(col) is None):
+                        raise sa_exc.InvalidRequestError(
+                            "Could not map polymorphic_on column "
+                            "'%s' to the mapped table - polymorphic "
+                            "loads will not function properly"
+                            % col.description)
+                else:
+                    # column/expression that polymorphic_on derives from
+                    # is present in our mapped table
+                    # and is probably mapped, but polymorphic_on itself
+                    # is not.  This happens when
+                    # the polymorphic_on is only directly present in the
+                    # with_polymorphic selectable, as when using
+                    # polymorphic_union.
+                    # we'll make a separate ColumnProperty for it.
+                    instrument = True
+                key = getattr(col, 'key', None)
+                if key:
+                    if self._should_exclude(col.key, col.key, False, col):
+                        raise sa_exc.InvalidRequestError(
+                            "Cannot exclude or override the "
+                            "discriminator column %r" %
+                            col.key)
+                else:
+                    self.polymorphic_on = col = \
+                        col.label("_sa_polymorphic_on")
+                    key = col.key
+
+                prop = properties.ColumnProperty(col, _instrument=instrument)
+                self._configure_property(key, prop, init=init, setparent=True)
+
+                # the actual polymorphic_on should be the first public-facing
+                # column in the property
+                self.polymorphic_on = prop.columns[0]
+                polymorphic_key = prop.key
+
+        else:
+            # no polymorphic_on was set.
+            # check inheriting mappers for one.
+            for mapper in self.iterate_to_root():
+                # determine if polymorphic_on of the parent
+                # should be propagated here.  If the col
+                # is present in our mapped table, or if our mapped
+                # table is the same as the parent (i.e. single table
+                # inheritance), we can use it
+                if mapper.polymorphic_on is not None:
+                    if self.mapped_table is mapper.mapped_table:
+                        self.polymorphic_on = mapper.polymorphic_on
+                    else:
+                        self.polymorphic_on = \
+                            self.mapped_table.corresponding_column(
+                                mapper.polymorphic_on)
+                    # we can use the parent mapper's _set_polymorphic_identity
+                    # directly; it ensures the polymorphic_identity of the
+                    # instance's mapper is used so is portable to subclasses. 
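+                    # e.g. (illustrative) in single-table inheritance the
+                    # child shares mapped_table with the parent, so the
+                    # parent's setter stamps each new child instance with
+                    # the child's own polymorphic_identity.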
+ if self.polymorphic_on is not None: + self._set_polymorphic_identity = \ + mapper._set_polymorphic_identity + self._validate_polymorphic_identity = \ + mapper._validate_polymorphic_identity + else: + self._set_polymorphic_identity = None + return + + if setter: + def _set_polymorphic_identity(state): + dict_ = state.dict + state.get_impl(polymorphic_key).set( + state, dict_, + state.manager.mapper.polymorphic_identity, + None) + + def _validate_polymorphic_identity(mapper, state, dict_): + if polymorphic_key in dict_ and \ + dict_[polymorphic_key] not in \ + mapper._acceptable_polymorphic_identities: + util.warn_limited( + "Flushing object %s with " + "incompatible polymorphic identity %r; the " + "object may not refresh and/or load correctly", + (state_str(state), dict_[polymorphic_key]) + ) + + self._set_polymorphic_identity = _set_polymorphic_identity + self._validate_polymorphic_identity = \ + _validate_polymorphic_identity + else: + self._set_polymorphic_identity = None + + _validate_polymorphic_identity = None + + @_memoized_configured_property + def _version_id_prop(self): + if self.version_id_col is not None: + return self._columntoproperty[self.version_id_col] + else: + return None + + @_memoized_configured_property + def _acceptable_polymorphic_identities(self): + identities = set() + + stack = deque([self]) + while stack: + item = stack.popleft() + if item.mapped_table is self.mapped_table: + identities.add(item.polymorphic_identity) + stack.extend(item._inheriting_mappers) + + return identities + + @_memoized_configured_property + def _prop_set(self): + return frozenset(self._props.values()) + + def _adapt_inherited_property(self, key, prop, init): + if not self.concrete: + self._configure_property(key, prop, init=False, setparent=False) + elif key not in self._props: + self._configure_property( + key, + properties.ConcreteInheritedProperty(), + init=init, setparent=True) + + def _configure_property(self, key, prop, init=True, setparent=True): + self._log("_configure_property(%s, %s)", key, prop.__class__.__name__) + + if not isinstance(prop, MapperProperty): + prop = self._property_from_column(key, prop) + + if isinstance(prop, properties.ColumnProperty): + col = self.mapped_table.corresponding_column(prop.columns[0]) + + # if the column is not present in the mapped table, + # test if a column has been added after the fact to the + # parent table (or their parent, etc.) [ticket:1570] + if col is None and self.inherits: + path = [self] + for m in self.inherits.iterate_to_root(): + col = m.local_table.corresponding_column(prop.columns[0]) + if col is not None: + for m2 in path: + m2.mapped_table._reset_exported() + col = self.mapped_table.corresponding_column( + prop.columns[0]) + break + path.append(m) + + # subquery expression, column not present in the mapped + # selectable. + if col is None: + col = prop.columns[0] + + # column is coming in after _readonly_props was + # initialized; check for 'readonly' + if hasattr(self, '_readonly_props') and \ + (not hasattr(col, 'table') or + col.table not in self._cols_by_table): + self._readonly_props.add(prop) + + else: + # if column is coming in after _cols_by_table was + # initialized, ensure the col is in the right set + if hasattr(self, '_cols_by_table') and \ + col.table in self._cols_by_table and \ + col not in self._cols_by_table[col.table]: + self._cols_by_table[col.table].add(col) + + # if this properties.ColumnProperty represents the "polymorphic + # discriminator" column, mark it. 
We'll need this when rendering
+            # columns in SELECT statements.
+            if not hasattr(prop, '_is_polymorphic_discriminator'):
+                prop._is_polymorphic_discriminator = \
+                    (col is self.polymorphic_on or
+                     prop.columns[0] is self.polymorphic_on)
+
+            self.columns[key] = col
+            for col in prop.columns + prop._orig_columns:
+                for col in col.proxy_set:
+                    self._columntoproperty[col] = prop
+
+        prop.key = key
+
+        if setparent:
+            prop.set_parent(self, init)
+
+        if key in self._props and \
+                getattr(self._props[key], '_mapped_by_synonym', False):
+            syn = self._props[key]._mapped_by_synonym
+            raise sa_exc.ArgumentError(
+                "Can't call map_column=True for synonym %r=%r, "
+                "a ColumnProperty already exists keyed to the name "
+                "%r for column %r" % (syn, key, key, syn)
+            )
+
+        if key in self._props and \
+                not isinstance(prop, properties.ColumnProperty) and \
+                not isinstance(
+                    self._props[key],
+                    (
+                        properties.ColumnProperty,
+                        properties.ConcreteInheritedProperty)
+                ):
+            util.warn("Property %s on %s being replaced with new "
+                      "property %s; the old property will be discarded" % (
+                          self._props[key],
+                          self,
+                          prop,
+                      ))
+            oldprop = self._props[key]
+            self._path_registry.pop(oldprop, None)
+
+        self._props[key] = prop
+
+        if not self.non_primary:
+            prop.instrument_class(self)
+
+        for mapper in self._inheriting_mappers:
+            mapper._adapt_inherited_property(key, prop, init)
+
+        if init:
+            prop.init()
+            prop.post_instrument_class(self)
+
+        if self.configured:
+            self._expire_memoizations()
+
+    def _property_from_column(self, key, prop):
+        """generate/update a :class:`.ColumnProperty` given a
+        :class:`.Column` object. """
+
+        # we were passed a Column or a list of Columns;
+        # generate a properties.ColumnProperty
+        columns = util.to_list(prop)
+        column = columns[0]
+        if not expression._is_column(column):
+            raise sa_exc.ArgumentError(
+                "%s=%r is not an instance of MapperProperty or Column"
+                % (key, prop))
+
+        prop = self._props.get(key, None)
+
+        if isinstance(prop, properties.ColumnProperty):
+            if (
+                not self._inherits_equated_pairs or
+                (prop.columns[0], column) not in self._inherits_equated_pairs
+            ) and \
+                    not prop.columns[0].shares_lineage(column) and \
+                    prop.columns[0] is not self.version_id_col and \
+                    column is not self.version_id_col:
+                warn_only = prop.parent is not self
+                msg = ("Implicitly combining column %s with column "
+                       "%s under attribute '%s'.  Please configure one "
+                       "or more attributes for these same-named columns "
+                       "explicitly." % (prop.columns[-1], column, key))
+                if warn_only:
+                    util.warn(msg)
+                else:
+                    raise sa_exc.InvalidRequestError(msg)
+
+            # existing properties.ColumnProperty from an inheriting
+            # mapper. make a copy and append our column to it
+            prop = prop.copy()
+            prop.columns.insert(0, column)
+            self._log("inserting column to existing list "
+                      "in properties.ColumnProperty %s" % (key))
+            return prop
+        elif prop is None or isinstance(prop,
+                                        properties.ConcreteInheritedProperty):
+            mapped_column = []
+            for c in columns:
+                mc = self.mapped_table.corresponding_column(c)
+                if mc is None:
+                    mc = self.local_table.corresponding_column(c)
+                    if mc is not None:
+                        # if the column is in the local table but not the
+                        # mapped table, this corresponds to adding a
+                        # column after the fact to the local table.
+                        # [ticket:1523]
+                        self.mapped_table._reset_exported()
+                    mc = self.mapped_table.corresponding_column(c)
+                    if mc is None:
+                        raise sa_exc.ArgumentError(
+                            "When configuring property '%s' on %s, "
+                            "column '%s' is not represented in the mapper's "
+                            "table. 
Use the `column_property()` function to "
+                            "force this column to be mapped as a read-only "
+                            "attribute." % (key, self, c))
+                mapped_column.append(mc)
+            return properties.ColumnProperty(*mapped_column)
+        else:
+            raise sa_exc.ArgumentError(
+                "WARNING: when configuring property '%s' on %s, "
+                "column '%s' conflicts with property '%r'. "
+                "To resolve this, map the column to the class under a "
+                "different name in the 'properties' dictionary.  Or, "
+                "to remove all awareness of the column entirely "
+                "(including its availability as a foreign key), "
+                "use the 'include_properties' or 'exclude_properties' "
+                "mapper arguments to control specifically which table "
+                "columns get mapped." %
+                (key, self, column.key, prop))
+
+    def _post_configure_properties(self):
+        """Call the ``init()`` method on all ``MapperProperties``
+        attached to this mapper.
+
+        This is a deferred configuration step which is intended
+        to execute once all mappers have been constructed.
+
+        """
+
+        self._log("_post_configure_properties() started")
+        l = [(key, prop) for key, prop in self._props.items()]
+        for key, prop in l:
+            self._log("initialize prop %s", key)
+
+            if prop.parent is self and not prop._configure_started:
+                prop.init()
+
+            if prop._configure_finished:
+                prop.post_instrument_class(self)
+
+        self._log("_post_configure_properties() complete")
+        self.configured = True
+
+    def add_properties(self, dict_of_properties):
+        """Add the given dictionary of properties to this mapper,
+        using `add_property`.
+
+        """
+        for key, value in dict_of_properties.items():
+            self.add_property(key, value)
+
+    def add_property(self, key, prop):
+        """Add an individual MapperProperty to this mapper.
+
+        If the mapper has not been configured yet, just adds the
+        property to the initial properties dictionary sent to the
+        constructor.  If this Mapper has already been configured, then
+        the given MapperProperty is configured immediately.
+
+        """
+        self._init_properties[key] = prop
+        self._configure_property(key, prop, init=self.configured)
+
+    def _expire_memoizations(self):
+        for mapper in self.iterate_to_root():
+            _memoized_configured_property.expire_instance(mapper)
+
+    @property
+    def _log_desc(self):
+        return "(" + self.class_.__name__ + \
+            "|" + \
+            (self.local_table is not None and
+                self.local_table.description or
+                str(self.local_table)) +\
+            (self.non_primary and
+             "|non-primary" or "") + ")"
+
+    def _log(self, msg, *args):
+        self.logger.info(
+            "%s " + msg, *((self._log_desc,) + args)
+        )
+
+    def _log_debug(self, msg, *args):
+        self.logger.debug(
+            "%s " + msg, *((self._log_desc,) + args)
+        )
+
+    def __repr__(self):
+        return '<Mapper at 0x%x; %s>' % (
+            id(self), self.class_.__name__)
+
+    def __str__(self):
+        return "Mapper|%s|%s%s" % (
+            self.class_.__name__,
+            self.local_table is not None and
+            self.local_table.description or None,
+            self.non_primary and "|non-primary" or ""
+        )
+
+    def _is_orphan(self, state):
+        orphan_possible = False
+        for mapper in self.iterate_to_root():
+            for (key, cls) in mapper._delete_orphans:
+                orphan_possible = True
+
+                has_parent = attributes.manager_of_class(cls).has_parent(
+                    state, key, optimistic=state.has_identity)
+
+                if self.legacy_is_orphan and has_parent:
+                    return False
+                elif not self.legacy_is_orphan and not has_parent:
+                    return True
+
+        if self.legacy_is_orphan:
+            return orphan_possible
+        else:
+            return False
+
+    def has_property(self, key):
+        return key in self._props
+
+    def get_property(self, key, _configure_mappers=True):
+        """return a MapperProperty associated with the given key. 
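+
+        E.g., for a hypothetical ``User`` mapping (illustrative)::
+
+            from sqlalchemy import inspect
+
+            name_property = inspect(User).get_property("name")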
+ """ + + if _configure_mappers and Mapper._new_mappers: + configure_mappers() + + try: + return self._props[key] + except KeyError: + raise sa_exc.InvalidRequestError( + "Mapper '%s' has no property '%s'" % (self, key)) + + def get_property_by_column(self, column): + """Given a :class:`.Column` object, return the + :class:`.MapperProperty` which maps this column.""" + + return self._columntoproperty[column] + + @property + def iterate_properties(self): + """return an iterator of all MapperProperty objects.""" + if Mapper._new_mappers: + configure_mappers() + return iter(self._props.values()) + + def _mappers_from_spec(self, spec, selectable): + """given a with_polymorphic() argument, return the set of mappers it + represents. + + Trims the list of mappers to just those represented within the given + selectable, if present. This helps some more legacy-ish mappings. + + """ + if spec == '*': + mappers = list(self.self_and_descendants) + elif spec: + mappers = set() + for m in util.to_list(spec): + m = _class_to_mapper(m) + if not m.isa(self): + raise sa_exc.InvalidRequestError( + "%r does not inherit from %r" % + (m, self)) + + if selectable is None: + mappers.update(m.iterate_to_root()) + else: + mappers.add(m) + mappers = [m for m in self.self_and_descendants if m in mappers] + else: + mappers = [] + + if selectable is not None: + tables = set(sql_util.find_tables(selectable, + include_aliases=True)) + mappers = [m for m in mappers if m.local_table in tables] + return mappers + + def _selectable_from_mappers(self, mappers, innerjoin): + """given a list of mappers (assumed to be within this mapper's + inheritance hierarchy), construct an outerjoin amongst those mapper's + mapped tables. + + """ + from_obj = self.mapped_table + for m in mappers: + if m is self: + continue + if m.concrete: + raise sa_exc.InvalidRequestError( + "'with_polymorphic()' requires 'selectable' argument " + "when concrete-inheriting mappers are used.") + elif not m.single: + if innerjoin: + from_obj = from_obj.join(m.local_table, + m.inherit_condition) + else: + from_obj = from_obj.outerjoin(m.local_table, + m.inherit_condition) + + return from_obj + + @_memoized_configured_property + def _single_table_criterion(self): + if self.single and \ + self.inherits and \ + self.polymorphic_on is not None: + return self.polymorphic_on.in_( + m.polymorphic_identity + for m in self.self_and_descendants) + else: + return None + + @_memoized_configured_property + def _with_polymorphic_mappers(self): + if Mapper._new_mappers: + configure_mappers() + if not self.with_polymorphic: + return [] + return self._mappers_from_spec(*self.with_polymorphic) + + @_memoized_configured_property + def _with_polymorphic_selectable(self): + if not self.with_polymorphic: + return self.mapped_table + + spec, selectable = self.with_polymorphic + if selectable is not None: + return selectable + else: + return self._selectable_from_mappers( + self._mappers_from_spec(spec, selectable), + False) + + with_polymorphic_mappers = _with_polymorphic_mappers + """The list of :class:`.Mapper` objects included in the + default "polymorphic" query. 
+ + """ + + @_memoized_configured_property + def _insert_cols_evaluating_none(self): + return dict( + ( + table, + frozenset( + col.key for col in columns + if col.type.should_evaluate_none + ) + ) + for table, columns in self._cols_by_table.items() + ) + + @_memoized_configured_property + def _insert_cols_as_none(self): + return dict( + ( + table, + frozenset( + col.key for col in columns + if not col.primary_key and + not col.server_default and not col.default + and not col.type.should_evaluate_none) + ) + for table, columns in self._cols_by_table.items() + ) + + @_memoized_configured_property + def _propkey_to_col(self): + return dict( + ( + table, + dict( + (self._columntoproperty[col].key, col) + for col in columns + ) + ) + for table, columns in self._cols_by_table.items() + ) + + @_memoized_configured_property + def _pk_keys_by_table(self): + return dict( + ( + table, + frozenset([col.key for col in pks]) + ) + for table, pks in self._pks_by_table.items() + ) + + @_memoized_configured_property + def _pk_attr_keys_by_table(self): + return dict( + ( + table, + frozenset([self._columntoproperty[col].key for col in pks]) + ) + for table, pks in self._pks_by_table.items() + ) + + @_memoized_configured_property + def _server_default_cols(self): + return dict( + ( + table, + frozenset([ + col.key for col in columns + if col.server_default is not None]) + ) + for table, columns in self._cols_by_table.items() + ) + + @_memoized_configured_property + def _server_default_plus_onupdate_propkeys(self): + result = set() + + for table, columns in self._cols_by_table.items(): + for col in columns: + if ( + ( + col.server_default is not None or + col.server_onupdate is not None + ) and col in self._columntoproperty + ): + result.add(self._columntoproperty[col].key) + + return result + + @_memoized_configured_property + def _server_onupdate_default_cols(self): + return dict( + ( + table, + frozenset([ + col.key for col in columns + if col.server_onupdate is not None]) + ) + for table, columns in self._cols_by_table.items() + ) + + @property + def selectable(self): + """The :func:`.select` construct this :class:`.Mapper` selects from + by default. + + Normally, this is equivalent to :attr:`.mapped_table`, unless + the ``with_polymorphic`` feature is in use, in which case the + full "polymorphic" selectable is returned. 
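+
+        E.g., for a hypothetical ``User`` class mapped directly to a
+        ``user`` table (illustrative)::
+
+            from sqlalchemy import inspect
+
+            print(inspect(User).selectable)  # the user Table, typically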
+
+        """
+        return self._with_polymorphic_selectable
+
+    def _with_polymorphic_args(self, spec=None, selectable=False,
+                               innerjoin=False):
+        if self.with_polymorphic:
+            if not spec:
+                spec = self.with_polymorphic[0]
+            if selectable is False:
+                selectable = self.with_polymorphic[1]
+        elif selectable is False:
+            selectable = None
+        mappers = self._mappers_from_spec(spec, selectable)
+        if selectable is not None:
+            return mappers, selectable
+        else:
+            return mappers, self._selectable_from_mappers(mappers,
+                                                          innerjoin)
+
+    @_memoized_configured_property
+    def _polymorphic_properties(self):
+        return list(self._iterate_polymorphic_properties(
+            self._with_polymorphic_mappers))
+
+    def _iterate_polymorphic_properties(self, mappers=None):
+        """Return an iterator of MapperProperty objects which will render into
+        a SELECT."""
+        if mappers is None:
+            mappers = self._with_polymorphic_mappers
+
+        if not mappers:
+            for c in self.iterate_properties:
+                yield c
+        else:
+            # in the polymorphic case, filter out discriminator columns
+            # from other mappers, as these are sometimes dependent on that
+            # mapper's polymorphic selectable (which we don't want rendered)
+            for c in util.unique_list(
+                chain(*[
+                    list(mapper.iterate_properties) for mapper in
+                    [self] + mappers
+                ])
+            ):
+                if getattr(c, '_is_polymorphic_discriminator', False) and \
+                        (self.polymorphic_on is None or
+                         c.columns[0] is not self.polymorphic_on):
+                    continue
+                yield c
+
+    @_memoized_configured_property
+    def attrs(self):
+        """A namespace of all :class:`.MapperProperty` objects
+        associated with this mapper.
+
+        This is an object that provides each property based on
+        its key name.  For instance, the mapper for a
+        ``User`` class which has ``User.name`` attribute would
+        provide ``mapper.attrs.name``, which would be the
+        :class:`.ColumnProperty` representing the ``name``
+        column.   The namespace object can also be iterated,
+        which would yield each :class:`.MapperProperty`.
+
+        :class:`.Mapper` has several pre-filtered views
+        of this attribute which limit the types of properties
+        returned, including :attr:`.synonyms`, :attr:`.column_attrs`,
+        :attr:`.relationships`, and :attr:`.composites`.
+
+        .. warning::
+
+            The :attr:`.Mapper.attrs` accessor namespace is an
+            instance of :class:`.OrderedProperties`.  This is
+            a dictionary-like object which includes a small number of
+            named methods such as :meth:`.OrderedProperties.items`
+            and :meth:`.OrderedProperties.values`.  When
+            accessing attributes dynamically, favor using the dict-access
+            scheme, e.g. ``mapper.attrs[somename]`` over
+            ``getattr(mapper.attrs, somename)`` to avoid name collisions.
+
+        .. seealso::
+
+            :attr:`.Mapper.all_orm_descriptors`
+
+        """
+        if Mapper._new_mappers:
+            configure_mappers()
+        return util.ImmutableProperties(self._props)
+
+    @_memoized_configured_property
+    def all_orm_descriptors(self):
+        """A namespace of all :class:`.InspectionAttr` attributes associated
+        with the mapped class.
+
+        These attributes are in all cases Python :term:`descriptors`
+        associated with the mapped class or its superclasses.
+
+        This namespace includes attributes that are mapped to the class
+        as well as attributes declared by extension modules.
+        It includes any Python descriptor type that inherits from
+        :class:`.InspectionAttr`.  This includes
+        :class:`.QueryableAttribute`, as well as extension types such as
+        :class:`.hybrid_property`, :class:`.hybrid_method` and
+        :class:`.AssociationProxy`. 
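+
+        E.g., listing the descriptors of a hypothetical ``User`` class
+        (illustrative)::
+
+            from sqlalchemy import inspect
+
+            for key, attr in inspect(User).all_orm_descriptors.items():
+                print(key, type(attr))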
+ + To distinguish between mapped attributes and extension attributes, + the attribute :attr:`.InspectionAttr.extension_type` will refer + to a constant that distinguishes between different extension types. + + When dealing with a :class:`.QueryableAttribute`, the + :attr:`.QueryableAttribute.property` attribute refers to the + :class:`.MapperProperty` property, which is what you get when + referring to the collection of mapped properties via + :attr:`.Mapper.attrs`. + + .. warning:: + + The :attr:`.Mapper.all_orm_descriptors` accessor namespace is an + instance of :class:`.OrderedProperties`. This is + a dictionary-like object which includes a small number of + named methods such as :meth:`.OrderedProperties.items` + and :meth:`.OrderedProperties.values`. When + accessing attributes dynamically, favor using the dict-access + scheme, e.g. ``mapper.all_orm_descriptors[somename]`` over + ``getattr(mapper.all_orm_descriptors, somename)`` to avoid name + collisions. + + .. versionadded:: 0.8.0 + + .. seealso:: + + :attr:`.Mapper.attrs` + + """ + return util.ImmutableProperties( + dict(self.class_manager._all_sqla_attributes())) + + @_memoized_configured_property + def synonyms(self): + """Return a namespace of all :class:`.SynonymProperty` + properties maintained by this :class:`.Mapper`. + + .. seealso:: + + :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` + objects. + + """ + return self._filter_properties(properties.SynonymProperty) + + @_memoized_configured_property + def column_attrs(self): + """Return a namespace of all :class:`.ColumnProperty` + properties maintained by this :class:`.Mapper`. + + .. seealso:: + + :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` + objects. + + """ + return self._filter_properties(properties.ColumnProperty) + + @_memoized_configured_property + def relationships(self): + """A namespace of all :class:`.RelationshipProperty` properties + maintained by this :class:`.Mapper`. + + .. warning:: + + the :attr:`.Mapper.relationships` accessor namespace is an + instance of :class:`.OrderedProperties`. This is + a dictionary-like object which includes a small number of + named methods such as :meth:`.OrderedProperties.items` + and :meth:`.OrderedProperties.values`. When + accessing attributes dynamically, favor using the dict-access + scheme, e.g. ``mapper.relationships[somename]`` over + ``getattr(mapper.relationships, somename)`` to avoid name + collisions. + + .. seealso:: + + :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` + objects. + + """ + return self._filter_properties(properties.RelationshipProperty) + + @_memoized_configured_property + def composites(self): + """Return a namespace of all :class:`.CompositeProperty` + properties maintained by this :class:`.Mapper`. + + .. seealso:: + + :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` + objects. + + """ + return self._filter_properties(properties.CompositeProperty) + + def _filter_properties(self, type_): + if Mapper._new_mappers: + configure_mappers() + return util.ImmutableProperties(util.OrderedDict( + (k, v) for k, v in self._props.items() + if isinstance(v, type_) + )) + + @_memoized_configured_property + def _get_clause(self): + """create a "get clause" based on the primary key. this is used + by query.get() and many-to-one lazyloads to load this item + by primary key. 
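+
+        E.g., for a mapping with a single-column primary key ``user.id``,
+        the resulting clause resembles (illustrative)::
+
+            user.id = :param_1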
+
+        """
+        params = [(primary_key, sql.bindparam(None, type_=primary_key.type))
+                  for primary_key in self.primary_key]
+        return sql.and_(*[k == v for (k, v) in params]), \
+            util.column_dict(params)
+
+    @_memoized_configured_property
+    def _equivalent_columns(self):
+        """Create a map of all *equivalent* columns, based on
+        the determination of column pairs that are equated to
+        one another based on inherit condition.  This is designed
+        to work with the queries that util.polymorphic_union
+        comes up with, which often don't include the columns from
+        the base table directly (including the subclass table columns
+        only).
+
+        The resulting structure is a dictionary of columns mapped
+        to lists of equivalent columns, i.e.
+
+        {
+            tablea.col1:
+                set([tableb.col1, tablec.col1]),
+            tablea.col2:
+                set([tabled.col2])
+        }
+
+        """
+        result = util.column_dict()
+
+        def visit_binary(binary):
+            if binary.operator == operators.eq:
+                if binary.left in result:
+                    result[binary.left].add(binary.right)
+                else:
+                    result[binary.left] = util.column_set((binary.right,))
+                if binary.right in result:
+                    result[binary.right].add(binary.left)
+                else:
+                    result[binary.right] = util.column_set((binary.left,))
+        for mapper in self.base_mapper.self_and_descendants:
+            if mapper.inherit_condition is not None:
+                visitors.traverse(
+                    mapper.inherit_condition, {},
+                    {'binary': visit_binary})
+
+        return result
+
+    def _is_userland_descriptor(self, obj):
+        if isinstance(obj, (_MappedAttribute,
+                            instrumentation.ClassManager,
+                            expression.ColumnElement)):
+            return False
+        else:
+            return True
+
+    def _should_exclude(self, name, assigned_name, local, column):
+        """determine whether a particular property should be implicitly
+        present on the class.
+
+        This occurs when properties are propagated from an inherited class, or
+        are applied from the columns present in the mapped table.
+
+        """
+
+        # check for class-bound attributes and/or descriptors,
+        # either local or from an inherited class
+        if local:
+            if self.class_.__dict__.get(assigned_name, None) is not None \
+                    and self._is_userland_descriptor(
+                        self.class_.__dict__[assigned_name]):
+                return True
+        else:
+            if getattr(self.class_, assigned_name, None) is not None \
+                    and self._is_userland_descriptor(
+                        getattr(self.class_, assigned_name)):
+                return True
+
+        if self.include_properties is not None and \
+                name not in self.include_properties and \
+                (column is None or column not in self.include_properties):
+            self._log("not including property %s" % (name))
+            return True
+
+        if self.exclude_properties is not None and \
+                (
+                    name in self.exclude_properties or
+                    (column is not None and column in self.exclude_properties)
+                ):
+            self._log("excluding property %s" % (name))
+            return True
+
+        return False
+
+    def common_parent(self, other):
+        """Return true if the given mapper shares a
+        common inherited parent with this mapper."""
+
+        return self.base_mapper is other.base_mapper
+
+    def _canload(self, state, allow_subtypes):
+        s = self.primary_mapper()
+        if self.polymorphic_on is not None or allow_subtypes:
+            return _state_mapper(state).isa(s)
+        else:
+            return _state_mapper(state) is s
+
+    def isa(self, other):
+        """Return True if this mapper inherits from the given mapper."""
+
+        m = self
+        while m and m is not other:
+            m = m.inherits
+        return bool(m)
+
+    def iterate_to_root(self):
+        m = self
+        while m:
+            yield m
+            m = m.inherits
+
+    @_memoized_configured_property
+    def self_and_descendants(self):
+        """The collection including this mapper and all descendant mappers. 
+
+        This includes not just the immediately inheriting mappers but
+        all their inheriting mappers as well.
+
+        """
+        descendants = []
+        stack = deque([self])
+        while stack:
+            item = stack.popleft()
+            descendants.append(item)
+            stack.extend(item._inheriting_mappers)
+        return util.WeakSequence(descendants)
+
+    def polymorphic_iterator(self):
+        """Iterate through the collection including this mapper and
+        all descendant mappers.
+
+        This includes not just the immediately inheriting mappers but
+        all their inheriting mappers as well.
+
+        To iterate through an entire hierarchy, use
+        ``mapper.base_mapper.polymorphic_iterator()``.
+
+        """
+        return iter(self.self_and_descendants)
+
+    def primary_mapper(self):
+        """Return the primary mapper corresponding to this mapper's class key
+        (class)."""
+
+        return self.class_manager.mapper
+
+    @property
+    def primary_base_mapper(self):
+        return self.class_manager.mapper.base_mapper
+
+    def _result_has_identity_key(self, result, adapter=None):
+        pk_cols = self.primary_key
+        if adapter:
+            pk_cols = [adapter.columns[c] for c in pk_cols]
+        for col in pk_cols:
+            if not result._has_key(col):
+                return False
+        else:
+            return True
+
+    def identity_key_from_row(self, row, adapter=None):
+        """Return an identity-map key for use in storing/retrieving an
+        item from the identity map.
+
+        :param row: A :class:`.RowProxy` instance.  The columns which are
+         mapped by this :class:`.Mapper` should be locatable in the row,
+         preferably via the :class:`.Column` object directly (as is the case
+         when a :func:`.select` construct is executed), or via string names of
+         the form ``<tablename>_<colname>``.
+
+        """
+        pk_cols = self.primary_key
+        if adapter:
+            pk_cols = [adapter.columns[c] for c in pk_cols]
+
+        return self._identity_class, \
+            tuple(row[column] for column in pk_cols)
+
+    def identity_key_from_primary_key(self, primary_key):
+        """Return an identity-map key for use in storing/retrieving an
+        item from an identity map.
+
+        :param primary_key: A list of values indicating the identifier.
+
+        """
+        return self._identity_class, tuple(primary_key)
+
+    def identity_key_from_instance(self, instance):
+        """Return the identity key for the given instance, based on
+        its primary key attributes.
+
+        If the instance's state is expired, calling this method
+        will result in a database check to see if the object has been deleted.
+        If the row no longer exists,
+        :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
+
+        This value is typically also found on the instance state under the
+        attribute name `key`.
+
+        """
+        return self.identity_key_from_primary_key(
+            self.primary_key_from_instance(instance))
+
+    def _identity_key_from_state(self, state):
+        dict_ = state.dict
+        manager = state.manager
+        return self._identity_class, tuple([
+            manager[self._columntoproperty[col].key].
+            impl.get(state, dict_, attributes.PASSIVE_RETURN_NEVER_SET)
+            for col in self.primary_key
+        ])
+
+    def primary_key_from_instance(self, instance):
+        """Return the list of primary key values for the given
+        instance.
+
+        If the instance's state is expired, calling this method
+        will result in a database check to see if the object has been deleted.
+        If the row no longer exists,
+        :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
+
+        """
+        state = attributes.instance_state(instance)
+        return self._primary_key_from_state(state, attributes.PASSIVE_OFF)
+
+    def _primary_key_from_state(
+            self, state, passive=attributes.PASSIVE_RETURN_NEVER_SET):
+        dict_ = state.dict
+        manager = state.manager
+        return [
+            manager[prop.key]. 
+ impl.get(state, dict_, passive) + for prop in self._identity_key_props + ] + + @_memoized_configured_property + def _identity_key_props(self): + return [self._columntoproperty[col] for col in self.primary_key] + + @_memoized_configured_property + def _all_pk_props(self): + collection = set() + for table in self.tables: + collection.update(self._pks_by_table[table]) + return collection + + @_memoized_configured_property + def _should_undefer_in_wildcard(self): + cols = set(self.primary_key) + if self.polymorphic_on is not None: + cols.add(self.polymorphic_on) + return cols + + @_memoized_configured_property + def _primary_key_propkeys(self): + return set([prop.key for prop in self._all_pk_props]) + + def _get_state_attr_by_column( + self, state, dict_, column, + passive=attributes.PASSIVE_RETURN_NEVER_SET): + prop = self._columntoproperty[column] + return state.manager[prop.key].impl.get(state, dict_, passive=passive) + + def _set_committed_state_attr_by_column(self, state, dict_, column, value): + prop = self._columntoproperty[column] + state.manager[prop.key].impl.set_committed_value(state, dict_, value) + + def _set_state_attr_by_column(self, state, dict_, column, value): + prop = self._columntoproperty[column] + state.manager[prop.key].impl.set(state, dict_, value, None) + + def _get_committed_attr_by_column(self, obj, column): + state = attributes.instance_state(obj) + dict_ = attributes.instance_dict(obj) + return self._get_committed_state_attr_by_column( + state, dict_, column, passive=attributes.PASSIVE_OFF) + + def _get_committed_state_attr_by_column( + self, state, dict_, column, + passive=attributes.PASSIVE_RETURN_NEVER_SET): + + prop = self._columntoproperty[column] + return state.manager[prop.key].impl.\ + get_committed_value(state, dict_, passive=passive) + + def _optimized_get_statement(self, state, attribute_names): + """assemble a WHERE clause which retrieves a given state by primary + key, using a minimized set of tables. + + Applies to a joined-table inheritance mapper where the + requested attribute names are only present on joined tables, + not the base table. The WHERE clause attempts to include + only those tables to minimize joins. 
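For context, here is a minimal sketch of the joined-table inheritance scenario this optimization targets (class and column names are hypothetical, not part of this diff): when the only expired attributes live on the joined subclass table, the refresh SELECT can skip the base table.

```python
# Hypothetical joined-table inheritance mapping; "engineer_info" exists
# only on the joined "engineer" table, not on the base "employee" table.
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session

Base = declarative_base()

class Employee(Base):
    __tablename__ = 'employee'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    type = Column(String(20))
    __mapper_args__ = {'polymorphic_on': type,
                       'polymorphic_identity': 'employee'}

class Engineer(Employee):
    __tablename__ = 'engineer'
    id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
    engineer_info = Column(String(50))
    __mapper_args__ = {'polymorphic_identity': 'engineer'}

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

session = Session(engine)
session.add(Engineer(name='e1', engineer_info='tps reports'))
session.commit()

e = session.query(Engineer).first()
session.expire(e, ['engineer_info'])
# Refreshing the subclass-only attribute can SELECT from "engineer"
# alone, binding the committed primary key value instead of joining
# back to "employee".
print(e.engineer_info)
```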
+ + """ + props = self._props + + tables = set(chain( + *[sql_util.find_tables(c, check_columns=True) + for key in attribute_names + for c in props[key].columns] + )) + + if self.base_mapper.local_table in tables: + return None + + class ColumnsNotAvailable(Exception): + pass + + def visit_binary(binary): + leftcol = binary.left + rightcol = binary.right + if leftcol is None or rightcol is None: + return + + if leftcol.table not in tables: + leftval = self._get_committed_state_attr_by_column( + state, state.dict, + leftcol, + passive=attributes.PASSIVE_NO_INITIALIZE) + if leftval in orm_util._none_set: + raise ColumnsNotAvailable() + binary.left = sql.bindparam(None, leftval, + type_=binary.right.type) + elif rightcol.table not in tables: + rightval = self._get_committed_state_attr_by_column( + state, state.dict, + rightcol, + passive=attributes.PASSIVE_NO_INITIALIZE) + if rightval in orm_util._none_set: + raise ColumnsNotAvailable() + binary.right = sql.bindparam(None, rightval, + type_=binary.right.type) + + allconds = [] + + try: + start = False + for mapper in reversed(list(self.iterate_to_root())): + if mapper.local_table in tables: + start = True + elif not isinstance(mapper.local_table, + expression.TableClause): + return None + if start and not mapper.single: + allconds.append(visitors.cloned_traverse( + mapper.inherit_condition, + {}, + {'binary': visit_binary} + ) + ) + except ColumnsNotAvailable: + return None + + cond = sql.and_(*allconds) + + cols = [] + for key in attribute_names: + cols.extend(props[key].columns) + return sql.select(cols, cond, use_labels=True) + + def cascade_iterator(self, type_, state, halt_on=None): + """Iterate each element and its mapper in an object graph, + for all relationships that meet the given cascade rule. + + :param type_: + The name of the cascade rule (i.e. ``"save-update"``, ``"delete"``, + etc.). + + .. note:: the ``"all"`` cascade is not accepted here. For a generic + object traversal function, see :ref:`faq_walk_objects`. + + :param state: + The lead InstanceState. child items will be processed per + the relationships defined for this object's mapper. + + :return: the method yields individual object instances. + + .. seealso:: + + :ref:`unitofwork_cascades` + + :ref:`faq_walk_objects` - illustrates a generic function to + traverse all objects without relying on cascades. 
+ + """ + visited_states = set() + prp, mpp = object(), object() + + visitables = deque([(deque(self._props.values()), prp, + state, state.dict)]) + + while visitables: + iterator, item_type, parent_state, parent_dict = visitables[-1] + if not iterator: + visitables.pop() + continue + + if item_type is prp: + prop = iterator.popleft() + if type_ not in prop.cascade: + continue + queue = deque(prop.cascade_iterator( + type_, parent_state, parent_dict, + visited_states, halt_on)) + if queue: + visitables.append((queue, mpp, None, None)) + elif item_type is mpp: + instance, instance_mapper, corresponding_state, \ + corresponding_dict = iterator.popleft() + yield instance, instance_mapper, \ + corresponding_state, corresponding_dict + visitables.append((deque(instance_mapper._props.values()), + prp, corresponding_state, + corresponding_dict)) + + @_memoized_configured_property + def _compiled_cache(self): + return util.LRUCache(self._compiled_cache_size) + + @_memoized_configured_property + def _sorted_tables(self): + table_to_mapper = {} + + for mapper in self.base_mapper.self_and_descendants: + for t in mapper.tables: + table_to_mapper.setdefault(t, mapper) + + extra_dependencies = [] + for table, mapper in table_to_mapper.items(): + super_ = mapper.inherits + if super_: + extra_dependencies.extend([ + (super_table, table) + for super_table in super_.tables + ]) + + def skip(fk): + # attempt to skip dependencies that are not + # significant to the inheritance chain + # for two tables that are related by inheritance. + # while that dependency may be important, it's technically + # not what we mean to sort on here. + parent = table_to_mapper.get(fk.parent.table) + dep = table_to_mapper.get(fk.column.table) + if parent is not None and \ + dep is not None and \ + dep is not parent and \ + dep.inherit_condition is not None: + cols = set(sql_util._find_columns(dep.inherit_condition)) + if parent.inherit_condition is not None: + cols = cols.union(sql_util._find_columns( + parent.inherit_condition)) + return fk.parent not in cols and fk.column not in cols + else: + return fk.parent not in cols + return False + + sorted_ = sql_util.sort_tables(table_to_mapper, + skip_fn=skip, + extra_dependencies=extra_dependencies) + + ret = util.OrderedDict() + for t in sorted_: + ret[t] = table_to_mapper[t] + return ret + + def _memo(self, key, callable_): + if key in self._memoized_values: + return self._memoized_values[key] + else: + self._memoized_values[key] = value = callable_() + return value + + @util.memoized_property + def _table_to_equated(self): + """memoized map of tables to collections of columns to be + synchronized upwards to the base mapper.""" + + result = util.defaultdict(list) + + for table in self._sorted_tables: + cols = set(table.c) + for m in self.iterate_to_root(): + if m._inherits_equated_pairs and \ + cols.intersection( + util.reduce(set.union, + [l.proxy_set for l, r in + m._inherits_equated_pairs]) + ): + result[table].append((m, m._inherits_equated_pairs)) + + return result + + +def configure_mappers(): + """Initialize the inter-mapper relationships of all mappers that + have been constructed thus far. + + This function can be called any number of times, but in + most cases is invoked automatically, the first time mappings are used, + as well as whenever mappings are used and additional not-yet-configured + mappers have been constructed. 
+ + Points at which this occurs include when a mapped class is instantiated + into an instance, as well as when the :meth:`.Session.query` method + is used. + + The :func:`.configure_mappers` function provides several event hooks + that can be used to augment its functionality. These methods include: + + * :meth:`.MapperEvents.before_configured` - called once before + :func:`.configure_mappers` does any work; this can be used to establish + additional options, properties, or related mappings before the operation + proceeds. + + * :meth:`.MapperEvents.mapper_configured` - called as each individual + :class:`.Mapper` is configured within the process; will include all + mapper state except for backrefs set up by other mappers that are still + to be configured. + + * :meth:`.MapperEvents.after_configured` - called once after + :func:`.configure_mappers` is complete; at this stage, all + :class:`.Mapper` objects that are known to SQLAlchemy will be fully + configured. Note that the calling application may still have other + mappings that haven't been produced yet, such as if they are in modules + as yet unimported. + + """ + + if not Mapper._new_mappers: + return + + _CONFIGURE_MUTEX.acquire() + try: + global _already_compiling + if _already_compiling: + return + _already_compiling = True + try: + + # double-check inside mutex + if not Mapper._new_mappers: + return + + Mapper.dispatch._for_class(Mapper).before_configured() + # initialize properties on all mappers + # note that _mapper_registry is unordered, which + # may randomly conceal/reveal issues related to + # the order of mapper compilation + + for mapper in list(_mapper_registry): + if getattr(mapper, '_configure_failed', False): + e = sa_exc.InvalidRequestError( + "One or more mappers failed to initialize - " + "can't proceed with initialization of other " + "mappers. Triggering mapper: '%s'. " + "Original exception was: %s" + % (mapper, mapper._configure_failed)) + e._configure_failed = mapper._configure_failed + raise e + if not mapper.configured: + try: + mapper._post_configure_properties() + mapper._expire_memoizations() + mapper.dispatch.mapper_configured( + mapper, mapper.class_) + except Exception: + exc = sys.exc_info()[1] + if not hasattr(exc, '_configure_failed'): + mapper._configure_failed = exc + raise + + Mapper._new_mappers = False + finally: + _already_compiling = False + finally: + _CONFIGURE_MUTEX.release() + Mapper.dispatch._for_class(Mapper).after_configured() + + +def reconstructor(fn): + """Decorate a method as the 'reconstructor' hook. + + Designates a method as the "reconstructor", an ``__init__``-like + method that will be called by the ORM after the instance has been + loaded from the database or otherwise reconstituted. + + The reconstructor will be invoked with no arguments. Scalar + (non-collection) database-mapped attributes of the instance will + be available for use within the function. Eagerly-loaded + collections are generally not yet available and will usually only + contain the first element. ORM state changes made to objects at + this stage will not be recorded for the next flush() operation, so + the activity within a reconstructor should be conservative. + + """ + fn.__sa_reconstructor__ = True + return fn + + +def validates(*names, **kw): + r"""Decorate a method as a 'validator' for one or more named properties.
+ + Designates a method as a validator, a method which receives the + name of the attribute as well as a value to be assigned, or in the + case of a collection, the value to be added to the collection. + The function can then raise validation exceptions to halt the + process from continuing (where Python's built-in ``ValueError`` + and ``AssertionError`` exceptions are reasonable choices), or can + modify or replace the value before proceeding. The function should + otherwise return the given value. + + Note that a validator for a collection **cannot** issue a load of that + collection within the validation routine - this usage raises + an assertion to avoid recursion overflows. This is a reentrant + condition which is not supported. + + :param \*names: list of attribute names to be validated. + :param include_removes: if True, "remove" events will be + sent as well - the validation function must accept an additional + argument "is_remove" which will be a boolean. + + .. versionadded:: 0.7.7 + :param include_backrefs: defaults to ``True``; if ``False``, the + validation function will not emit if the originator is an attribute + event related via a backref. This can be used for bi-directional + :func:`.validates` usage where only one validator should emit per + attribute operation. + + .. versionadded:: 0.9.0 + + .. seealso:: + + :ref:`simple_validators` - usage examples for :func:`.validates` + + """ + include_removes = kw.pop('include_removes', False) + include_backrefs = kw.pop('include_backrefs', True) + + def wrap(fn): + fn.__sa_validators__ = names + fn.__sa_validation_opts__ = { + "include_removes": include_removes, + "include_backrefs": include_backrefs + } + return fn + return wrap + + +def _event_on_load(state, ctx): + instrumenting_mapper = state.manager.info[_INSTRUMENTOR] + if instrumenting_mapper._reconstructor: + instrumenting_mapper._reconstructor(state.obj()) + + +def _event_on_first_init(manager, cls): + """Initial mapper compilation trigger. + + instrumentation calls this one when InstanceState + is first generated, and is needed for legacy mutable + attributes to work. + """ + + instrumenting_mapper = manager.info.get(_INSTRUMENTOR) + if instrumenting_mapper: + if Mapper._new_mappers: + configure_mappers() + + +def _event_on_init(state, args, kwargs): + """Run init_instance hooks. + + This also includes mapper compilation, normally not needed + here but helps with some piecemeal configuration + scenarios (such as in the ORM tutorial). + + """ + + instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR) + if instrumenting_mapper: + if Mapper._new_mappers: + configure_mappers() + if instrumenting_mapper._set_polymorphic_identity: + instrumenting_mapper._set_polymorphic_identity(state) + + +class _ColumnMapping(dict): + """Error reporting helper for mapper._columntoproperty.""" + + __slots__ = 'mapper', + + def __init__(self, mapper): + self.mapper = mapper + + def __missing__(self, column): + prop = self.mapper._props.get(column) + if prop: + raise orm_exc.UnmappedColumnError( + "Column '%s.%s' is not available, due to " + "conflicting property '%s':%r" % ( + column.table.name, column.name, column.key, prop)) + raise orm_exc.UnmappedColumnError( + "No column %s is configured on mapper %s..." 
% + (column, self.mapper)) diff --git a/app/lib/sqlalchemy/orm/path_registry.py b/app/lib/sqlalchemy/orm/path_registry.py new file mode 100644 index 0000000..580995a --- /dev/null +++ b/app/lib/sqlalchemy/orm/path_registry.py @@ -0,0 +1,271 @@ +# orm/path_registry.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# <see AUTHORS file> +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +"""Path tracking utilities, representing mapper graph traversals. + +""" + +from .. import inspection +from .. import util +from .. import exc +from itertools import chain +from .base import class_mapper +import logging + +log = logging.getLogger(__name__) + + +def _unreduce_path(path): + return PathRegistry.deserialize(path) + + +_WILDCARD_TOKEN = "*" +_DEFAULT_TOKEN = "_sa_default" + + +class PathRegistry(object): + """Represent query load paths and registry functions. + + Basically represents structures like: + + (<User mapper>, "orders", <Order mapper>, "items", <Item mapper>) + + These structures are generated by things like + query options (joinedload(), subqueryload(), etc.) and are + used to compose keys stored in the query._attributes dictionary + for various options. + + They are then re-composed at query compile/result row time as + the query is formed and as rows are fetched, where they again + serve to compose keys to look up options in the context.attributes + dictionary, which is copied from query._attributes. + + The path structure has a limited amount of caching, where each + "root" ultimately pulls from a fixed registry associated with + the first mapper, that also contains elements for each of its + property keys. However paths longer than two elements, which + are the exception rather than the rule, are generated on an + as-needed basis.
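Illustratively (this is internal API and the `User` mapping is hypothetical, so treat it as a sketch only): a path is built by indexing one registry with the next element, mapper, then property, and the resulting tuple is what gets embedded into option keys.

```python
from sqlalchemy import inspect

user_mapper = inspect(User)                # Mapper for the User class
root = user_mapper._path_registry          # EntityRegistry rooted at User
path = root[user_mapper.attrs['orders']]   # index by a property -> PropRegistry
print(path.path)    # (<Mapper User>, <RelationshipProperty orders>)
print(path.length)  # 2
```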
+ + """ + + is_token = False + is_root = False + + def __eq__(self, other): + return other is not None and \ + self.path == other.path + + def set(self, attributes, key, value): + log.debug("set '%s' on path '%s' to '%s'", key, self, value) + attributes[(key, self.path)] = value + + def setdefault(self, attributes, key, value): + log.debug("setdefault '%s' on path '%s' to '%s'", key, self, value) + attributes.setdefault((key, self.path), value) + + def get(self, attributes, key, value=None): + key = (key, self.path) + if key in attributes: + return attributes[key] + else: + return value + + def __len__(self): + return len(self.path) + + @property + def length(self): + return len(self.path) + + def pairs(self): + path = self.path + for i in range(0, len(path), 2): + yield path[i], path[i + 1] + + def contains_mapper(self, mapper): + for path_mapper in [ + self.path[i] for i in range(0, len(self.path), 2) + ]: + if path_mapper.is_mapper and \ + path_mapper.isa(mapper): + return True + else: + return False + + def contains(self, attributes, key): + return (key, self.path) in attributes + + def __reduce__(self): + return _unreduce_path, (self.serialize(), ) + + def serialize(self): + path = self.path + return list(zip( + [m.class_ for m in [path[i] for i in range(0, len(path), 2)]], + [path[i].key for i in range(1, len(path), 2)] + [None] + )) + + @classmethod + def deserialize(cls, path): + if path is None: + return None + + p = tuple(chain(*[(class_mapper(mcls), + class_mapper(mcls).attrs[key] + if key is not None else None) + for mcls, key in path])) + if p and p[-1] is None: + p = p[0:-1] + return cls.coerce(p) + + @classmethod + def per_mapper(cls, mapper): + return EntityRegistry( + cls.root, mapper + ) + + @classmethod + def coerce(cls, raw): + return util.reduce(lambda prev, next: prev[next], raw, cls.root) + + def token(self, token): + if token.endswith(':' + _WILDCARD_TOKEN): + return TokenRegistry(self, token) + elif token.endswith(":" + _DEFAULT_TOKEN): + return TokenRegistry(self.root, token) + else: + raise exc.ArgumentError("invalid token: %s" % token) + + def __add__(self, other): + return util.reduce( + lambda prev, next: prev[next], + other.path, self) + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self.path, ) + + +class RootRegistry(PathRegistry): + """Root registry, defers to mappers so that + paths are maintained per-root-mapper. + + """ + path = () + has_entity = False + is_aliased_class = False + is_root = True + + def __getitem__(self, entity): + return entity._path_registry + +PathRegistry.root = RootRegistry() + + +class TokenRegistry(PathRegistry): + def __init__(self, parent, token): + self.token = token + self.parent = parent + self.path = parent.path + (token,) + + has_entity = False + + is_token = True + + def generate_for_superclasses(self): + if not self.parent.is_aliased_class and not self.parent.is_root: + for ent in self.parent.mapper.iterate_to_root(): + yield TokenRegistry(self.parent.parent[ent], self.token) + else: + yield self + + def __getitem__(self, entity): + raise NotImplementedError() + + +class PropRegistry(PathRegistry): + def __init__(self, parent, prop): + # restate this path in terms of the + # given MapperProperty's parent. 
+ insp = inspection.inspect(parent[-1]) + if not insp.is_aliased_class or insp._use_mapper_path: + parent = parent.parent[prop.parent] + elif insp.is_aliased_class and insp.with_polymorphic_mappers: + if prop.parent is not insp.mapper and \ + prop.parent in insp.with_polymorphic_mappers: + subclass_entity = parent[-1]._entity_for_mapper(prop.parent) + parent = parent.parent[subclass_entity] + + self.prop = prop + self.parent = parent + self.path = parent.path + (prop,) + + self._wildcard_path_loader_key = ( + "loader", + self.parent.path + self.prop._wildcard_token + ) + self._default_path_loader_key = self.prop._default_path_loader_key + self._loader_key = ("loader", self.path) + + def __str__(self): + return " -> ".join( + str(elem) for elem in self.path + ) + + @util.memoized_property + def has_entity(self): + return hasattr(self.prop, "mapper") + + @util.memoized_property + def entity(self): + return self.prop.mapper + + @property + def mapper(self): + return self.entity + + @property + def entity_path(self): + return self[self.entity] + + def __getitem__(self, entity): + if isinstance(entity, (int, slice)): + return self.path[entity] + else: + return EntityRegistry( + self, entity + ) + + +class EntityRegistry(PathRegistry, dict): + is_aliased_class = False + has_entity = True + + def __init__(self, parent, entity): + self.key = entity + self.parent = parent + self.is_aliased_class = entity.is_aliased_class + self.entity = entity + self.path = parent.path + (entity,) + self.entity_path = self + + @property + def mapper(self): + return inspection.inspect(self.entity).mapper + + def __bool__(self): + return True + __nonzero__ = __bool__ + + def __getitem__(self, entity): + if isinstance(entity, (int, slice)): + return self.path[entity] + else: + return dict.__getitem__(self, entity) + + def __missing__(self, key): + self[key] = item = PropRegistry(self, key) + return item diff --git a/app/lib/sqlalchemy/orm/persistence.py b/app/lib/sqlalchemy/orm/persistence.py new file mode 100644 index 0000000..ad268c1 --- /dev/null +++ b/app/lib/sqlalchemy/orm/persistence.py @@ -0,0 +1,1460 @@ +# orm/persistence.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""private module containing functions used to emit INSERT, UPDATE +and DELETE statements on behalf of a :class:`.Mapper` and its descending +mappers. + +The functions here are called only by the unit of work functions +in unitofwork.py. + +""" + +import operator +from itertools import groupby, chain +from .. import sql, util, exc as sa_exc +from . import attributes, sync, exc as orm_exc, evaluator +from .base import state_str, _attr_as_key, _entity_descriptor +from ..sql import expression +from ..sql.base import _from_objects +from . 
import loading + + +def _bulk_insert( + mapper, mappings, session_transaction, isstates, return_defaults, + render_nulls): + base_mapper = mapper.base_mapper + + cached_connections = _cached_connection_dict(base_mapper) + + if session_transaction.session.connection_callable: + raise NotImplementedError( + "connection_callable / per-instance sharding " + "not supported in bulk_insert()") + + if isstates: + if return_defaults: + states = [(state, state.dict) for state in mappings] + mappings = [dict_ for (state, dict_) in states] + else: + mappings = [state.dict for state in mappings] + else: + mappings = list(mappings) + + connection = session_transaction.connection(base_mapper) + for table, super_mapper in base_mapper._sorted_tables.items(): + if not mapper.isa(super_mapper): + continue + + records = ( + (None, state_dict, params, mapper, + connection, value_params, has_all_pks, has_all_defaults) + for + state, state_dict, params, mp, + conn, value_params, has_all_pks, + has_all_defaults in _collect_insert_commands(table, ( + (None, mapping, mapper, connection) + for mapping in mappings), + bulk=True, return_defaults=return_defaults, + render_nulls=render_nulls + ) + ) + _emit_insert_statements(base_mapper, None, + cached_connections, + super_mapper, table, records, + bookkeeping=return_defaults) + + if return_defaults and isstates: + identity_cls = mapper._identity_class + identity_props = [p.key for p in mapper._identity_key_props] + for state, dict_ in states: + state.key = ( + identity_cls, + tuple([dict_[key] for key in identity_props]) + ) + + +def _bulk_update(mapper, mappings, session_transaction, + isstates, update_changed_only): + base_mapper = mapper.base_mapper + + cached_connections = _cached_connection_dict(base_mapper) + + search_keys = mapper._primary_key_propkeys + if mapper._version_id_prop: + search_keys = set([mapper._version_id_prop.key]).union(search_keys) + + def _changed_dict(mapper, state): + return dict( + (k, v) + for k, v in state.dict.items() if k in state.committed_state or k + in search_keys + + ) + + if isstates: + if update_changed_only: + mappings = [_changed_dict(mapper, state) for state in mappings] + else: + mappings = [state.dict for state in mappings] + else: + mappings = list(mappings) + + if session_transaction.session.connection_callable: + raise NotImplementedError( + "connection_callable / per-instance sharding " + "not supported in bulk_update()") + + connection = session_transaction.connection(base_mapper) + + for table, super_mapper in base_mapper._sorted_tables.items(): + if not mapper.isa(super_mapper): + continue + + records = _collect_update_commands(None, table, ( + (None, mapping, mapper, connection, + (mapping[mapper._version_id_prop.key] + if mapper._version_id_prop else None)) + for mapping in mappings + ), bulk=True) + + _emit_update_statements(base_mapper, None, + cached_connections, + super_mapper, table, records, + bookkeeping=False) + + +def save_obj( + base_mapper, states, uowtransaction, single=False): + """Issue ``INSERT`` and/or ``UPDATE`` statements for a list + of objects. + + This is called within the context of a UOWTransaction during a + flush operation, given a list of states to be flushed. The + base mapper in an inheritance hierarchy handles the inserts/ + updates for all descendant mappers. 
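The `_bulk_insert()` and `_bulk_update()` helpers above are reached through the public `Session` bulk API; a brief sketch with a hypothetical `User` mapping:

```python
session.bulk_insert_mappings(
    User,
    [{"id": 1, "name": "u1"}, {"id": 2, "name": "u2"}],
)
session.bulk_update_mappings(
    User,
    [{"id": 1, "name": "u1-renamed"}],  # primary key locates the row
)
session.commit()
```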
+ + """ + + # if batch=false, call _save_obj separately for each object + if not single and not base_mapper.batch: + for state in _sort_states(states): + save_obj(base_mapper, [state], uowtransaction, single=True) + return + + states_to_update = [] + states_to_insert = [] + cached_connections = _cached_connection_dict(base_mapper) + + for (state, dict_, mapper, connection, + has_identity, + row_switch, update_version_id) in _organize_states_for_save( + base_mapper, states, uowtransaction + ): + if has_identity or row_switch: + states_to_update.append( + (state, dict_, mapper, connection, update_version_id) + ) + else: + states_to_insert.append( + (state, dict_, mapper, connection) + ) + + for table, mapper in base_mapper._sorted_tables.items(): + if table not in mapper._pks_by_table: + continue + insert = _collect_insert_commands(table, states_to_insert) + + update = _collect_update_commands( + uowtransaction, table, states_to_update) + + _emit_update_statements(base_mapper, uowtransaction, + cached_connections, + mapper, table, update) + + _emit_insert_statements(base_mapper, uowtransaction, + cached_connections, + mapper, table, insert) + + _finalize_insert_update_commands( + base_mapper, uowtransaction, + chain( + ( + (state, state_dict, mapper, connection, False) + for state, state_dict, mapper, connection in states_to_insert + ), + ( + (state, state_dict, mapper, connection, True) + for state, state_dict, mapper, connection, + update_version_id in states_to_update + ) + ) + ) + + +def post_update(base_mapper, states, uowtransaction, post_update_cols): + """Issue UPDATE statements on behalf of a relationship() which + specifies post_update. + + """ + cached_connections = _cached_connection_dict(base_mapper) + + states_to_update = list(_organize_states_for_post_update( + base_mapper, + states, uowtransaction)) + + for table, mapper in base_mapper._sorted_tables.items(): + if table not in mapper._pks_by_table: + continue + + update = ( + (state, state_dict, sub_mapper, connection) + for + state, state_dict, sub_mapper, connection in states_to_update + if table in sub_mapper._pks_by_table + ) + + update = _collect_post_update_commands(base_mapper, uowtransaction, + table, update, + post_update_cols) + + _emit_post_update_statements(base_mapper, uowtransaction, + cached_connections, + mapper, table, update) + + +def delete_obj(base_mapper, states, uowtransaction): + """Issue ``DELETE`` statements for a list of objects. + + This is called within the context of a UOWTransaction during a + flush operation. + + """ + + cached_connections = _cached_connection_dict(base_mapper) + + states_to_delete = list(_organize_states_for_delete( + base_mapper, + states, + uowtransaction)) + + table_to_mapper = base_mapper._sorted_tables + + for table in reversed(list(table_to_mapper.keys())): + mapper = table_to_mapper[table] + if table not in mapper._pks_by_table: + continue + elif mapper.inherits and mapper.passive_deletes: + continue + + delete = _collect_delete_commands(base_mapper, uowtransaction, + table, states_to_delete) + + _emit_delete_statements(base_mapper, uowtransaction, + cached_connections, mapper, table, delete) + + for state, state_dict, mapper, connection, \ + update_version_id in states_to_delete: + mapper.dispatch.after_delete(mapper, connection, state) + + +def _organize_states_for_save(base_mapper, states, uowtransaction): + """Make an initial pass across a set of states for INSERT or + UPDATE. 
+ + This includes splitting out into distinct lists for + each, calling before_insert/before_update, obtaining + key information for each state including its dictionary, + mapper, the connection to use for the execution per state, + and the identity flag. + + """ + + for state, dict_, mapper, connection in _connections_for_states( + base_mapper, uowtransaction, + states): + + has_identity = bool(state.key) + + instance_key = state.key or mapper._identity_key_from_state(state) + + row_switch = update_version_id = None + + # call before_XXX extensions + if not has_identity: + mapper.dispatch.before_insert(mapper, connection, state) + else: + mapper.dispatch.before_update(mapper, connection, state) + + if mapper._validate_polymorphic_identity: + mapper._validate_polymorphic_identity(mapper, state, dict_) + + # detect if we have a "pending" instance (i.e. has + # no instance_key attached to it), and another instance + # with the same identity key already exists as persistent. + # convert to an UPDATE if so. + if not has_identity and \ + instance_key in uowtransaction.session.identity_map: + instance = \ + uowtransaction.session.identity_map[instance_key] + existing = attributes.instance_state(instance) + + if not uowtransaction.was_already_deleted(existing): + if not uowtransaction.is_deleted(existing): + raise orm_exc.FlushError( + "New instance %s with identity key %s conflicts " + "with persistent instance %s" % + (state_str(state), instance_key, + state_str(existing))) + + base_mapper._log_debug( + "detected row switch for identity %s. " + "will update %s, remove %s from " + "transaction", instance_key, + state_str(state), state_str(existing)) + + # remove the "delete" flag from the existing element + uowtransaction.remove_state_actions(existing) + row_switch = existing + + if (has_identity or row_switch) and mapper.version_id_col is not None: + update_version_id = mapper._get_committed_state_attr_by_column( + row_switch if row_switch else state, + row_switch.dict if row_switch else dict_, + mapper.version_id_col) + + yield (state, dict_, mapper, connection, + has_identity, row_switch, update_version_id) + + +def _organize_states_for_post_update(base_mapper, states, + uowtransaction): + """Make an initial pass across a set of states for UPDATE + corresponding to post_update. + + This includes obtaining key information for each state + including its dictionary, mapper, the connection to use for + the execution per state. + + """ + return _connections_for_states(base_mapper, uowtransaction, states) + + +def _organize_states_for_delete(base_mapper, states, uowtransaction): + """Make an initial pass across a set of states for DELETE. + + This includes calling out before_delete and obtaining + key information for each state including its dictionary, + mapper, the connection to use for the execution per state. + + """ + for state, dict_, mapper, connection in _connections_for_states( + base_mapper, uowtransaction, + states): + + mapper.dispatch.before_delete(mapper, connection, state) + + if mapper.version_id_col is not None: + update_version_id = \ + mapper._get_committed_state_attr_by_column( + state, dict_, + mapper.version_id_col) + else: + update_version_id = None + + yield ( + state, dict_, mapper, connection, update_version_id) + + +def _collect_insert_commands( + table, states_to_insert, + bulk=False, return_defaults=False, render_nulls=False): + """Identify sets of values to use in INSERT statements for a + list of states. 
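The `bulk` and `return_defaults` flags seen here surface through `Session.bulk_save_objects`; a sketch, assuming a hypothetical `User` with an autoincrementing primary key. With `return_defaults=True` the INSERTs run row at a time so that server-generated primary keys can be captured back onto the objects:

```python
users = [User(name="u%d" % i) for i in range(3)]
session.bulk_save_objects(users, return_defaults=True)
print([u.id for u in users])  # primary keys fetched back from the INSERTs
```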
+ + """ + for state, state_dict, mapper, connection in states_to_insert: + if table not in mapper._pks_by_table: + continue + + params = {} + value_params = {} + + propkey_to_col = mapper._propkey_to_col[table] + + eval_none = mapper._insert_cols_evaluating_none[table] + + for propkey in set(propkey_to_col).intersection(state_dict): + value = state_dict[propkey] + col = propkey_to_col[propkey] + if value is None and propkey not in eval_none and not render_nulls: + continue + elif not bulk and hasattr(value, '__clause_element__') or \ + isinstance(value, sql.ClauseElement): + value_params[col.key] = value.__clause_element__() \ + if hasattr(value, '__clause_element__') else value + else: + params[col.key] = value + + if not bulk: + # for all the columns that have no default and we don't have + # a value and where "None" is not a special value, add + # explicit None to the INSERT. This is a legacy behavior + # which might be worth removing, as it should not be necessary + # and also produces confusion, given that "missing" and None + # now have distinct meanings + for colkey in mapper._insert_cols_as_none[table].\ + difference(params).difference(value_params): + params[colkey] = None + + if not bulk or return_defaults: + has_all_pks = mapper._pk_keys_by_table[table].issubset(params) + + if mapper.base_mapper.eager_defaults: + has_all_defaults = mapper._server_default_cols[table].\ + issubset(params) + else: + has_all_defaults = True + else: + has_all_defaults = has_all_pks = True + + if mapper.version_id_generator is not False \ + and mapper.version_id_col is not None and \ + mapper.version_id_col in mapper._cols_by_table[table]: + params[mapper.version_id_col.key] = \ + mapper.version_id_generator(None) + + yield ( + state, state_dict, params, mapper, + connection, value_params, has_all_pks, + has_all_defaults) + + +def _collect_update_commands( + uowtransaction, table, states_to_update, + bulk=False): + """Identify sets of values to use in UPDATE statements for a + list of states. + + This function works intricately with the history system + to determine exactly what values should be updated + as well as how the row should be matched within an UPDATE + statement. Includes some tricky scenarios where the primary + key of an object might have been changed. 
+ + """ + + for state, state_dict, mapper, connection, \ + update_version_id in states_to_update: + + if table not in mapper._pks_by_table: + continue + + pks = mapper._pks_by_table[table] + + value_params = {} + + propkey_to_col = mapper._propkey_to_col[table] + + if bulk: + params = dict( + (propkey_to_col[propkey].key, state_dict[propkey]) + for propkey in + set(propkey_to_col).intersection(state_dict).difference( + mapper._pk_keys_by_table[table]) + ) + has_all_defaults = True + else: + params = {} + for propkey in set(propkey_to_col).intersection( + state.committed_state): + value = state_dict[propkey] + col = propkey_to_col[propkey] + + if hasattr(value, '__clause_element__') or \ + isinstance(value, sql.ClauseElement): + value_params[col] = value.__clause_element__() \ + if hasattr(value, '__clause_element__') else value + # guard against values that generate non-__nonzero__ + # objects for __eq__() + elif state.manager[propkey].impl.is_equal( + value, state.committed_state[propkey]) is not True: + params[col.key] = value + + if mapper.base_mapper.eager_defaults: + has_all_defaults = mapper._server_onupdate_default_cols[table].\ + issubset(params) + else: + has_all_defaults = True + + if update_version_id is not None and \ + mapper.version_id_col in mapper._cols_by_table[table]: + + if not bulk and not (params or value_params): + # HACK: check for history in other tables, in case the + # history is only in a different table than the one + # where the version_id_col is. This logic was lost + # from 0.9 -> 1.0.0 and restored in 1.0.6. + for prop in mapper._columntoproperty.values(): + history = ( + state.manager[prop.key].impl.get_history( + state, state_dict, + attributes.PASSIVE_NO_INITIALIZE)) + if history.added: + break + else: + # no net change, break + continue + + col = mapper.version_id_col + params[col._label] = update_version_id + + if (bulk or col.key not in params) and \ + mapper.version_id_generator is not False: + val = mapper.version_id_generator(update_version_id) + params[col.key] = val + + elif not (params or value_params): + continue + + has_all_pks = True + if bulk: + pk_params = dict( + (propkey_to_col[propkey]._label, state_dict.get(propkey)) + for propkey in + set(propkey_to_col). + intersection(mapper._pk_attr_keys_by_table[table]) + ) + else: + pk_params = {} + for col in pks: + propkey = mapper._columntoproperty[col].key + + history = state.manager[propkey].impl.get_history( + state, state_dict, attributes.PASSIVE_OFF) + + if history.added: + if not history.deleted or \ + ("pk_cascaded", state, col) in \ + uowtransaction.attributes: + pk_params[col._label] = history.added[0] + params.pop(col.key, None) + else: + # else, use the old value to locate the row + pk_params[col._label] = history.deleted[0] + if col in value_params: + has_all_pks = False + else: + pk_params[col._label] = history.unchanged[0] + if pk_params[col._label] is None: + raise orm_exc.FlushError( + "Can't update table %s using NULL for primary " + "key value on column %s" % (table, col)) + + if params or value_params: + params.update(pk_params) + yield ( + state, state_dict, params, mapper, + connection, value_params, has_all_defaults, has_all_pks) + + +def _collect_post_update_commands(base_mapper, uowtransaction, table, + states_to_update, post_update_cols): + """Identify sets of values to use in UPDATE statements for a + list of states within a post_update operation. 
+ + """ + + for state, state_dict, mapper, connection in states_to_update: + + # assert table in mapper._pks_by_table + + pks = mapper._pks_by_table[table] + params = {} + hasdata = False + + for col in mapper._cols_by_table[table]: + if col in pks: + params[col._label] = \ + mapper._get_state_attr_by_column( + state, + state_dict, col, passive=attributes.PASSIVE_OFF) + + elif col in post_update_cols: + prop = mapper._columntoproperty[col] + history = state.manager[prop.key].impl.get_history( + state, state_dict, + attributes.PASSIVE_NO_INITIALIZE) + if history.added: + value = history.added[0] + params[col.key] = value + hasdata = True + if hasdata: + yield params, connection + + +def _collect_delete_commands(base_mapper, uowtransaction, table, + states_to_delete): + """Identify values to use in DELETE statements for a list of + states to be deleted.""" + + for state, state_dict, mapper, connection, \ + update_version_id in states_to_delete: + + if table not in mapper._pks_by_table: + continue + + params = {} + for col in mapper._pks_by_table[table]: + params[col.key] = \ + value = \ + mapper._get_committed_state_attr_by_column( + state, state_dict, col) + if value is None: + raise orm_exc.FlushError( + "Can't delete from table %s " + "using NULL for primary " + "key value on column %s" % (table, col)) + + if update_version_id is not None and \ + mapper.version_id_col in mapper._cols_by_table[table]: + params[mapper.version_id_col.key] = update_version_id + yield params, connection + + +def _emit_update_statements(base_mapper, uowtransaction, + cached_connections, mapper, table, update, + bookkeeping=True): + """Emit UPDATE statements corresponding to value lists collected + by _collect_update_commands().""" + + needs_version_id = mapper.version_id_col is not None and \ + mapper.version_id_col in mapper._cols_by_table[table] + + def update_stmt(): + clause = sql.and_() + + for col in mapper._pks_by_table[table]: + clause.clauses.append(col == sql.bindparam(col._label, + type_=col.type)) + + if needs_version_id: + clause.clauses.append( + mapper.version_id_col == sql.bindparam( + mapper.version_id_col._label, + type_=mapper.version_id_col.type)) + + stmt = table.update(clause) + return stmt + + cached_stmt = base_mapper._memo(('update', table), update_stmt) + + for (connection, paramkeys, hasvalue, has_all_defaults, has_all_pks), \ + records in groupby( + update, + lambda rec: ( + rec[4], # connection + set(rec[2]), # set of parameter keys + bool(rec[5]), # whether or not we have "value" parameters + rec[6], # has_all_defaults + rec[7] # has all pks + ) + ): + rows = 0 + records = list(records) + + statement = cached_stmt + + # TODO: would be super-nice to not have to determine this boolean + # inside the loop here, in the 99.9999% of the time there's only + # one connection in use + assert_singlerow = connection.dialect.supports_sane_rowcount + assert_multirow = assert_singlerow and \ + connection.dialect.supports_sane_multi_rowcount + allow_multirow = has_all_defaults and not needs_version_id + + if not has_all_pks: + statement = statement.return_defaults() + elif bookkeeping and not has_all_defaults and \ + mapper.base_mapper.eager_defaults: + statement = statement.return_defaults() + elif mapper.version_id_col is not None: + statement = statement.return_defaults(mapper.version_id_col) + + if hasvalue: + for state, state_dict, params, mapper, \ + connection, value_params, \ + has_all_defaults, has_all_pks in records: + c = connection.execute( + statement.values(value_params), + params) 
+ if bookkeeping: + _postfetch( + mapper, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + value_params) + rows += c.rowcount + check_rowcount = True + else: + if not allow_multirow: + check_rowcount = assert_singlerow + for state, state_dict, params, mapper, \ + connection, value_params, has_all_defaults, \ + has_all_pks in records: + c = cached_connections[connection].\ + execute(statement, params) + + # TODO: why with bookkeeping=False? + if bookkeeping: + _postfetch( + mapper, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + value_params) + rows += c.rowcount + else: + multiparams = [rec[2] for rec in records] + + check_rowcount = assert_multirow or ( + assert_singlerow and + len(multiparams) == 1 + ) + + c = cached_connections[connection].\ + execute(statement, multiparams) + + rows += c.rowcount + + for state, state_dict, params, mapper, \ + connection, value_params, \ + has_all_defaults, has_all_pks in records: + if bookkeeping: + _postfetch( + mapper, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + value_params) + + if check_rowcount: + if rows != len(records): + raise orm_exc.StaleDataError( + "UPDATE statement on table '%s' expected to " + "update %d row(s); %d were matched." % + (table.description, len(records), rows)) + + elif needs_version_id: + util.warn("Dialect %s does not support updated rowcount " + "- versioning cannot be verified." % + c.dialect.dialect_description) + + +def _emit_insert_statements(base_mapper, uowtransaction, + cached_connections, mapper, table, insert, + bookkeeping=True): + """Emit INSERT statements corresponding to value lists collected + by _collect_insert_commands().""" + + cached_stmt = base_mapper._memo(('insert', table), table.insert) + + for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \ + records in groupby( + insert, + lambda rec: ( + rec[4], # connection + set(rec[2]), # parameter keys + bool(rec[5]), # whether we have "value" parameters + rec[6], + rec[7])): + + statement = cached_stmt + + if not bookkeeping or \ + ( + has_all_defaults + or not base_mapper.eager_defaults + or not connection.dialect.implicit_returning + ) and has_all_pks and not hasvalue: + + records = list(records) + multiparams = [rec[2] for rec in records] + + c = cached_connections[connection].\ + execute(statement, multiparams) + + if bookkeeping: + for (state, state_dict, params, mapper_rec, + conn, value_params, has_all_pks, has_all_defaults), \ + last_inserted_params in \ + zip(records, c.context.compiled_parameters): + if state: + _postfetch( + mapper_rec, + uowtransaction, + table, + state, + state_dict, + c, + last_inserted_params, + value_params) + else: + _postfetch_bulk_save(mapper_rec, state_dict, table) + + else: + if not has_all_defaults and base_mapper.eager_defaults: + statement = statement.return_defaults() + elif mapper.version_id_col is not None: + statement = statement.return_defaults(mapper.version_id_col) + + for state, state_dict, params, mapper_rec, \ + connection, value_params, \ + has_all_pks, has_all_defaults in records: + + if value_params: + result = connection.execute( + statement.values(value_params), + params) + else: + result = cached_connections[connection].\ + execute(statement, params) + + primary_key = result.context.inserted_primary_key + + if primary_key is not None: + # set primary key attributes + for pk, col in zip(primary_key, + mapper._pks_by_table[table]): + prop = 
mapper_rec._columntoproperty[col] + if state_dict.get(prop.key) is None: + state_dict[prop.key] = pk + if bookkeeping: + if state: + _postfetch( + mapper_rec, + uowtransaction, + table, + state, + state_dict, + result, + result.context.compiled_parameters[0], + value_params) + else: + _postfetch_bulk_save(mapper_rec, state_dict, table) + + +def _emit_post_update_statements(base_mapper, uowtransaction, + cached_connections, mapper, table, update): + """Emit UPDATE statements corresponding to value lists collected + by _collect_post_update_commands().""" + + def update_stmt(): + clause = sql.and_() + + for col in mapper._pks_by_table[table]: + clause.clauses.append(col == sql.bindparam(col._label, + type_=col.type)) + + return table.update(clause) + + statement = base_mapper._memo(('post_update', table), update_stmt) + + # execute each UPDATE in the order according to the original + # list of states to guarantee row access order, but + # also group them into common (connection, cols) sets + # to support executemany(). + for key, grouper in groupby( + update, lambda rec: ( + rec[1], # connection + set(rec[0]) # parameter keys + ) + ): + connection = key[0] + multiparams = [params for params, conn in grouper] + cached_connections[connection].\ + execute(statement, multiparams) + + +def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, + mapper, table, delete): + """Emit DELETE statements corresponding to value lists collected + by _collect_delete_commands().""" + + need_version_id = mapper.version_id_col is not None and \ + mapper.version_id_col in mapper._cols_by_table[table] + + def delete_stmt(): + clause = sql.and_() + for col in mapper._pks_by_table[table]: + clause.clauses.append( + col == sql.bindparam(col.key, type_=col.type)) + + if need_version_id: + clause.clauses.append( + mapper.version_id_col == + sql.bindparam( + mapper.version_id_col.key, + type_=mapper.version_id_col.type + ) + ) + + return table.delete(clause) + + statement = base_mapper._memo(('delete', table), delete_stmt) + for connection, recs in groupby( + delete, + lambda rec: rec[1] # connection + ): + del_objects = [params for params, connection in recs] + + connection = cached_connections[connection] + + expected = len(del_objects) + rows_matched = -1 + only_warn = False + if connection.dialect.supports_sane_multi_rowcount: + c = connection.execute(statement, del_objects) + + if not need_version_id: + only_warn = True + + rows_matched = c.rowcount + + elif need_version_id: + if connection.dialect.supports_sane_rowcount: + rows_matched = 0 + # execute deletes individually so that versioned + # rows can be verified + for params in del_objects: + c = connection.execute(statement, params) + rows_matched += c.rowcount + else: + util.warn( + "Dialect %s does not support deleted rowcount " + "- versioning cannot be verified." % + connection.dialect.dialect_description, + stacklevel=12) + connection.execute(statement, del_objects) + else: + connection.execute(statement, del_objects) + + if base_mapper.confirm_deleted_rows and \ + rows_matched > -1 and expected != rows_matched: + if only_warn: + util.warn( + "DELETE statement on table '%s' expected to " + "delete %d row(s); %d were matched. Please set " + "confirm_deleted_rows=False within the mapper " + "configuration to prevent this warning." % + (table.description, expected, rows_matched) + ) + else: + raise orm_exc.StaleDataError( + "DELETE statement on table '%s' expected to " + "delete %d row(s); %d were matched. 
Please set " + "confirm_deleted_rows=False within the mapper " + "configuration to prevent this warning." % + (table.description, expected, rows_matched) + ) + + +def _finalize_insert_update_commands(base_mapper, uowtransaction, states): + """finalize state on states that have been inserted or updated, + including calling after_insert/after_update events. + + """ + for state, state_dict, mapper, connection, has_identity in states: + + if mapper._readonly_props: + readonly = state.unmodified_intersection( + [p.key for p in mapper._readonly_props + if p.expire_on_flush or p.key not in state.dict] + ) + if readonly: + state._expire_attributes(state.dict, readonly) + + # if eager_defaults option is enabled, load + # all expired cols. Else if we have a version_id_col, make sure + # it isn't expired. + toload_now = [] + + if base_mapper.eager_defaults: + toload_now.extend( + state._unloaded_non_object.intersection( + mapper._server_default_plus_onupdate_propkeys) + ) + + if mapper.version_id_col is not None and \ + mapper.version_id_generator is False: + if mapper._version_id_prop.key in state.unloaded: + toload_now.extend([mapper._version_id_prop.key]) + + if toload_now: + state.key = base_mapper._identity_key_from_state(state) + loading.load_on_ident( + uowtransaction.session.query(mapper), + state.key, refresh_state=state, + only_load_props=toload_now) + + # call after_XXX extensions + if not has_identity: + mapper.dispatch.after_insert(mapper, connection, state) + else: + mapper.dispatch.after_update(mapper, connection, state) + + +def _postfetch(mapper, uowtransaction, table, + state, dict_, result, params, value_params): + """Expire attributes in need of newly persisted database state, + after an INSERT or UPDATE statement has proceeded for that + state.""" + + prefetch_cols = result.context.compiled.prefetch + postfetch_cols = result.context.compiled.postfetch + returning_cols = result.context.compiled.returning + + if mapper.version_id_col is not None and \ + mapper.version_id_col in mapper._cols_by_table[table]: + prefetch_cols = list(prefetch_cols) + [mapper.version_id_col] + + refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush) + if refresh_flush: + load_evt_attrs = [] + + if returning_cols: + row = result.context.returned_defaults + if row is not None: + for col in returning_cols: + # pk cols returned from insert are handled + # distinctly, don't step on the values here + if col.primary_key and result.context.isinsert: + continue + + # note that columns can be in the "return defaults" that are + # not mapped to this mapper, typically because they are + # "excluded", which can be specified directly or also occurs + # when using declarative w/ single table inheritance + prop = mapper._columntoproperty.get(col) + if prop: + dict_[prop.key] = row[col] + if refresh_flush: + load_evt_attrs.append(prop.key) + + for c in prefetch_cols: + if c.key in params and c in mapper._columntoproperty: + dict_[mapper._columntoproperty[c].key] = params[c.key] + if refresh_flush: + load_evt_attrs.append(mapper._columntoproperty[c].key) + + if refresh_flush and load_evt_attrs: + mapper.class_manager.dispatch.refresh_flush( + state, uowtransaction, load_evt_attrs) + + if postfetch_cols: + state._expire_attributes(state.dict, + [mapper._columntoproperty[c].key + for c in postfetch_cols if c in + mapper._columntoproperty] + ) + + # synchronize newly inserted ids from one table to the next + # TODO: this still goes a little too often. 
would be nice to + # have definitive list of "columns that changed" here + for m, equated_pairs in mapper._table_to_equated[table]: + sync.populate(state, m, state, m, + equated_pairs, + uowtransaction, + mapper.passive_updates) + + +def _postfetch_bulk_save(mapper, dict_, table): + for m, equated_pairs in mapper._table_to_equated[table]: + sync.bulk_populate_inherit_keys(dict_, m, equated_pairs) + + +def _connections_for_states(base_mapper, uowtransaction, states): + """Return an iterator of (state, state.dict, mapper, connection). + + The states are sorted according to _sort_states, then paired + with the connection they should be using for the given + unit of work transaction. + + """ + # if session has a connection callable, + # organize individual states with the connection + # to use for update + if uowtransaction.session.connection_callable: + connection_callable = \ + uowtransaction.session.connection_callable + else: + connection = uowtransaction.transaction.connection(base_mapper) + connection_callable = None + + for state in _sort_states(states): + if connection_callable: + connection = connection_callable(base_mapper, state.obj()) + + mapper = state.manager.mapper + + yield state, state.dict, mapper, connection + + +def _cached_connection_dict(base_mapper): + # dictionary of connection->connection_with_cache_options. + return util.PopulateDict( + lambda conn: conn.execution_options( + compiled_cache=base_mapper._compiled_cache + )) + + +def _sort_states(states): + pending = set(states) + persistent = set(s for s in pending if s.key is not None) + pending.difference_update(persistent) + return sorted(pending, key=operator.attrgetter("insert_order")) + \ + sorted(persistent, key=lambda q: q.key[1]) + + +class BulkUD(object): + """Handle bulk update and deletes via a :class:`.Query`.""" + + def __init__(self, query): + self.query = query.enable_eagerloads(False) + self.mapper = self.query._bind_mapper() + self._validate_query_state() + + def _validate_query_state(self): + for attr, methname, notset, op in ( + ('_limit', 'limit()', None, operator.is_), + ('_offset', 'offset()', None, operator.is_), + ('_order_by', 'order_by()', False, operator.is_), + ('_group_by', 'group_by()', False, operator.is_), + ('_distinct', 'distinct()', False, operator.is_), + ( + '_from_obj', + 'join(), outerjoin(), select_from(), or from_self()', + (), operator.eq) + ): + if not op(getattr(self.query, attr), notset): + raise sa_exc.InvalidRequestError( + "Can't call Query.update() or Query.delete() " + "when %s has been called" % + (methname, ) + ) + + @property + def session(self): + return self.query.session + + @classmethod + def _factory(cls, lookup, synchronize_session, *arg): + try: + klass = lookup[synchronize_session] + except KeyError: + raise sa_exc.ArgumentError( + "Valid strategies for session synchronization " + "are %s" % (", ".join(sorted(repr(x) + for x in lookup)))) + else: + return klass(*arg) + + def exec_(self): + self._do_pre() + self._do_pre_synchronize() + self._do_exec() + self._do_post_synchronize() + self._do_post() + + @util.dependencies("sqlalchemy.orm.query") + def _do_pre(self, querylib): + query = self.query + self.context = querylib.QueryContext(query) + + if isinstance(query._entities[0], querylib._ColumnEntity): + # check for special case of query(table) + tables = set() + for ent in query._entities: + if not isinstance(ent, querylib._ColumnEntity): + tables.clear() + break + else: + tables.update(_from_objects(ent.column)) + + if len(tables) != 1: + raise 
sa_exc.InvalidRequestError( + "This operation requires only one Table or " + "entity be specified as the target." + ) + else: + self.primary_table = tables.pop() + + else: + self.primary_table = query._only_entity_zero( + "This operation requires only one Table or " + "entity be specified as the target." + ).mapper.local_table + + session = query.session + + if query._autoflush: + session._autoflush() + + def _do_pre_synchronize(self): + pass + + def _do_post_synchronize(self): + pass + + +class BulkEvaluate(BulkUD): + """BulkUD which does the 'evaluate' method of session state resolution.""" + + def _additional_evaluators(self, evaluator_compiler): + pass + + def _do_pre_synchronize(self): + query = self.query + target_cls = query._mapper_zero().class_ + + try: + evaluator_compiler = evaluator.EvaluatorCompiler(target_cls) + if query.whereclause is not None: + eval_condition = evaluator_compiler.process( + query.whereclause) + else: + def eval_condition(obj): + return True + + self._additional_evaluators(evaluator_compiler) + + except evaluator.UnevaluatableError: + raise sa_exc.InvalidRequestError( + "Could not evaluate current criteria in Python. " + "Specify 'fetch' or False for the " + "synchronize_session parameter.") + + # TODO: detect when the where clause is a trivial primary key match + self.matched_objects = [ + obj for (cls, pk), obj in + query.session.identity_map.items() + if issubclass(cls, target_cls) and + eval_condition(obj)] + + +class BulkFetch(BulkUD): + """BulkUD which does the 'fetch' method of session state resolution.""" + + def _do_pre_synchronize(self): + query = self.query + session = query.session + context = query._compile_context() + select_stmt = context.statement.with_only_columns( + self.primary_table.primary_key) + self.matched_rows = session.execute( + select_stmt, + mapper=self.mapper, + params=query._params).fetchall() + + +class BulkUpdate(BulkUD): + """BulkUD which handles UPDATEs.""" + + def __init__(self, query, values, update_kwargs): + super(BulkUpdate, self).__init__(query) + self.values = values + self.update_kwargs = update_kwargs + + @classmethod + def factory(cls, query, synchronize_session, values, update_kwargs): + return BulkUD._factory({ + "evaluate": BulkUpdateEvaluate, + "fetch": BulkUpdateFetch, + False: BulkUpdate + }, synchronize_session, query, values, update_kwargs) + + def _resolve_string_to_expr(self, key): + if self.mapper and isinstance(key, util.string_types): + attr = _entity_descriptor(self.mapper, key) + return attr.__clause_element__() + else: + return key + + def _resolve_key_to_attrname(self, key): + if self.mapper and isinstance(key, util.string_types): + attr = _entity_descriptor(self.mapper, key) + return attr.property.key + elif isinstance(key, attributes.InstrumentedAttribute): + return key.key + elif hasattr(key, '__clause_element__'): + key = key.__clause_element__() + + if self.mapper and isinstance(key, expression.ColumnElement): + try: + attr = self.mapper._columntoproperty[key] + except orm_exc.UnmappedColumnError: + return None + else: + return attr.key + else: + raise sa_exc.InvalidRequestError( + "Invalid expression type: %r" % key) + + def _do_exec(self): + + values = [ + (self._resolve_string_to_expr(k), v) + for k, v in ( + self.values.items() if hasattr(self.values, 'items') + else self.values) + ] + if not self.update_kwargs.get('preserve_parameter_order', False): + values = dict(values) + + update_stmt = sql.update(self.primary_table, + self.context.whereclause, values, + **self.update_kwargs) + + 
self.result = self.query.session.execute( + update_stmt, params=self.query._params, + mapper=self.mapper) + self.rowcount = self.result.rowcount + + def _do_post(self): + session = self.query.session + session.dispatch.after_bulk_update(self) + + +class BulkDelete(BulkUD): + """BulkUD which handles DELETEs.""" + + def __init__(self, query): + super(BulkDelete, self).__init__(query) + + @classmethod + def factory(cls, query, synchronize_session): + return BulkUD._factory({ + "evaluate": BulkDeleteEvaluate, + "fetch": BulkDeleteFetch, + False: BulkDelete + }, synchronize_session, query) + + def _do_exec(self): + delete_stmt = sql.delete(self.primary_table, + self.context.whereclause) + + self.result = self.query.session.execute( + delete_stmt, + params=self.query._params, + mapper=self.mapper) + self.rowcount = self.result.rowcount + + def _do_post(self): + session = self.query.session + session.dispatch.after_bulk_delete(self) + + +class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate): + """BulkUD which handles UPDATEs using the "evaluate" + method of session resolution.""" + + def _additional_evaluators(self, evaluator_compiler): + self.value_evaluators = {} + values = (self.values.items() if hasattr(self.values, 'items') + else self.values) + for key, value in values: + key = self._resolve_key_to_attrname(key) + if key is not None: + self.value_evaluators[key] = evaluator_compiler.process( + expression._literal_as_binds(value)) + + def _do_post_synchronize(self): + session = self.query.session + states = set() + evaluated_keys = list(self.value_evaluators.keys()) + for obj in self.matched_objects: + state, dict_ = attributes.instance_state(obj),\ + attributes.instance_dict(obj) + + # only evaluate unmodified attributes + to_evaluate = state.unmodified.intersection( + evaluated_keys) + for key in to_evaluate: + dict_[key] = self.value_evaluators[key](obj) + + state._commit(dict_, list(to_evaluate)) + + # expire attributes with pending changes + # (there was no autoflush, so they are overwritten) + state._expire_attributes(dict_, + set(evaluated_keys). 
+ difference(to_evaluate)) + states.add(state) + session._register_altered(states) + + +class BulkDeleteEvaluate(BulkEvaluate, BulkDelete): + """BulkUD which handles DELETEs using the "evaluate" + method of session resolution.""" + + def _do_post_synchronize(self): + self.query.session._remove_newly_deleted( + [attributes.instance_state(obj) + for obj in self.matched_objects]) + + +class BulkUpdateFetch(BulkFetch, BulkUpdate): + """BulkUD which handles UPDATEs using the "fetch" + method of session resolution.""" + + def _do_post_synchronize(self): + session = self.query.session + target_mapper = self.query._mapper_zero() + + states = set([ + attributes.instance_state(session.identity_map[identity_key]) + for identity_key in [ + target_mapper.identity_key_from_primary_key( + list(primary_key)) + for primary_key in self.matched_rows + ] + if identity_key in session.identity_map + ]) + attrib = [_attr_as_key(k) for k in self.values] + for state in states: + session._expire_state(state, attrib) + session._register_altered(states) + + +class BulkDeleteFetch(BulkFetch, BulkDelete): + """BulkUD which handles DELETEs using the "fetch" + method of session resolution.""" + + def _do_post_synchronize(self): + session = self.query.session + target_mapper = self.query._mapper_zero() + for primary_key in self.matched_rows: + # TODO: inline this and call remove_newly_deleted + # once + identity_key = target_mapper.identity_key_from_primary_key( + list(primary_key)) + if identity_key in session.identity_map: + session._remove_newly_deleted( + [attributes.instance_state( + session.identity_map[identity_key] + )] + ) diff --git a/app/lib/sqlalchemy/orm/properties.py b/app/lib/sqlalchemy/orm/properties.py new file mode 100644 index 0000000..63e7e1e --- /dev/null +++ b/app/lib/sqlalchemy/orm/properties.py @@ -0,0 +1,277 @@ +# orm/properties.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""MapperProperty implementations. + +This is a private module which defines the behavior of individual ORM- +mapped attributes. + +""" +from __future__ import absolute_import + +from .. import util, log +from ..sql import expression +from . import attributes +from .util import _orm_full_deannotate + +from .interfaces import PropComparator, StrategizedProperty + +__all__ = ['ColumnProperty', 'CompositeProperty', 'SynonymProperty', + 'ComparableProperty', 'RelationshipProperty'] + + +@log.class_logger +class ColumnProperty(StrategizedProperty): + """Describes an object attribute that corresponds to a table column. + + Public constructor is the :func:`.orm.column_property` function. + + """ + + strategy_wildcard_key = 'column' + + __slots__ = ( + '_orig_columns', 'columns', 'group', 'deferred', + 'instrument', 'comparator_factory', 'descriptor', 'extension', + 'active_history', 'expire_on_flush', 'info', 'doc', + 'strategy_key', '_creation_order', '_is_polymorphic_discriminator', + '_mapped_by_synonym', '_deferred_column_loader') + + def __init__(self, *columns, **kwargs): + r"""Provide a column-level property for use with a Mapper. + + Column-based properties can normally be applied to the mapper's + ``properties`` dictionary using the :class:`.Column` element directly. + Use this function when the given column is not directly present within + the mapper's selectable; examples include SQL expressions, functions, + and scalar SELECT queries.
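+ + For example, an illustrative sketch mapping a SQL expression as a + read-only attribute (``User``, ``Base`` and the column objects are + hypothetical and assume the usual declarative setup; they are not + part of this module):: + + from sqlalchemy.orm import column_property + + class User(Base): + __tablename__ = 'user' + id = Column(Integer, primary_key=True) + firstname = Column(String(50)) + lastname = Column(String(50)) + + fullname = column_property(firstname + " " + lastname)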
+ + Columns that aren't present in the mapper's selectable won't be + persisted by the mapper and are effectively "read-only" attributes. + + :param \*columns: + list of Column objects to be mapped. + + :param active_history=False: + When ``True``, indicates that the "previous" value for a + scalar attribute should be loaded when replaced, if not + already loaded. Normally, history tracking logic for + simple non-primary-key scalar values only needs to be + aware of the "new" value in order to perform a flush. This + flag is available for applications that make use of + :func:`.attributes.get_history` or :meth:`.Session.is_modified`, + which also need to know + the "previous" value of the attribute. + + .. versionadded:: 0.6.6 + + :param comparator_factory: a class which extends + :class:`.ColumnProperty.Comparator` which provides custom SQL + clause generation for comparison operations. + + :param group: + a group name for this property when marked as deferred. + + :param deferred: + when True, the column property is "deferred", meaning that + it does not load immediately, and is instead loaded when the + attribute is first accessed on an instance. See also + :func:`~sqlalchemy.orm.deferred`. + + :param doc: + optional string that will be applied as the doc on the + class-bound descriptor. + + :param expire_on_flush=True: + Disable expiry on flush. A column_property() which refers + to a SQL expression (and not a single table-bound column) + is considered to be a "read only" property; populating it + has no effect on the state of data, and it can only return + database state. For this reason a column_property()'s value + is expired whenever the parent object is involved in a + flush, that is, has any kind of "dirty" state within a flush. + Setting this parameter to ``False`` will have the effect of + leaving any existing value present after the flush proceeds. + Note however that the :class:`.Session` with default expiration + settings still expires + all attributes after a :meth:`.Session.commit` call. + + .. versionadded:: 0.7.3 + + :param info: Optional data dictionary which will be populated into the + :attr:`.MapperProperty.info` attribute of this object. + + .. versionadded:: 0.8 + + :param extension: + an + :class:`.AttributeExtension` + instance, or list of extensions, which will be prepended + to the list of attribute listeners for the resulting + descriptor placed on the class. + **Deprecated.** Please see :class:`.AttributeEvents`.
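+ + As a further sketch (the ``Document`` class is hypothetical, assumes + the usual declarative setup, and ``func`` is assumed to be imported + from ``sqlalchemy``), a deferred SQL expression can share a group + with other deferred attributes:: + + class Document(Base): + __tablename__ = 'document' + id = Column(Integer, primary_key=True) + body = Column(Text) + + body_length = column_property( + func.length(body), deferred=True, group='summary')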
+ + """ + super(ColumnProperty, self).__init__() + self._orig_columns = [expression._labeled(c) for c in columns] + self.columns = [expression._labeled(_orm_full_deannotate(c)) + for c in columns] + self.group = kwargs.pop('group', None) + self.deferred = kwargs.pop('deferred', False) + self.instrument = kwargs.pop('_instrument', True) + self.comparator_factory = kwargs.pop('comparator_factory', + self.__class__.Comparator) + self.descriptor = kwargs.pop('descriptor', None) + self.extension = kwargs.pop('extension', None) + self.active_history = kwargs.pop('active_history', False) + self.expire_on_flush = kwargs.pop('expire_on_flush', True) + + if 'info' in kwargs: + self.info = kwargs.pop('info') + + if 'doc' in kwargs: + self.doc = kwargs.pop('doc') + else: + for col in reversed(self.columns): + doc = getattr(col, 'doc', None) + if doc is not None: + self.doc = doc + break + else: + self.doc = None + + if kwargs: + raise TypeError( + "%s received unexpected keyword argument(s): %s" % ( + self.__class__.__name__, + ', '.join(sorted(kwargs.keys())))) + + util.set_creation_order(self) + + self.strategy_key = ( + ("deferred", self.deferred), + ("instrument", self.instrument) + ) + + @util.dependencies("sqlalchemy.orm.state", "sqlalchemy.orm.strategies") + def _memoized_attr__deferred_column_loader(self, state, strategies): + return state.InstanceState._instance_level_callable_processor( + self.parent.class_manager, + strategies.LoadDeferredColumns(self.key), self.key) + + @property + def expression(self): + """Return the primary column or expression for this ColumnProperty. + + """ + return self.columns[0] + + def instrument_class(self, mapper): + if not self.instrument: + return + + attributes.register_descriptor( + mapper.class_, + self.key, + comparator=self.comparator_factory(self, mapper), + parententity=mapper, + doc=self.doc + ) + + def do_init(self): + super(ColumnProperty, self).do_init() + if len(self.columns) > 1 and \ + set(self.parent.primary_key).issuperset(self.columns): + util.warn( + ("On mapper %s, primary key column '%s' is being combined " + "with distinct primary key column '%s' in attribute '%s'. " + "Use explicit properties to give each column its own mapped " + "attribute name.") % (self.parent, self.columns[1], + self.columns[0], self.key)) + + def copy(self): + return ColumnProperty( + deferred=self.deferred, + group=self.group, + active_history=self.active_history, + *self.columns) + + def _getcommitted(self, state, dict_, column, + passive=attributes.PASSIVE_OFF): + return state.get_impl(self.key).\ + get_committed_value(state, dict_, passive=passive) + + def merge(self, session, source_state, source_dict, dest_state, + dest_dict, load, _recursive, _resolve_conflict_map): + if not self.instrument: + return + elif self.key in source_dict: + value = source_dict[self.key] + + if not load: + dest_dict[self.key] = value + else: + impl = dest_state.get_impl(self.key) + impl.set(dest_state, dest_dict, value, None) + elif dest_state.has_identity and self.key not in dest_dict: + dest_state._expire_attributes( + dest_dict, [self.key], no_loader=True) + + class Comparator(util.MemoizedSlots, PropComparator): + """Produce boolean, comparison, and other operators for + :class:`.ColumnProperty` attributes. + + See the documentation for :class:`.PropComparator` for a brief + overview. 
+ + See also: + + :class:`.PropComparator` + + :class:`.ColumnOperators` + + :ref:`types_operators` + + :attr:`.TypeEngine.comparator_factory` + + """ + + __slots__ = '__clause_element__', 'info' + + def _memoized_method___clause_element__(self): + if self.adapter: + return self.adapter(self.prop.columns[0]) + else: + # no adapter, so we aren't aliased + # assert self._parententity is self._parentmapper + return self.prop.columns[0]._annotate({ + "parententity": self._parententity, + "parentmapper": self._parententity}) + + def _memoized_attr_info(self): + ce = self.__clause_element__() + try: + return ce.info + except AttributeError: + return self.prop.info + + def _fallback_getattr(self, key): + """proxy attribute access down to the mapped column. + + this allows user-defined comparison methods to be accessed. + """ + return getattr(self.__clause_element__(), key) + + def operate(self, op, *other, **kwargs): + return op(self.__clause_element__(), *other, **kwargs) + + def reverse_operate(self, op, other, **kwargs): + col = self.__clause_element__() + return op(col._bind_param(op, other), col, **kwargs) + + def __str__(self): + return str(self.parent.class_.__name__) + "." + self.key diff --git a/app/lib/sqlalchemy/orm/query.py b/app/lib/sqlalchemy/orm/query.py new file mode 100644 index 0000000..e8bd717 --- /dev/null +++ b/app/lib/sqlalchemy/orm/query.py @@ -0,0 +1,4187 @@ +# orm/query.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""The Query class and support. + +Defines the :class:`.Query` class, the central +construct used by the ORM to construct database queries. + +The :class:`.Query` class should not be confused with the +:class:`.Select` class, which defines database +SELECT operations at the SQL (non-ORM) level. ``Query`` differs from +``Select`` in that it returns ORM-mapped objects and interacts with an +ORM session, whereas the ``Select`` construct interacts directly with the +database to return iterable result sets. + +""" + +from itertools import chain + +from . import ( + attributes, interfaces, object_mapper, persistence, + exc as orm_exc, loading +) +from .base import _entity_descriptor, _is_aliased_class, \ + _is_mapped_class, _orm_columns, _generative, InspectionAttr +from .path_registry import PathRegistry +from .util import ( + AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased +) +from .. import sql, util, log, exc as sa_exc, inspect, inspection +from ..sql.expression import _interpret_as_from +from ..sql import ( + util as sql_util, + expression, visitors +) +from ..sql.base import ColumnCollection +from . import properties + +__all__ = ['Query', 'QueryContext', 'aliased'] + + +_path_registry = PathRegistry.root + + +@inspection._self_inspects +@log.class_logger +class Query(object): + """ORM-level SQL construction object. + + :class:`.Query` is the source of all SELECT statements generated by the + ORM, both those formulated by end-user query operations as well as by + high level internal operations such as related collection loading. It + features a generative interface whereby successive calls return a new + :class:`.Query` object, a copy of the former with additional + criteria and options associated with it. 
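+ + For example, each call below returns a new :class:`.Query` derived + from the previous one (a minimal sketch assuming a mapped ``User`` + class):: + + q = session.query(User) + q = q.filter(User.name == 'ed') + q = q.order_by(User.id)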
+ + :class:`.Query` objects are normally initially generated using the + :meth:`~.Session.query` method of :class:`.Session`, and in + less common cases by instantiating the :class:`.Query` directly and + associating with a :class:`.Session` using the :meth:`.Query.with_session` + method. + + For a full walkthrough of :class:`.Query` usage, see the + :ref:`ormtutorial_toplevel`. + + """ + + _enable_eagerloads = True + _enable_assertions = True + _with_labels = False + _criterion = None + _yield_per = None + _order_by = False + _group_by = False + _having = None + _distinct = False + _prefixes = None + _suffixes = None + _offset = None + _limit = None + _for_update_arg = None + _statement = None + _correlate = frozenset() + _populate_existing = False + _invoke_all_eagers = True + _version_check = False + _autoflush = True + _only_load_props = None + _refresh_state = None + _from_obj = () + _join_entities = () + _select_from_entity = None + _mapper_adapter_map = {} + _filter_aliases = None + _from_obj_alias = None + _joinpath = _joinpoint = util.immutabledict() + _execution_options = util.immutabledict() + _params = util.immutabledict() + _attributes = util.immutabledict() + _with_options = () + _with_hints = () + _enable_single_crit = True + _orm_only_adapt = True + _orm_only_from_obj_alias = True + _current_path = _path_registry + _has_mapper_entities = False + + def __init__(self, entities, session=None): + """Construct a :class:`.Query` directly. + + E.g.:: + + q = Query([User, Address], session=some_session) + + The above is equivalent to:: + + q = some_session.query(User, Address) + + :param entities: a sequence of entities and/or SQL expressions. + + :param session: a :class:`.Session` with which the :class:`.Query` + will be associated. Optional; a :class:`.Query` can be associated + with a :class:`.Session` generatively via the + :meth:`.Query.with_session` method as well. + + .. 
seealso:: + + :meth:`.Session.query` + + :meth:`.Query.with_session` + + """ + self.session = session + self._polymorphic_adapters = {} + self._set_entities(entities) + + def _set_entities(self, entities, entity_wrapper=None): + if entity_wrapper is None: + entity_wrapper = _QueryEntity + self._entities = [] + self._primary_entity = None + self._has_mapper_entities = False + for ent in util.to_list(entities): + entity_wrapper(self, ent) + + self._set_entity_selectables(self._entities) + + def _set_entity_selectables(self, entities): + self._mapper_adapter_map = d = self._mapper_adapter_map.copy() + + for ent in entities: + for entity in ent.entities: + if entity not in d: + ext_info = inspect(entity) + if not ext_info.is_aliased_class and \ + ext_info.mapper.with_polymorphic: + if ext_info.mapper.mapped_table not in \ + self._polymorphic_adapters: + self._mapper_loads_polymorphically_with( + ext_info.mapper, + sql_util.ColumnAdapter( + ext_info.selectable, + ext_info.mapper._equivalent_columns + ) + ) + aliased_adapter = None + elif ext_info.is_aliased_class: + aliased_adapter = ext_info._adapter + else: + aliased_adapter = None + + d[entity] = ( + ext_info, + aliased_adapter + ) + ent.setup_entity(*d[entity]) + + def _mapper_loads_polymorphically_with(self, mapper, adapter): + for m2 in mapper._with_polymorphic_mappers or [mapper]: + self._polymorphic_adapters[m2] = adapter + for m in m2.iterate_to_root(): + self._polymorphic_adapters[m.local_table] = adapter + + def _set_select_from(self, obj, set_base_alias): + fa = [] + select_from_alias = None + + for from_obj in obj: + info = inspect(from_obj) + if hasattr(info, 'mapper') and \ + (info.is_mapper or info.is_aliased_class): + self._select_from_entity = info + if set_base_alias and not info.is_aliased_class: + raise sa_exc.ArgumentError( + "A selectable (FromClause) instance is " + "expected when the base alias is being set.") + fa.append(info.selectable) + elif not info.is_selectable: + raise sa_exc.ArgumentError( + "argument is not a mapped class, mapper, " + "aliased(), or FromClause instance.") + else: + if isinstance(from_obj, expression.SelectBase): + from_obj = from_obj.alias() + if set_base_alias: + select_from_alias = from_obj + fa.append(from_obj) + + self._from_obj = tuple(fa) + + if set_base_alias and \ + len(self._from_obj) == 1 and \ + isinstance(select_from_alias, expression.Alias): + equivs = self.__all_equivs() + self._from_obj_alias = sql_util.ColumnAdapter( + self._from_obj[0], equivs) + elif set_base_alias and \ + len(self._from_obj) == 1 and \ + hasattr(info, "mapper") and \ + info.is_aliased_class: + self._from_obj_alias = info._adapter + + def _reset_polymorphic_adapter(self, mapper): + for m2 in mapper._with_polymorphic_mappers: + self._polymorphic_adapters.pop(m2, None) + for m in m2.iterate_to_root(): + self._polymorphic_adapters.pop(m.local_table, None) + + def _adapt_polymorphic_element(self, element): + if "parententity" in element._annotations: + search = element._annotations['parententity'] + alias = self._polymorphic_adapters.get(search, None) + if alias: + return alias.adapt_clause(element) + + if isinstance(element, expression.FromClause): + search = element + elif hasattr(element, 'table'): + search = element.table + else: + return None + + alias = self._polymorphic_adapters.get(search, None) + if alias: + return alias.adapt_clause(element) + + def _adapt_col_list(self, cols): + return [ + self._adapt_clause( + expression._literal_as_label_reference(o), + True, True) + for o in cols + ] + + 
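# Note: _adapt_all_clauses() below switches off the "ORM only" + # restriction, so that the adaptation performed in _adapt_clause() + # applies to plain Core expressions as well as ORM-annotated ones. +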
@_generative() + def _adapt_all_clauses(self): + self._orm_only_adapt = False + + def _adapt_clause(self, clause, as_filter, orm_only): + """Adapt incoming clauses to transformations which + have been applied within this query.""" + + adapters = [] + # do we adapt all expression elements or only those + # tagged as 'ORM' constructs ? + if not self._orm_only_adapt: + orm_only = False + + if as_filter and self._filter_aliases: + for fa in self._filter_aliases._visitor_iterator: + adapters.append( + ( + orm_only, fa.replace + ) + ) + + if self._from_obj_alias: + # for the "from obj" alias, apply extra rule to the + # 'ORM only' check, if this query were generated from a + # subquery of itself, i.e. _from_selectable(), apply adaption + # to all SQL constructs. + adapters.append( + ( + orm_only if self._orm_only_from_obj_alias else False, + self._from_obj_alias.replace + ) + ) + + if self._polymorphic_adapters: + adapters.append( + ( + orm_only, self._adapt_polymorphic_element + ) + ) + + if not adapters: + return clause + + def replace(elem): + for _orm_only, adapter in adapters: + # if 'orm only', look for ORM annotations + # in the element before adapting. + if not _orm_only or \ + '_orm_adapt' in elem._annotations or \ + "parententity" in elem._annotations: + + e = adapter(elem) + if e is not None: + return e + + return visitors.replacement_traverse( + clause, + {}, + replace + ) + + def _query_entity_zero(self): + """Return the first QueryEntity.""" + return self._entities[0] + + def _mapper_zero(self): + """return the Mapper associated with the first QueryEntity.""" + return self._entities[0].mapper + + def _entity_zero(self): + """Return the 'entity' (mapper or AliasedClass) associated + with the first QueryEntity, or alternatively the 'select from' + entity if specified.""" + + return self._select_from_entity \ + if self._select_from_entity is not None \ + else self._query_entity_zero().entity_zero + + @property + def _mapper_entities(self): + for ent in self._entities: + if isinstance(ent, _MapperEntity): + yield ent + + def _joinpoint_zero(self): + return self._joinpoint.get( + '_joinpoint_entity', + self._entity_zero() + ) + + def _bind_mapper(self): + ezero = self._entity_zero() + if ezero is not None: + insp = inspect(ezero) + if not insp.is_clause_element: + return insp.mapper + + return None + + def _only_full_mapper_zero(self, methname): + if self._entities != [self._primary_entity]: + raise sa_exc.InvalidRequestError( + "%s() can only be used against " + "a single mapped class." % methname) + return self._primary_entity.entity_zero + + def _only_entity_zero(self, rationale=None): + if len(self._entities) > 1: + raise sa_exc.InvalidRequestError( + rationale or + "This operation requires a Query " + "against a single mapper." 
+ ) + return self._entity_zero() + + def __all_equivs(self): + equivs = {} + for ent in self._mapper_entities: + equivs.update(ent.mapper._equivalent_columns) + return equivs + + def _get_condition(self): + return self._no_criterion_condition( + "get", order_by=False, distinct=False) + + def _get_existing_condition(self): + self._no_criterion_assertion("get", order_by=False, distinct=False) + + def _no_criterion_assertion(self, meth, order_by=True, distinct=True): + if not self._enable_assertions: + return + if self._criterion is not None or \ + self._statement is not None or self._from_obj or \ + self._limit is not None or self._offset is not None or \ + self._group_by or (order_by and self._order_by) or \ + (distinct and self._distinct): + raise sa_exc.InvalidRequestError( + "Query.%s() being called on a " + "Query with existing criterion. " % meth) + + def _no_criterion_condition(self, meth, order_by=True, distinct=True): + self._no_criterion_assertion(meth, order_by, distinct) + + self._from_obj = () + self._statement = self._criterion = None + self._order_by = self._group_by = self._distinct = False + + def _no_clauseelement_condition(self, meth): + if not self._enable_assertions: + return + if self._order_by: + raise sa_exc.InvalidRequestError( + "Query.%s() being called on a " + "Query with existing criterion. " % meth) + self._no_criterion_condition(meth) + + def _no_statement_condition(self, meth): + if not self._enable_assertions: + return + if self._statement is not None: + raise sa_exc.InvalidRequestError( + ("Query.%s() being called on a Query with an existing full " + "statement - can't apply criterion.") % meth) + + def _no_limit_offset(self, meth): + if not self._enable_assertions: + return + if self._limit is not None or self._offset is not None: + raise sa_exc.InvalidRequestError( + "Query.%s() being called on a Query which already has LIMIT " + "or OFFSET applied. To modify the row-limited results of a " + "Query, call from_self() first. " + "Otherwise, call %s() before limit() or offset() " + "are applied." + % (meth, meth) + ) + + def _get_options(self, populate_existing=None, + version_check=None, + only_load_props=None, + refresh_state=None): + if populate_existing: + self._populate_existing = populate_existing + if version_check: + self._version_check = version_check + if refresh_state: + self._refresh_state = refresh_state + if only_load_props: + self._only_load_props = set(only_load_props) + return self + + def _clone(self): + cls = self.__class__ + q = cls.__new__(cls) + q.__dict__ = self.__dict__.copy() + return q + + @property + def statement(self): + """The full SELECT statement represented by this Query. + + The statement by default will not have disambiguating labels + applied to the construct unless with_labels(True) is called + first. + + """ + + stmt = self._compile_context(labels=self._with_labels).\ + statement + if self._params: + stmt = stmt.params(self._params) + + # TODO: there's no tests covering effects of + # the annotation not being there + return stmt._annotate({'no_replacement_traverse': True}) + + def subquery(self, name=None, with_labels=False, reduce_columns=False): + """return the full SELECT statement represented by + this :class:`.Query`, embedded within an :class:`.Alias`. + + Eager JOIN generation within the query is disabled. + + :param name: string name to be assigned as the alias; + this is passed through to :meth:`.FromClause.alias`. + If ``None``, a name will be deterministically generated + at compile time.
+ + :param with_labels: if True, :meth:`.with_labels` will be called + on the :class:`.Query` first to apply table-qualified labels + to all columns. + + :param reduce_columns: if True, :meth:`.Select.reduce_columns` will + be called on the resulting :func:`.select` construct, + to remove same-named columns where one also refers to the other + via foreign key or WHERE clause equivalence. + + .. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns`` + keyword arguments were added. + + """ + q = self.enable_eagerloads(False) + if with_labels: + q = q.with_labels() + q = q.statement + if reduce_columns: + q = q.reduce_columns() + return q.alias(name=name) + + def cte(self, name=None, recursive=False): + r"""Return the full SELECT statement represented by this + :class:`.Query` represented as a common table expression (CTE). + + Parameters and usage are the same as those of the + :meth:`.SelectBase.cte` method; see that method for + further details. + + Here is the `PostgreSQL WITH + RECURSIVE example + `_. + Note that, in this example, the ``included_parts`` cte and the + ``incl_alias`` alias of it are Core selectables, which + means the columns are accessed via the ``.c.`` attribute. The + ``parts_alias`` object is an :func:`.orm.aliased` instance of the + ``Part`` entity, so column-mapped attributes are available + directly:: + + from sqlalchemy.orm import aliased + + class Part(Base): + __tablename__ = 'part' + part = Column(String, primary_key=True) + sub_part = Column(String, primary_key=True) + quantity = Column(Integer) + + included_parts = session.query( + Part.sub_part, + Part.part, + Part.quantity).\ + filter(Part.part=="our part").\ + cte(name="included_parts", recursive=True) + + incl_alias = aliased(included_parts, name="pr") + parts_alias = aliased(Part, name="p") + included_parts = included_parts.union_all( + session.query( + parts_alias.sub_part, + parts_alias.part, + parts_alias.quantity).\ + filter(parts_alias.part==incl_alias.c.sub_part) + ) + + q = session.query( + included_parts.c.sub_part, + func.sum(included_parts.c.quantity). + label('total_quantity') + ).\ + group_by(included_parts.c.sub_part) + + .. seealso:: + + :meth:`.HasCTE.cte` + + """ + return self.enable_eagerloads(False).\ + statement.cte(name=name, recursive=recursive) + + def label(self, name): + """Return the full SELECT statement represented by this + :class:`.Query`, converted + to a scalar subquery with a label of the given name. + + Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`. + + .. versionadded:: 0.6.5 + + """ + + return self.enable_eagerloads(False).statement.label(name) + + def as_scalar(self): + """Return the full SELECT statement represented by this + :class:`.Query`, converted to a scalar subquery. + + Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`. + + .. versionadded:: 0.6.5 + + """ + + return self.enable_eagerloads(False).statement.as_scalar() + + @property + def selectable(self): + """Return the :class:`.Select` object emitted by this :class:`.Query`. + + Used for :func:`.inspect` compatibility, this is equivalent to:: + + query.enable_eagerloads(False).with_labels().statement + + """ + return self.__clause_element__() + + def __clause_element__(self): + return self.enable_eagerloads(False).with_labels().statement + + @_generative() + def enable_eagerloads(self, value): + """Control whether or not eager joins and subqueries are + rendered. 
+ + When set to False, the returned Query will not render + eager joins regardless of :func:`~sqlalchemy.orm.joinedload`, + :func:`~sqlalchemy.orm.subqueryload` options + or mapper-level ``lazy='joined'``/``lazy='subquery'`` + configurations. + + This is used primarily when nesting the Query's + statement into a subquery or other + selectable, or when using :meth:`.Query.yield_per`. + + """ + self._enable_eagerloads = value + + def _no_yield_per(self, message): + raise sa_exc.InvalidRequestError( + "The yield_per Query option is currently not " + "compatible with %s eager loading. Please " + "specify lazyload('*') or query.enable_eagerloads(False) in " + "order to " + "proceed with query.yield_per()." % message) + + @_generative() + def with_labels(self): + """Apply column labels to the return value of Query.statement. + + Indicates that this Query's `statement` accessor should return + a SELECT statement that applies labels to all columns in the + form ``<tablename>_<columnname>``; this is commonly used to + disambiguate columns from multiple tables which have the same + name. + + When the `Query` actually issues SQL to load rows, it always + uses column labeling. + + .. note:: The :meth:`.Query.with_labels` method *only* applies to + the output of :attr:`.Query.statement`, and *not* to any of + the result-row invoking systems of :class:`.Query` itself, e.g. + :meth:`.Query.first`, :meth:`.Query.all`, etc. To execute + a query using :meth:`.Query.with_labels`, invoke the + :attr:`.Query.statement` using :meth:`.Session.execute`:: + + result = session.execute(query.with_labels().statement) + + + """ + self._with_labels = True + + @_generative() + def enable_assertions(self, value): + """Control whether assertions are generated. + + When set to False, the returned Query will + not assert its state before certain operations, + including that LIMIT/OFFSET has not been applied + when filter() is called, no criterion exists + when get() is called, and no "from_statement()" + exists when filter()/order_by()/group_by() etc. + is called. This more permissive mode is used by + custom Query subclasses to specify criterion or + other modifiers outside of the usual usage patterns. + + Care should be taken to ensure that the usage + pattern is even possible. A statement applied + by from_statement() will override any criterion + set by filter() or order_by(), for example. + + """ + self._enable_assertions = value + + @property + def whereclause(self): + """A readonly attribute which returns the current WHERE criterion for + this Query. + + This returned value is a SQL expression construct, or ``None`` if no + criterion has been established. + + """ + return self._criterion + + @_generative() + def _with_current_path(self, path): + """indicate that this query applies to objects loaded + within a certain path. + + Used by deferred loaders (see strategies.py) which transfer + query options from an originating query to a newly generated + query intended for the deferred load. + + """ + self._current_path = path + + @_generative(_no_clauseelement_condition) + def with_polymorphic(self, + cls_or_mappers, + selectable=None, + polymorphic_on=None): + """Load columns for inheriting classes. + + :meth:`.Query.with_polymorphic` applies transformations + to the "main" mapped class represented by this :class:`.Query`. + The "main" mapped class here means the :class:`.Query` + object's first argument is a full class, i.e. + ``session.query(SomeClass)``.
These transformations allow additional + tables to be present in the FROM clause so that columns for a + joined-inheritance subclass are available in the query, both for the + purposes of load-time efficiency as well as the ability to use + these columns at query time. + + See the documentation section :ref:`with_polymorphic` for + details on how this method is used. + + .. versionchanged:: 0.8 + A new and more flexible function + :func:`.orm.with_polymorphic` supersedes + :meth:`.Query.with_polymorphic`, as it can apply the equivalent + functionality to any set of columns or classes in the + :class:`.Query`, not just the "zero mapper". See that + function for a description of arguments. + + """ + + if not self._primary_entity: + raise sa_exc.InvalidRequestError( + "No primary mapper set up for this Query.") + entity = self._entities[0]._clone() + self._entities = [entity] + self._entities[1:] + entity.set_with_polymorphic(self, + cls_or_mappers, + selectable=selectable, + polymorphic_on=polymorphic_on) + + @_generative() + def yield_per(self, count): + r"""Yield only ``count`` rows at a time. + + The purpose of this method is, when fetching very large result sets + (> 10K rows), to batch results in sub-collections and yield them + out partially, so that the Python interpreter doesn't need to declare + very large areas of memory, which is both time-consuming and leads + to excessive memory use. The performance from fetching hundreds of + thousands of rows can often double when a suitable yield-per setting + (e.g. approximately 1000) is used, even with DBAPIs that buffer + rows (which are most). + + The :meth:`.Query.yield_per` method **is not compatible with most + eager loading schemes, including subqueryload and joinedload with + collections**. For this reason, it may be helpful to disable + eager loads, either unconditionally with + :meth:`.Query.enable_eagerloads`:: + + q = sess.query(Object).yield_per(100).enable_eagerloads(False) + + Or more selectively using :func:`.lazyload`, such as with + an asterisk to specify the default loader scheme:: + + q = sess.query(Object).yield_per(100).\ + options(lazyload('*'), joinedload(Object.some_related)) + + .. warning:: + + Use this method with caution; if the same instance is + present in more than one batch of rows, end-user changes + to attributes will be overwritten. + + In particular, it's usually impossible to use this setting + with eagerly loaded collections (i.e. any lazy='joined' or + 'subquery') since those collections will be cleared for a + new load when encountered in a subsequent result batch. + In the case of 'subquery' loading, the full result for all + rows is fetched which generally defeats the purpose of + :meth:`~sqlalchemy.orm.query.Query.yield_per`. + + Also note that while + :meth:`~sqlalchemy.orm.query.Query.yield_per` will set the + ``stream_results`` execution option to True, currently + this is only understood by + :mod:`~sqlalchemy.dialects.postgresql.psycopg2`, + :mod:`~sqlalchemy.dialects.mysql.mysqldb` and + :mod:`~sqlalchemy.dialects.mysql.pymysql` dialects + which will stream results using server-side cursors + instead of pre-buffering all rows for this query. Other + DBAPIs **pre-buffer all rows** before making them + available. The memory use of raw database rows is much less + than that of an ORM-mapped object, but should still be taken into + consideration when benchmarking. + + ..
seealso:: + + :meth:`.Query.enable_eagerloads` + + """ + self._yield_per = count + self._execution_options = self._execution_options.union( + {"stream_results": True, + "max_row_buffer": count}) + + def get(self, ident): + """Return an instance based on the given primary key identifier, + or ``None`` if not found. + + E.g.:: + + my_user = session.query(User).get(5) + + some_object = session.query(VersionedFoo).get((5, 10)) + + :meth:`~.Query.get` is special in that it provides direct + access to the identity map of the owning :class:`.Session`. + If the given primary key identifier is present + in the local identity map, the object is returned + directly from this collection and no SQL is emitted, + unless the object has been marked fully expired. + If not present, + a SELECT is performed in order to locate the object. + + :meth:`~.Query.get` will also perform a check if + the object is present in the identity map and + marked as expired - a SELECT + is emitted to refresh the object as well as to + ensure that the row is still present. + If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. + + :meth:`~.Query.get` is only used to return a single + mapped instance, not multiple instances or + individual column constructs, and strictly + on a single primary key value. The originating + :class:`.Query` must be constructed in this way, + i.e. against a single mapped entity, + with no additional filtering criterion. Loading + options via :meth:`~.Query.options` may be applied + however, and will be used if the object is not + yet locally present. + + A lazy-loading, many-to-one attribute configured + by :func:`.relationship`, using a simple + foreign-key-to-primary-key criterion, will also use an + operation equivalent to :meth:`~.Query.get` in order to retrieve + the target value from the local identity map + before querying the database. See :doc:`/orm/loading_relationships` + for further details on relationship loading. + + :param ident: A scalar or tuple value representing + the primary key. For a composite primary key, + the order of identifiers corresponds in most cases + to that of the mapped :class:`.Table` object's + primary key columns. For a :func:`.mapper` that + was given the ``primary_key`` argument during + construction, the order of identifiers corresponds + to the elements present in this collection. + + :return: The object instance, or ``None``. + + """ + return self._get_impl(ident, loading.load_on_ident) + + def _get_impl(self, ident, fallback_fn): + # convert composite types to individual args + if hasattr(ident, '__composite_values__'): + ident = ident.__composite_values__() + + ident = util.to_list(ident) + + mapper = self._only_full_mapper_zero("get") + + if len(ident) != len(mapper.primary_key): + raise sa_exc.InvalidRequestError( + "Incorrect number of values in identifier to formulate " + "primary key for query.get(); primary key columns are %s" % + ','.join("'%s'" % c for c in mapper.primary_key)) + + key = mapper.identity_key_from_primary_key(ident) + + if not self._populate_existing and \ + not mapper.always_refresh and \ + self._for_update_arg is None: + + instance = loading.get_from_identity( + self.session, key, attributes.PASSIVE_OFF) + if instance is not None: + self._get_existing_condition() + # reject calls for id in identity map but class + # mismatch.
+ if not issubclass(instance.__class__, mapper.class_): + return None + return instance + + return fallback_fn(self, key) + + @_generative() + def correlate(self, *args): + """Return a :class:`.Query` construct which will correlate the given + FROM clauses to that of an enclosing :class:`.Query` or + :func:`~.expression.select`. + + The method here accepts mapped classes, :func:`.aliased` constructs, + and :func:`.mapper` constructs as arguments, which are resolved into + expression constructs, in addition to appropriate expression + constructs. + + The correlation arguments are ultimately passed to + :meth:`.Select.correlate` after coercion to expression constructs. + + The correlation arguments take effect in such cases + as when :meth:`.Query.from_self` is used, or when + a subquery as returned by :meth:`.Query.subquery` is + embedded in another :func:`~.expression.select` construct. + + """ + + for s in args: + if s is None: + self._correlate = self._correlate.union([None]) + else: + self._correlate = self._correlate.union( + sql_util.surface_selectables(_interpret_as_from(s)) + ) + + @_generative() + def autoflush(self, setting): + """Return a Query with a specific 'autoflush' setting. + + Note that a Session with autoflush=False will + not autoflush, even if this flag is set to True at the + Query level. Therefore this flag is usually used only + to disable autoflush for a specific Query. + + """ + self._autoflush = setting + + @_generative() + def populate_existing(self): + """Return a :class:`.Query` that will expire and refresh all instances + as they are loaded, or reused from the current :class:`.Session`. + + :meth:`.populate_existing` does not improve behavior when + the ORM is used normally - the :class:`.Session` object's usual + behavior of maintaining a transaction and expiring all attributes + after rollback or commit handles object state automatically. + This method is not intended for general use. + + """ + self._populate_existing = True + + @_generative() + def _with_invoke_all_eagers(self, value): + """Set the 'invoke all eagers' flag which causes joined- and + subquery loaders to traverse into already-loaded related objects + and collections. + + Default is that of :attr:`.Query._invoke_all_eagers`. + + """ + self._invoke_all_eagers = value + + def with_parent(self, instance, property=None): + """Add filtering criterion that relates the given instance + to a child object or collection, using its attribute state + as well as an established :func:`.relationship()` + configuration. + + The method uses the :func:`.with_parent` function to generate + the clause, the result of which is passed to :meth:`.Query.filter`. + + Parameters are the same as :func:`.with_parent`, with the exception + that the given property can be None, in which case a search is + performed against this :class:`.Query` object's target mapper. 
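+ + E.g., a sketch loading the ``Address`` objects that belong to a given + ``User`` (assuming a ``User.addresses`` relationship is configured):: + + someuser = session.query(User).get(5) + addresses = session.query(Address).\ + with_parent(someuser, 'addresses').all()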
+ + """ + + if property is None: + mapper_zero = self._mapper_zero() + + mapper = object_mapper(instance) + + for prop in mapper.iterate_properties: + if isinstance(prop, properties.RelationshipProperty) and \ + prop.mapper is mapper_zero: + property = prop + break + else: + raise sa_exc.InvalidRequestError( + "Could not locate a property which relates instances " + "of class '%s' to instances of class '%s'" % + ( + self._mapper_zero().class_.__name__, + instance.__class__.__name__) + ) + + return self.filter(with_parent(instance, property)) + + @_generative() + def add_entity(self, entity, alias=None): + """add a mapped entity to the list of result columns + to be returned.""" + + if alias is not None: + entity = aliased(entity, alias) + + self._entities = list(self._entities) + m = _MapperEntity(self, entity) + self._set_entity_selectables([m]) + + @_generative() + def with_session(self, session): + """Return a :class:`.Query` that will use the given :class:`.Session`. + + While the :class:`.Query` object is normally instantiated using the + :meth:`.Session.query` method, it is legal to build the :class:`.Query` + directly without necessarily using a :class:`.Session`. Such a + :class:`.Query` object, or any :class:`.Query` already associated + with a different :class:`.Session`, can produce a new :class:`.Query` + object associated with a target session using this method:: + + from sqlalchemy.orm import Query + + query = Query([MyClass]).filter(MyClass.id == 5) + + result = query.with_session(my_session).one() + + """ + + self.session = session + + def from_self(self, *entities): + r"""return a Query that selects from this Query's + SELECT statement. + + :meth:`.Query.from_self` essentially turns the SELECT statement + into a SELECT of itself. Given a query such as:: + + q = session.query(User).filter(User.name.like('e%')) + + Given the :meth:`.Query.from_self` version:: + + q = session.query(User).filter(User.name.like('e%')).from_self() + + This query renders as: + + .. sourcecode:: sql + + SELECT anon_1.user_id AS anon_1_user_id, + anon_1.user_name AS anon_1_user_name + FROM (SELECT "user".id AS user_id, "user".name AS user_name + FROM "user" + WHERE "user".name LIKE :name_1) AS anon_1 + + There are lots of cases where :meth:`.Query.from_self` may be useful. + A simple one is where above, we may want to apply a row LIMIT to + the set of user objects we query against, and then apply additional + joins against that row-limited set:: + + q = session.query(User).filter(User.name.like('e%')).\ + limit(5).from_self().\ + join(User.addresses).filter(Address.email.like('q%')) + + The above query joins to the ``Address`` entity but only against the + first five results of the ``User`` query: + + .. sourcecode:: sql + + SELECT anon_1.user_id AS anon_1_user_id, + anon_1.user_name AS anon_1_user_name + FROM (SELECT "user".id AS user_id, "user".name AS user_name + FROM "user" + WHERE "user".name LIKE :name_1 + LIMIT :param_1) AS anon_1 + JOIN address ON anon_1.user_id = address.user_id + WHERE address.email LIKE :email_1 + + **Automatic Aliasing** + + Another key behavior of :meth:`.Query.from_self` is that it applies + **automatic aliasing** to the entities inside the subquery, when + they are referenced on the outside. 
Above, if we continue to + refer to the ``User`` entity without any additional aliasing applied + to it, those references will be in terms of the subquery:: + + q = session.query(User).filter(User.name.like('e%')).\ + limit(5).from_self().\ + join(User.addresses).filter(Address.email.like('q%')).\ + order_by(User.name) + + The ORDER BY against ``User.name`` is aliased to be in terms of the + inner subquery: + + .. sourcecode:: sql + + SELECT anon_1.user_id AS anon_1_user_id, + anon_1.user_name AS anon_1_user_name + FROM (SELECT "user".id AS user_id, "user".name AS user_name + FROM "user" + WHERE "user".name LIKE :name_1 + LIMIT :param_1) AS anon_1 + JOIN address ON anon_1.user_id = address.user_id + WHERE address.email LIKE :email_1 ORDER BY anon_1.user_name + + The automatic aliasing feature only works in a **limited** way, + for simple filters and orderings. More ambitious constructions + such as referring to the entity in joins should prefer to use + explicit subquery objects, typically making use of the + :meth:`.Query.subquery` method to produce an explicit subquery object. + Always test the structure of queries by viewing the SQL to ensure + a particular structure does what's expected! + + **Changing the Entities** + + :meth:`.Query.from_self` also includes the ability to modify what + columns are being queried. In our example, we want ``User.id`` + to be queried by the inner query, so that we can join to the + ``Address`` entity on the outside, but we only want the outer + query to return the ``Address.email`` column:: + + q = session.query(User).filter(User.name.like('e%')).\ + limit(5).from_self(Address.email).\ + join(User.addresses).filter(Address.email.like('q%')) + + yielding: + + .. sourcecode:: sql + + SELECT address.email AS address_email + FROM (SELECT "user".id AS user_id, "user".name AS user_name + FROM "user" + WHERE "user".name LIKE :name_1 + LIMIT :param_1) AS anon_1 + JOIN address ON anon_1.user_id = address.user_id + WHERE address.email LIKE :email_1 + + **Looking out for Inner / Outer Columns** + + Keep in mind that when referring to columns that originate from + inside the subquery, we need to ensure they are present in the + columns clause of the subquery itself; this is an ordinary aspect of + SQL. For example, if we wanted to load from a joined entity inside + the subquery using :func:`.contains_eager`, we need to add those + columns. Below illustrates a join of ``Address`` to ``User``, + then a subquery, and then we'd like :func:`.contains_eager` to access + the ``User`` columns:: + + q = session.query(Address).join(Address.user).\ + filter(User.name.like('e%')) + + q = q.add_entity(User).from_self().\ + options(contains_eager(Address.user)) + + We use :meth:`.Query.add_entity` above **before** we call + :meth:`.Query.from_self` so that the ``User`` columns are present + in the inner subquery and are thus available to the + :func:`.contains_eager` modifier we are using on the outside, + producing: + + ..
sourcecode:: sql + + SELECT anon_1.address_id AS anon_1_address_id, + anon_1.address_email AS anon_1_address_email, + anon_1.address_user_id AS anon_1_address_user_id, + anon_1.user_id AS anon_1_user_id, + anon_1.user_name AS anon_1_user_name + FROM ( + SELECT address.id AS address_id, + address.email AS address_email, + address.user_id AS address_user_id, + "user".id AS user_id, + "user".name AS user_name + FROM address JOIN "user" ON "user".id = address.user_id + WHERE "user".name LIKE :name_1) AS anon_1 + + If we didn't call ``add_entity(User)``, but still asked + :func:`.contains_eager` to load the ``User`` entity, it would be + forced to add the table on the outside without the correct + join criteria - note the ``anon1, "user"`` phrase at + the end: + + .. sourcecode:: sql + + -- incorrect query + SELECT anon_1.address_id AS anon_1_address_id, + anon_1.address_email AS anon_1_address_email, + anon_1.address_user_id AS anon_1_address_user_id, + "user".id AS user_id, + "user".name AS user_name + FROM ( + SELECT address.id AS address_id, + address.email AS address_email, + address.user_id AS address_user_id + FROM address JOIN "user" ON "user".id = address.user_id + WHERE "user".name LIKE :name_1) AS anon_1, "user" + + :param \*entities: optional list of entities which will replace + those being selected. + + """ + fromclause = self.with_labels().enable_eagerloads(False).\ + statement.correlate(None) + q = self._from_selectable(fromclause) + q._enable_single_crit = False + q._select_from_entity = self._entity_zero() + if entities: + q._set_entities(entities) + return q + + @_generative() + def _set_enable_single_crit(self, val): + self._enable_single_crit = val + + @_generative() + def _from_selectable(self, fromclause): + for attr in ( + '_statement', '_criterion', + '_order_by', '_group_by', + '_limit', '_offset', + '_joinpath', '_joinpoint', + '_distinct', '_having', + '_prefixes', '_suffixes' + ): + self.__dict__.pop(attr, None) + self._set_select_from([fromclause], True) + + # this enables clause adaptation for non-ORM + # expressions. + self._orm_only_from_obj_alias = False + + old_entities = self._entities + self._entities = [] + for e in old_entities: + e.adapt_to_selectable(self, self._from_obj[0]) + + def values(self, *columns): + """Return an iterator yielding result tuples corresponding + to the given list of columns""" + + if not columns: + return iter(()) + q = self._clone() + q._set_entities(columns, entity_wrapper=_ColumnEntity) + if not q._yield_per: + q._yield_per = 10 + return iter(q) + _values = values + + def value(self, column): + """Return a scalar result corresponding to the given + column expression.""" + try: + return next(self.values(column))[0] + except StopIteration: + return None + + @_generative() + def with_entities(self, *entities): + """Return a new :class:`.Query` replacing the SELECT list with the + given entities. + + e.g.:: + + # Users, filtered on some arbitrary criterion + # and then ordered by related email address + q = session.query(User).\ + join(User.address).\ + filter(User.name.like('%ed%')).\ + order_by(Address.email) + + # given *only* User.id==5, Address.email, and 'q', what + # would the *next* User in the result be ? + subq = q.with_entities(Address.email).\ + order_by(None).\ + filter(User.id==5).\ + subquery() + q = q.join((subq, subq.c.email < Address.email)).\ + limit(1) + + .. 
versionadded:: 0.6.5 + + """ + self._set_entities(entities) + + @_generative() + def add_columns(self, *column): + """Add one or more column expressions to the list + of result columns to be returned.""" + + self._entities = list(self._entities) + l = len(self._entities) + for c in column: + _ColumnEntity(self, c) + # _ColumnEntity may add many entities if the + # given arg is a FROM clause + self._set_entity_selectables(self._entities[l:]) + + @util.pending_deprecation("0.7", + ":meth:`.add_column` is superseded " + "by :meth:`.add_columns`", + False) + def add_column(self, column): + """Add a column expression to the list of result columns to be + returned. + + Pending deprecation: :meth:`.add_column` will be superseded by + :meth:`.add_columns`. + + """ + return self.add_columns(column) + + def options(self, *args): + """Return a new Query object, applying the given list of + mapper options. + + Most supplied options regard changing how column- and + relationship-mapped attributes are loaded. See the sections + :ref:`deferred` and :doc:`/orm/loading_relationships` for reference + documentation. + + """ + return self._options(False, *args) + + def _conditional_options(self, *args): + return self._options(True, *args) + + @_generative() + def _options(self, conditional, *args): + # most MapperOptions write to the '_attributes' dictionary, + # so copy that as well + self._attributes = self._attributes.copy() + opts = tuple(util.flatten_iterator(args)) + self._with_options = self._with_options + opts + if conditional: + for opt in opts: + opt.process_query_conditionally(self) + else: + for opt in opts: + opt.process_query(self) + + def with_transformation(self, fn): + """Return a new :class:`.Query` object transformed by + the given function. + + E.g.:: + + def filter_something(criterion): + def transform(q): + return q.filter(criterion) + return transform + + q = q.with_transformation(filter_something(x==5)) + + This allows ad-hoc recipes to be created for :class:`.Query` + objects. See the example at :ref:`hybrid_transformers`. + + .. versionadded:: 0.7.4 + + """ + return fn(self) + + @_generative() + def with_hint(self, selectable, text, dialect_name='*'): + """Add an indexing or other executional context + hint for the given entity or selectable to + this :class:`.Query`. + + Functionality is passed straight through to + :meth:`~sqlalchemy.sql.expression.Select.with_hint`, + with the addition that ``selectable`` can be a + :class:`.Table`, :class:`.Alias`, or ORM entity / mapped class + /etc. + + .. seealso:: + + :meth:`.Query.with_statement_hint` + + """ + if selectable is not None: + selectable = inspect(selectable).selectable + + self._with_hints += ((selectable, text, dialect_name),) + + def with_statement_hint(self, text, dialect_name='*'): + """add a statement hint to this :class:`.Select`. + + This method is similar to :meth:`.Select.with_hint` except that + it does not require an individual table, and instead applies to the + statement as a whole. + + This feature calls down into :meth:`.Select.with_statement_hint`. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :meth:`.Query.with_hint` + + """ + return self.with_hint(None, text, dialect_name) + + @_generative() + def execution_options(self, **kwargs): + """ Set non-SQL options which take effect during execution. + + The options are the same as those accepted by + :meth:`.Connection.execution_options`. 
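+ + E.g., a sketch passing a Core-level option through to the eventual + connection:: + + q = session.query(User).execution_options(stream_results=True)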
+ + Note that the ``stream_results`` execution option is enabled + automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()` + method is used. + + """ + self._execution_options = self._execution_options.union(kwargs) + + @_generative() + def with_lockmode(self, mode): + """Return a new :class:`.Query` object with the specified "locking mode", + which essentially refers to the ``FOR UPDATE`` clause. + + .. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`. + + :param mode: a string representing the desired locking mode. + Valid values are: + + * ``None`` - translates to no lockmode + + * ``'update'`` - translates to ``FOR UPDATE`` + (standard SQL, supported by most dialects) + + * ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT`` + (supported by Oracle, PostgreSQL 8.1 upwards) + + * ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL), + and ``FOR SHARE`` (for PostgreSQL) + + .. seealso:: + + :meth:`.Query.with_for_update` - improved API for + specifying the ``FOR UPDATE`` clause. + + """ + self._for_update_arg = LockmodeArg.parse_legacy_query(mode) + + @_generative() + def with_for_update(self, read=False, nowait=False, of=None, + skip_locked=False, key_share=False): + """return a new :class:`.Query` with the specified options for the + ``FOR UPDATE`` clause. + + The behavior of this method is identical to that of + :meth:`.SelectBase.with_for_update`. When called with no arguments, + the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause + appended. When additional arguments are specified, backend-specific + options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE`` + can take effect. + + E.g.:: + + q = sess.query(User).with_for_update(nowait=True, of=User) + + The above query on a PostgreSQL backend will render like:: + + SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT + + .. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes + the :meth:`.Query.with_lockmode` method. + + .. seealso:: + + :meth:`.GenerativeSelect.with_for_update` - Core level method with + full argument and behavioral description. + + """ + self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of, + skip_locked=skip_locked, + key_share=key_share) + + @_generative() + def params(self, *args, **kwargs): + r"""add values for bind parameters which may have been + specified in filter(). + + parameters may be specified using \**kwargs, or optionally a single + dictionary as the first positional argument. The reason for both is + that \**kwargs is convenient, however some parameter dictionaries + contain unicode keys in which case \**kwargs cannot be used. + + """ + if len(args) == 1: + kwargs.update(args[0]) + elif len(args) > 0: + raise sa_exc.ArgumentError( + "params() takes zero or one positional argument, " + "which is a dictionary.") + self._params = self._params.copy() + self._params.update(kwargs) + + @_generative(_no_statement_condition, _no_limit_offset) + def filter(self, *criterion): + r"""apply the given filtering criterion to a copy + of this :class:`.Query`, using SQL expressions. + + e.g.:: + + session.query(MyClass).filter(MyClass.name == 'some name') + + Multiple criteria may be specified as comma separated; the effect + is that they will be joined together using the :func:`.and_` + function:: + + session.query(MyClass).\ + filter(MyClass.name == 'some name', MyClass.id > 5) + + The criterion is any SQL expression object applicable to the + WHERE clause of a select. 
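+ + E.g., a sketch binding a value into a textual filter (assuming + ``text`` is imported from ``sqlalchemy``):: + + q = session.query(User).filter(text("id < :value")) + result = q.params(value=5).all()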
String expressions are coerced + into SQL expression constructs via the :func:`.text` construct. + + .. seealso:: + + :meth:`.Query.filter_by` - filter on keyword expressions. + + """ + for criterion in list(criterion): + criterion = expression._expression_literal_as_text(criterion) + + criterion = self._adapt_clause(criterion, True, True) + + if self._criterion is not None: + self._criterion = self._criterion & criterion + else: + self._criterion = criterion + + def filter_by(self, **kwargs): + r"""apply the given filtering criterion to a copy + of this :class:`.Query`, using keyword expressions. + + e.g.:: + + session.query(MyClass).filter_by(name = 'some name') + + Multiple criteria may be specified as comma separated; the effect + is that they will be joined together using the :func:`.and_` + function:: + + session.query(MyClass).\ + filter_by(name = 'some name', id = 5) + + The keyword expressions are extracted from the primary + entity of the query, or the last entity that was the + target of a call to :meth:`.Query.join`. + + .. seealso:: + + :meth:`.Query.filter` - filter on SQL expressions. + + """ + + clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value + for key, value in kwargs.items()] + return self.filter(sql.and_(*clauses)) + + @_generative(_no_statement_condition, _no_limit_offset) + def order_by(self, *criterion): + """apply one or more ORDER BY criterion to the query and return + the newly resulting ``Query`` + + All existing ORDER BY settings can be suppressed by + passing ``None`` - this will suppress any ORDER BY configured + on mappers as well. + + Alternatively, passing False will reset ORDER BY and additionally + re-allow default mapper.order_by to take place. Note mapper.order_by + is deprecated. + + """ + + if len(criterion) == 1: + if criterion[0] is False: + if '_order_by' in self.__dict__: + self._order_by = False + return + if criterion[0] is None: + self._order_by = None + return + + criterion = self._adapt_col_list(criterion) + + if self._order_by is False or self._order_by is None: + self._order_by = criterion + else: + self._order_by = self._order_by + criterion + + @_generative(_no_statement_condition, _no_limit_offset) + def group_by(self, *criterion): + """apply one or more GROUP BY criterion to the query and return + the newly resulting :class:`.Query` + + All existing GROUP BY settings can be suppressed by + passing ``None`` - this will suppress any GROUP BY configured + on mappers as well. + + .. versionadded:: 1.1 GROUP BY can be cancelled by passing None, + in the same way as ORDER BY. + + """ + + if len(criterion) == 1: + if criterion[0] is None: + self._group_by = False + return + + criterion = list(chain(*[_orm_columns(c) for c in criterion])) + criterion = self._adapt_col_list(criterion) + + if self._group_by is False: + self._group_by = criterion + else: + self._group_by = self._group_by + criterion + + @_generative(_no_statement_condition, _no_limit_offset) + def having(self, criterion): + r"""apply a HAVING criterion to the query and return the + newly resulting :class:`.Query`. + + :meth:`~.Query.having` is used in conjunction with + :meth:`~.Query.group_by`. 
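Before the ``having()`` documentation continues, a sketch of the AND-conjunction behavior of ``filter()`` / ``filter_by()`` and the ``None`` reset of ``order_by()`` described above (the ``User`` mapping and engine are assumed for illustration):

.. code:: python

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String)

    engine = create_engine('sqlite://')
    session = sessionmaker(bind=engine)()

    # successive filter()/filter_by() calls are joined with AND
    q = (session.query(User)
         .filter_by(name='ed')
         .filter(User.id > 5)
         .order_by(User.name, User.id))

    # passing None cancels all ORDER BY, including mapper-configured ordering
    q = q.order_by(None)
    print(q)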
+ + HAVING criterion makes it possible to use filters on aggregate + functions like COUNT, SUM, AVG, MAX, and MIN, eg.:: + + q = session.query(User.id).\ + join(User.addresses).\ + group_by(User.id).\ + having(func.count(Address.id) > 2) + + """ + + criterion = expression._expression_literal_as_text(criterion) + + if criterion is not None and \ + not isinstance(criterion, sql.ClauseElement): + raise sa_exc.ArgumentError( + "having() argument must be of type " + "sqlalchemy.sql.ClauseElement or string") + + criterion = self._adapt_clause(criterion, True, True) + + if self._having is not None: + self._having = self._having & criterion + else: + self._having = criterion + + def _set_op(self, expr_fn, *q): + return self._from_selectable( + expr_fn(*([self] + list(q))) + )._set_enable_single_crit(False) + + def union(self, *q): + """Produce a UNION of this Query against one or more queries. + + e.g.:: + + q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar') + q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo') + + q3 = q1.union(q2) + + The method accepts multiple Query objects so as to control + the level of nesting. A series of ``union()`` calls such as:: + + x.union(y).union(z).all() + + will nest on each ``union()``, and produces:: + + SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION + SELECT * FROM y) UNION SELECT * FROM Z) + + Whereas:: + + x.union(y, z).all() + + produces:: + + SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION + SELECT * FROM Z) + + Note that many database backends do not allow ORDER BY to + be rendered on a query called within UNION, EXCEPT, etc. + To disable all ORDER BY clauses including those configured + on mappers, issue ``query.order_by(None)`` - the resulting + :class:`.Query` object will not render ORDER BY within + its SELECT statement. + + """ + return self._set_op(expression.union, *q) + + def union_all(self, *q): + """Produce a UNION ALL of this Query against one or more queries. + + Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See + that method for usage examples. + + """ + return self._set_op(expression.union_all, *q) + + def intersect(self, *q): + """Produce an INTERSECT of this Query against one or more queries. + + Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See + that method for usage examples. + + """ + return self._set_op(expression.intersect, *q) + + def intersect_all(self, *q): + """Produce an INTERSECT ALL of this Query against one or more queries. + + Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See + that method for usage examples. + + """ + return self._set_op(expression.intersect_all, *q) + + def except_(self, *q): + """Produce an EXCEPT of this Query against one or more queries. + + Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See + that method for usage examples. + + """ + return self._set_op(expression.except_, *q) + + def except_all(self, *q): + """Produce an EXCEPT ALL of this Query against one or more queries. + + Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See + that method for usage examples. + + """ + return self._set_op(expression.except_all, *q) + + def join(self, *props, **kwargs): + r"""Create a SQL JOIN against this :class:`.Query` object's criterion + and apply generatively, returning the newly resulting :class:`.Query`. 
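A sketch tying together the ``group_by()`` / ``having()`` and set-operation methods documented above, before the ``join()`` narrative begins in earnest (models and engine are assumed for illustration):

.. code:: python

    from sqlalchemy import Column, Integer, String, create_engine, func
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    # HAVING filters on an aggregate after GROUP BY
    counts = (session.query(User.name, func.count(User.id))
              .group_by(User.name)
              .having(func.count(User.id) > 1))

    # a single union() call keeps the UNION flat rather than nested;
    # order_by(None) strips ORDER BY, which many backends reject inside UNION
    eds = session.query(User).filter(User.name == 'ed').order_by(None)
    freds = session.query(User).filter(User.name == 'fred').order_by(None)
    print(eds.union(freds).all())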
+ + **Simple Relationship Joins** + + Consider a mapping between two classes ``User`` and ``Address``, + with a relationship ``User.addresses`` representing a collection + of ``Address`` objects associated with each ``User``. The most + common usage of :meth:`~.Query.join` is to create a JOIN along this + relationship, using the ``User.addresses`` attribute as an indicator + for how this should occur:: + + q = session.query(User).join(User.addresses) + + Where above, the call to :meth:`~.Query.join` along ``User.addresses`` + will result in SQL equivalent to:: + + SELECT user.* FROM user JOIN address ON user.id = address.user_id + + In the above example we refer to ``User.addresses`` as passed to + :meth:`~.Query.join` as the *on clause*, that is, it indicates + how the "ON" portion of the JOIN should be constructed. For a + single-entity query such as the one above (i.e. we start by selecting + only from ``User`` and nothing else), the relationship can also be + specified by its string name:: + + q = session.query(User).join("addresses") + + :meth:`~.Query.join` can also accommodate multiple + "on clause" arguments to produce a chain of joins, such as below + where a join across four related entities is constructed:: + + q = session.query(User).join("orders", "items", "keywords") + + The above would be shorthand for three separate calls to + :meth:`~.Query.join`, each using an explicit attribute to indicate + the source entity:: + + q = session.query(User).\ + join(User.orders).\ + join(Order.items).\ + join(Item.keywords) + + **Joins to a Target Entity or Selectable** + + A second form of :meth:`~.Query.join` allows any mapped entity + or core selectable construct as a target. In this usage, + :meth:`~.Query.join` will attempt + to create a JOIN along the natural foreign key relationship between + two entities:: + + q = session.query(User).join(Address) + + The above calling form of :meth:`~.Query.join` will raise an error if + either there are no foreign keys between the two entities, or if + there are multiple foreign key linkages between them. In the + above calling form, :meth:`~.Query.join` is called upon to + create the "on clause" automatically for us. The target can + be any mapped entity or selectable, such as a :class:`.Table`:: + + q = session.query(User).join(addresses_table) + + **Joins to a Target with an ON Clause** + + The third calling form allows both the target entity as well + as the ON clause to be passed explicitly. Suppose for + example we wanted to join to ``Address`` twice, using + an alias the second time. 
We use :func:`~sqlalchemy.orm.aliased` + to create a distinct alias of ``Address``, and join + to it using the ``target, onclause`` form, so that the + alias can be specified explicitly as the target along with + the relationship to instruct how the ON clause should proceed:: + + a_alias = aliased(Address) + + q = session.query(User).\ + join(User.addresses).\ + join(a_alias, User.addresses).\ + filter(Address.email_address=='ed@foo.com').\ + filter(a_alias.email_address=='ed@bar.com') + + Where above, the generated SQL would be similar to:: + + SELECT user.* FROM user + JOIN address ON user.id = address.user_id + JOIN address AS address_1 ON user.id=address_1.user_id + WHERE address.email_address = :email_address_1 + AND address_1.email_address = :email_address_2 + + The two-argument calling form of :meth:`~.Query.join` + also allows us to construct arbitrary joins with SQL-oriented + "on clause" expressions, not relying upon configured relationships + at all. Any SQL expression can be passed as the ON clause + when using the two-argument form, which should refer to the target + entity in some way as well as an applicable source entity:: + + q = session.query(User).join(Address, User.id==Address.user_id) + + .. versionchanged:: 0.7 + In SQLAlchemy 0.6 and earlier, the two argument form of + :meth:`~.Query.join` requires the usage of a tuple: + ``query(User).join((Address, User.id==Address.user_id))``\ . + This calling form is accepted in 0.7 and further, though + is not necessary unless multiple join conditions are passed to + a single :meth:`~.Query.join` call, which itself is also not + generally necessary as it is now equivalent to multiple + calls (this wasn't always the case). + + **Advanced Join Targeting and Adaption** + + There is a lot of flexibility in what the "target" can be when using + :meth:`~.Query.join`. As noted previously, it also accepts + :class:`.Table` constructs and other selectables such as + :func:`.alias` and :func:`.select` constructs, with either the one + or two-argument forms:: + + addresses_q = select([Address.user_id]).\ + where(Address.email_address.endswith("@bar.com")).\ + alias() + + q = session.query(User).\ + join(addresses_q, addresses_q.c.user_id==User.id) + + :meth:`~.Query.join` also features the ability to *adapt* a + :meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target + selectable. Below we construct a JOIN from ``User`` to a subquery + against ``Address``, allowing the relationship denoted by + ``User.addresses`` to *adapt* itself to the altered target:: + + address_subq = session.query(Address).\ + filter(Address.email_address == 'ed@foo.com').\ + subquery() + + q = session.query(User).join(address_subq, User.addresses) + + Producing SQL similar to:: + + SELECT user.* FROM user + JOIN ( + SELECT address.id AS id, + address.user_id AS user_id, + address.email_address AS email_address + FROM address + WHERE address.email_address = :email_address_1 + ) AS anon_1 ON user.id = anon_1.user_id + + The above form allows one to fall back onto an explicit ON + clause at any time:: + + q = session.query(User).\ + join(address_subq, User.id==address_subq.c.user_id) + + **Controlling what to Join From** + + While :meth:`~.Query.join` exclusively deals with the "right" + side of the JOIN, we can also control the "left" side, in those + cases where it's needed, using :meth:`~.Query.select_from`. 
+ + Below we construct a query against ``Address`` but can still + make usage of ``User.addresses`` as our ON clause by instructing + the :class:`.Query` to select first from the ``User`` + entity:: + + q = session.query(Address).select_from(User).\ + join(User.addresses).\ + filter(User.name == 'ed') + + Which will produce SQL similar to:: + + SELECT address.* FROM user + JOIN address ON user.id=address.user_id + WHERE user.name = :name_1 + + **Constructing Aliases Anonymously** + + :meth:`~.Query.join` can construct anonymous aliases + using the ``aliased=True`` flag. This feature is useful + when a query is being joined algorithmically, such as + when querying self-referentially to an arbitrary depth:: + + q = session.query(Node).\ + join("children", "children", aliased=True) + + When ``aliased=True`` is used, the actual "alias" construct + is not explicitly available. To work with it, methods such as + :meth:`.Query.filter` will adapt the incoming entity to + the last join point:: + + q = session.query(Node).\ + join("children", "children", aliased=True).\ + filter(Node.name == 'grandchild 1') + + When using automatic aliasing, the ``from_joinpoint=True`` + argument can allow a multi-node join to be broken into + multiple calls to :meth:`~.Query.join`, so that + each path along the way can be further filtered:: + + q = session.query(Node).\ + join("children", aliased=True).\ + filter(Node.name == 'child 1').\ + join("children", aliased=True, from_joinpoint=True).\ + filter(Node.name == 'grandchild 1') + + The filtering aliases above can then be reset back to the + original ``Node`` entity using :meth:`~.Query.reset_joinpoint`:: + + q = session.query(Node).\ + join("children", "children", aliased=True).\ + filter(Node.name == 'grandchild 1').\ + reset_joinpoint().\ + filter(Node.name == 'parent 1') + + For an example of ``aliased=True``, see the distribution + example :ref:`examples_xmlpersistence` which illustrates + an XPath-like query system using algorithmic joins. + + :param \*props: A collection of one or more join conditions, + each consisting of a relationship-bound attribute or string + relationship name representing an "on clause", or a single + target entity, or a tuple in the form of ``(target, onclause)``. + A special two-argument calling form of the form ``target, onclause`` + is also accepted. + :param aliased=False: If True, indicate that the JOIN target should be + anonymously aliased. Subsequent calls to :meth:`~.Query.filter` + and similar will adapt the incoming criterion to the target + alias, until :meth:`~.Query.reset_joinpoint` is called. + :param isouter=False: If True, the join used will be a left outer join, + just as if the :meth:`.Query.outerjoin` method were called. This + flag is here to maintain consistency with the same flag as accepted + by :meth:`.FromClause.join` and other Core constructs. + + + .. versionadded:: 1.0.0 + + :param full=False: render FULL OUTER JOIN; implies ``isouter``. + + .. versionadded:: 1.1 + + :param from_joinpoint=False: When using ``aliased=True``, a setting + of True here will cause the join to be from the most recent + joined target, rather than starting back from the original + FROM clauses of the query. + + .. seealso:: + + :ref:`ormtutorial_joins` in the ORM tutorial. + + :ref:`inheritance_toplevel` for details on how + :meth:`~.Query.join` is used for inheritance relationships.
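As a concrete, runnable companion to the ``join()`` calling forms documented above, a sketch; the ``User``/``Address`` mapping and SQLite engine here are illustrative assumptions:

.. code:: python

    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import aliased, relationship, sessionmaker

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String)
        addresses = relationship('Address')

    class Address(Base):
        __tablename__ = 'addresses'
        id = Column(Integer, primary_key=True)
        user_id = Column(Integer, ForeignKey('users.id'))
        email_address = Column(String)

    engine = create_engine('sqlite://')
    session = sessionmaker(bind=engine)()

    # relationship-bound form, and the explicit (target, onclause) form
    q1 = session.query(User).join(User.addresses)
    q2 = session.query(User).join(Address, User.id == Address.user_id)

    # joining to the same table twice via an alias, as documented above
    a_alias = aliased(Address)
    q3 = (session.query(User)
          .join(User.addresses)
          .join(a_alias, User.addresses)
          .filter(Address.email_address == 'ed@foo.com')
          .filter(a_alias.email_address == 'ed@bar.com'))
    print(q3)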
+ + :func:`.orm.join` - a standalone ORM-level join function, + used internally by :meth:`.Query.join`, which in previous + SQLAlchemy versions was the primary ORM-level joining interface. + + """ + aliased, from_joinpoint, isouter, full = kwargs.pop('aliased', False),\ + kwargs.pop('from_joinpoint', False),\ + kwargs.pop('isouter', False),\ + kwargs.pop('full', False) + if kwargs: + raise TypeError("unknown arguments: %s" % + ', '.join(sorted(kwargs))) + return self._join(props, + outerjoin=isouter, full=full, + create_aliases=aliased, + from_joinpoint=from_joinpoint) + + def outerjoin(self, *props, **kwargs): + """Create a left outer join against this ``Query`` object's criterion + and apply generatively, returning the newly resulting ``Query``. + + Usage is the same as the ``join()`` method. + + """ + aliased, from_joinpoint, full = kwargs.pop('aliased', False), \ + kwargs.pop('from_joinpoint', False), \ + kwargs.pop('full', False) + if kwargs: + raise TypeError("unknown arguments: %s" % + ', '.join(sorted(kwargs))) + return self._join(props, + outerjoin=True, full=full, create_aliases=aliased, + from_joinpoint=from_joinpoint) + + def _update_joinpoint(self, jp): + self._joinpoint = jp + # copy backwards to the root of the _joinpath + # dict, so that no existing dict in the path is mutated + while 'prev' in jp: + f, prev = jp['prev'] + prev = prev.copy() + prev[f] = jp + jp['prev'] = (f, prev) + jp = prev + self._joinpath = jp + + @_generative(_no_statement_condition, _no_limit_offset) + def _join(self, keys, outerjoin, full, create_aliases, from_joinpoint): + """consumes arguments from join() or outerjoin(), places them into a + consistent format with which to form the actual JOIN constructs. + + """ + + if not from_joinpoint: + self._reset_joinpoint() + + if len(keys) == 2 and \ + isinstance(keys[0], (expression.FromClause, + type, AliasedClass)) and \ + isinstance(keys[1], (str, expression.ClauseElement, + interfaces.PropComparator)): + # detect 2-arg form of join and + # convert to a tuple. + keys = (keys,) + + keylist = util.to_list(keys) + for idx, arg1 in enumerate(keylist): + if isinstance(arg1, tuple): + # "tuple" form of join, multiple + # tuples are accepted as well. The simpler + # "2-arg" form is preferred. May deprecate + # the "tuple" usage. + arg1, arg2 = arg1 + else: + arg2 = None + + # determine onclause/right_entity. there + # is a little bit of legacy behavior still at work here + # which means they might be in either order. may possibly + # lock this down to (right_entity, onclause) in 0.6. 
+ if isinstance( + arg1, (interfaces.PropComparator, util.string_types)): + right_entity, onclause = arg2, arg1 + else: + right_entity, onclause = arg1, arg2 + + left_entity = prop = None + + if isinstance(onclause, interfaces.PropComparator): + of_type = getattr(onclause, '_of_type', None) + else: + of_type = None + + if isinstance(onclause, util.string_types): + left_entity = self._joinpoint_zero() + + descriptor = _entity_descriptor(left_entity, onclause) + onclause = descriptor + + # check for q.join(Class.propname, from_joinpoint=True) + # and Class is that of the current joinpoint + elif from_joinpoint and \ + isinstance(onclause, interfaces.PropComparator): + left_entity = onclause._parententity + + info = inspect(self._joinpoint_zero()) + left_mapper, left_selectable, left_is_aliased = \ + getattr(info, 'mapper', None), \ + info.selectable, \ + getattr(info, 'is_aliased_class', None) + + if left_mapper is left_entity: + left_entity = self._joinpoint_zero() + descriptor = _entity_descriptor(left_entity, + onclause.key) + onclause = descriptor + + if isinstance(onclause, interfaces.PropComparator): + if right_entity is None: + if of_type: + right_entity = of_type + else: + right_entity = onclause.property.mapper + + left_entity = onclause._parententity + + prop = onclause.property + if not isinstance(onclause, attributes.QueryableAttribute): + onclause = prop + + if not create_aliases: + # check for this path already present. + # don't render in that case. + edge = (left_entity, right_entity, prop.key) + if edge in self._joinpoint: + # The child's prev reference might be stale -- + # it could point to a parent older than the + # current joinpoint. If this is the case, + # then we need to update it and then fix the + # tree's spine with _update_joinpoint. Copy + # and then mutate the child, which might be + # shared by a different query object. + jp = self._joinpoint[edge].copy() + jp['prev'] = (edge, self._joinpoint) + self._update_joinpoint(jp) + + if idx == len(keylist) - 1: + util.warn( + "Pathed join target %s has already " + "been joined to; skipping" % prop) + continue + + elif onclause is not None and right_entity is None: + # TODO: no coverage here + raise NotImplementedError("query.join(a==b) not supported.") + + self._join_left_to_right( + left_entity, + right_entity, onclause, + outerjoin, full, create_aliases, prop) + + def _join_left_to_right(self, left, right, + onclause, outerjoin, full, create_aliases, prop): + """append a JOIN to the query's from clause.""" + + self._polymorphic_adapters = self._polymorphic_adapters.copy() + + if left is None: + if self._from_obj: + left = self._from_obj[0] + elif self._entities: + left = self._entities[0].entity_zero_or_selectable + + if left is None: + if self._entities: + problem = "Don't know how to join from %s" % self._entities[0] + else: + problem = "No entities to join from" + + raise sa_exc.InvalidRequestError( + "%s; please use " + "select_from() to establish the left " + "entity/selectable of this join" % problem) + + if left is right and \ + not create_aliases: + raise sa_exc.InvalidRequestError( + "Can't construct a join from %s to %s, they " + "are the same entity" % + (left, right)) + + l_info = inspect(left) + r_info = inspect(right) + + overlap = False + if not create_aliases: + right_mapper = getattr(r_info, "mapper", None) + # if the target is a joined inheritance mapping, + # be more liberal about auto-aliasing. 
+ if right_mapper and ( + right_mapper.with_polymorphic or + isinstance(right_mapper.mapped_table, expression.Join) + ): + for from_obj in self._from_obj or [l_info.selectable]: + if sql_util.selectables_overlap( + l_info.selectable, from_obj) and \ + sql_util.selectables_overlap( + from_obj, r_info.selectable): + overlap = True + break + + if (overlap or not create_aliases) and \ + l_info.selectable is r_info.selectable: + raise sa_exc.InvalidRequestError( + "Can't join table/selectable '%s' to itself" % + l_info.selectable) + + right, onclause = self._prepare_right_side( + r_info, right, onclause, + create_aliases, + prop, overlap) + + # if joining on a MapperProperty path, + # track the path to prevent redundant joins + if not create_aliases and prop: + self._update_joinpoint({ + '_joinpoint_entity': right, + 'prev': ((left, right, prop.key), self._joinpoint) + }) + else: + self._joinpoint = {'_joinpoint_entity': right} + + self._join_to_left(l_info, left, right, onclause, outerjoin, full) + + def _prepare_right_side(self, r_info, right, onclause, create_aliases, + prop, overlap): + info = r_info + + right_mapper, right_selectable, right_is_aliased = \ + getattr(info, 'mapper', None), \ + info.selectable, \ + getattr(info, 'is_aliased_class', False) + + if right_mapper: + self._join_entities += (info, ) + + if right_mapper and prop and \ + not right_mapper.common_parent(prop.mapper): + raise sa_exc.InvalidRequestError( + "Join target %s does not correspond to " + "the right side of join condition %s" % (right, onclause) + ) + + if not right_mapper and prop: + right_mapper = prop.mapper + + need_adapter = False + + if right_mapper and right is right_selectable: + if not right_selectable.is_derived_from( + right_mapper.mapped_table): + raise sa_exc.InvalidRequestError( + "Selectable '%s' is not derived from '%s'" % + (right_selectable.description, + right_mapper.mapped_table.description)) + + if isinstance(right_selectable, expression.SelectBase): + # TODO: this isn't even covered now! + right_selectable = right_selectable.alias() + need_adapter = True + + right = aliased(right_mapper, right_selectable) + + aliased_entity = right_mapper and \ + not right_is_aliased and \ + ( + right_mapper.with_polymorphic and isinstance( + right_mapper._with_polymorphic_selectable, + expression.Alias) + or + overlap # test for overlap: + # orm/inheritance/relationships.py + # SelfReferentialM2MTest + ) + + if not need_adapter and (create_aliases or aliased_entity): + right = aliased(right, flat=True) + need_adapter = True + + # if an alias() of the right side was generated here, + # apply an adapter to all subsequent filter() calls + # until reset_joinpoint() is called. + if need_adapter: + self._filter_aliases = ORMAdapter( + right, + equivalents=right_mapper and + right_mapper._equivalent_columns or {}, + chain_to=self._filter_aliases) + + # if the onclause is a ClauseElement, adapt it with any + # adapters that are in place right now + if isinstance(onclause, expression.ClauseElement): + onclause = self._adapt_clause(onclause, True, True) + + # if an alias() on the right side was generated, + # which is intended to wrap the right side in a subquery, + # ensure that columns retrieved from this target in the result + # set are also adapted.
+ if aliased_entity and not create_aliases: + self._mapper_loads_polymorphically_with( + right_mapper, + ORMAdapter( + right, + equivalents=right_mapper._equivalent_columns + ) + ) + + return right, onclause + + def _join_to_left(self, l_info, left, right, onclause, outerjoin, full): + info = l_info + left_mapper = getattr(info, 'mapper', None) + left_selectable = info.selectable + + if self._from_obj: + replace_clause_index, clause = sql_util.find_join_source( + self._from_obj, + left_selectable) + if clause is not None: + try: + clause = orm_join(clause, + right, + onclause, isouter=outerjoin, full=full) + except sa_exc.ArgumentError as ae: + raise sa_exc.InvalidRequestError( + "Could not find a FROM clause to join from. " + "Tried joining to %s, but got: %s" % (right, ae)) + + self._from_obj = \ + self._from_obj[:replace_clause_index] + \ + (clause, ) + \ + self._from_obj[replace_clause_index + 1:] + return + + if left_mapper: + for ent in self._entities: + if ent.corresponds_to(left): + clause = ent.selectable + break + else: + clause = left + else: + clause = left_selectable + + assert clause is not None + try: + clause = orm_join( + clause, right, onclause, isouter=outerjoin, full=full) + except sa_exc.ArgumentError as ae: + raise sa_exc.InvalidRequestError( + "Could not find a FROM clause to join from. " + "Tried joining to %s, but got: %s" % (right, ae)) + self._from_obj = self._from_obj + (clause,) + + def _reset_joinpoint(self): + self._joinpoint = self._joinpath + self._filter_aliases = None + + @_generative(_no_statement_condition) + def reset_joinpoint(self): + """Return a new :class:`.Query`, where the "join point" has + been reset back to the base FROM entities of the query. + + This method is usually used in conjunction with the + ``aliased=True`` feature of the :meth:`~.Query.join` + method. See the example in :meth:`~.Query.join` for how + this is used. + + """ + self._reset_joinpoint() + + @_generative(_no_clauseelement_condition) + def select_from(self, *from_obj): + r"""Set the FROM clause of this :class:`.Query` explicitly. + + :meth:`.Query.select_from` is often used in conjunction with + :meth:`.Query.join` in order to control which entity is selected + from on the "left" side of the join. + + The entity or selectable object here effectively replaces the + "left edge" of any calls to :meth:`~.Query.join`, when no + joinpoint is otherwise established - usually, the default "join + point" is the leftmost entity in the :class:`~.Query` object's + list of entities to be selected. + + A typical example:: + + q = session.query(Address).select_from(User).\ + join(User.addresses).\ + filter(User.name == 'ed') + + Which produces SQL equivalent to:: + + SELECT address.* FROM user + JOIN address ON user.id=address.user_id + WHERE user.name = :name_1 + + :param \*from_obj: collection of one or more entities to apply + to the FROM clause. Entities can be mapped classes, + :class:`.AliasedClass` objects, :class:`.Mapper` objects + as well as core :class:`.FromClause` elements like subqueries. + + .. versionchanged:: 0.9 + This method no longer applies the given FROM object + to be the selectable from which matching entities + select from; the :meth:`.select_entity_from` method + now accomplishes this. See that method for a description + of this behavior. + + .. 
seealso:: + + :meth:`~.Query.join` + + :meth:`.Query.select_entity_from` + + """ + + self._set_select_from(from_obj, False) + + @_generative(_no_clauseelement_condition) + def select_entity_from(self, from_obj): + r"""Set the FROM clause of this :class:`.Query` to a + core selectable, applying it as a replacement FROM clause + for corresponding mapped entities. + + The :meth:`.Query.select_entity_from` method supplies an alternative + approach to the use case of applying an :func:`.aliased` construct + explicitly throughout a query. Instead of referring to the + :func:`.aliased` construct explicitly, + :meth:`.Query.select_entity_from` automatically *adapts* all occurrences + of the entity to the target selectable. + + Given a case for :func:`.aliased` such as selecting ``User`` + objects from a SELECT statement:: + + select_stmt = select([User]).where(User.id == 7) + user_alias = aliased(User, select_stmt) + + q = session.query(user_alias).\ + filter(user_alias.name == 'ed') + + Above, we apply the ``user_alias`` object explicitly throughout the + query. When it's not feasible for ``user_alias`` to be referenced + explicitly in many places, :meth:`.Query.select_entity_from` may be + used at the start of the query to adapt the existing ``User`` entity:: + + q = session.query(User).\ + select_entity_from(select_stmt).\ + filter(User.name == 'ed') + + Above, the generated SQL will show that the ``User`` entity is + adapted to our statement, even in the case of the WHERE clause: + + .. sourcecode:: sql + + SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name + FROM (SELECT "user".id AS id, "user".name AS name + FROM "user" + WHERE "user".id = :id_1) AS anon_1 + WHERE anon_1.name = :name_1 + + The :meth:`.Query.select_entity_from` method is similar to the + :meth:`.Query.select_from` method, in that it sets the FROM clause + of the query. The difference is that it additionally applies + adaptation to the other parts of the query that refer to the + primary entity. If above we had used :meth:`.Query.select_from` + instead, the SQL generated would have been: + + .. sourcecode:: sql + + -- uses plain select_from(), not select_entity_from() + SELECT "user".id AS user_id, "user".name AS user_name + FROM "user", (SELECT "user".id AS id, "user".name AS name + FROM "user" + WHERE "user".id = :id_1) AS anon_1 + WHERE "user".name = :name_1 + + To supply textual SQL to the :meth:`.Query.select_entity_from` method, + we can make use of the :func:`.text` construct. However, the + :func:`.text` construct needs to be aligned with the columns of our + entity, which is achieved by making use of the + :meth:`.TextClause.columns` method:: + + text_stmt = text("select id, name from user").columns( + User.id, User.name) + q = session.query(User).select_entity_from(text_stmt) + + :meth:`.Query.select_entity_from` itself accepts an :func:`.aliased` + object, so that the special options of :func:`.aliased` such as + :paramref:`.aliased.adapt_on_names` may be used within the + scope of the :meth:`.Query.select_entity_from` method's adaptation + services. Suppose + a view ``user_view`` also returns rows from ``user``.
If + we reflect this view into a :class:`.Table`, this view has no + relationship to the :class:`.Table` to which we are mapped, however + we can use name matching to select from it:: + + user_view = Table('user_view', metadata, + autoload_with=engine) + user_view_alias = aliased( + User, user_view, adapt_on_names=True) + q = session.query(User).\ + select_entity_from(user_view_alias).\ + order_by(User.name) + + .. versionchanged:: 1.1.7 The :meth:`.Query.select_entity_from` + method now accepts an :func:`.aliased` object as an alternative + to a :class:`.FromClause` object. + + :param from_obj: a :class:`.FromClause` object that will replace + the FROM clause of this :class:`.Query`. It also may be an instance + of :func:`.aliased`. + + + + .. seealso:: + + :meth:`.Query.select_from` + + """ + + self._set_select_from([from_obj], True) + + def __getitem__(self, item): + if isinstance(item, slice): + start, stop, step = util.decode_slice(item) + + if isinstance(stop, int) and \ + isinstance(start, int) and \ + stop - start <= 0: + return [] + + # perhaps we should execute a count() here so that we + # can still use LIMIT/OFFSET ? + elif (isinstance(start, int) and start < 0) \ + or (isinstance(stop, int) and stop < 0): + return list(self)[item] + + res = self.slice(start, stop) + if step is not None: + return list(res)[None:None:item.step] + else: + return list(res) + else: + if item == -1: + return list(self)[-1] + else: + return list(self[item:item + 1])[0] + + @_generative(_no_statement_condition) + def slice(self, start, stop): + """Computes the "slice" of the :class:`.Query` represented by + the given indices and returns the resulting :class:`.Query`. + + The start and stop indices behave like the argument to Python's + built-in :func:`range` function. This method provides an + alternative to using ``LIMIT``/``OFFSET`` to get a slice of the + query. + + For example, :: + + session.query(User).order_by(User.id).slice(1, 3) + + renders as + + .. sourcecode:: sql + + SELECT users.id AS users_id, + users.name AS users_name + FROM users ORDER BY users.id + LIMIT ? OFFSET ? + (2, 1) + + .. seealso:: + + :meth:`.Query.limit` + + :meth:`.Query.offset` + + """ + if start is not None and stop is not None: + self._offset = (self._offset or 0) + start + self._limit = stop - start + elif start is None and stop is not None: + self._limit = stop + elif start is not None and stop is None: + self._offset = (self._offset or 0) + start + + if self._offset == 0: + self._offset = None + + @_generative(_no_statement_condition) + def limit(self, limit): + """Apply a ``LIMIT`` to the query and return the newly resulting + ``Query``. + + """ + self._limit = limit + + @_generative(_no_statement_condition) + def offset(self, offset): + """Apply an ``OFFSET`` to the query and return the newly resulting + ``Query``. + + """ + self._offset = offset + + @_generative(_no_statement_condition) + def distinct(self, *criterion): + r"""Apply a ``DISTINCT`` to the query and return the newly resulting + ``Query``. + + + .. note:: + + The :meth:`.distinct` call includes logic that will automatically + add columns from the ORDER BY of the query to the columns + clause of the SELECT statement, to satisfy the common need + of the database backend that ORDER BY columns be part of the + SELECT list when DISTINCT is used. These columns *are not* + added to the list of columns actually fetched by the + :class:`.Query`, however, so would not affect results. 
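As a quick illustration of the ``slice()`` / ``limit()`` / ``offset()`` family and the ``__getitem__`` slicing documented above, a sketch under the same assumed ``User`` mapping:

.. code:: python

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    q = session.query(User).order_by(User.id)

    page = q.slice(10, 20)         # LIMIT 10 OFFSET 10
    page = q.limit(10).offset(10)  # the equivalent, spelled explicitly
    page = q[10:20]                # __getitem__ delegates to slice()
    print(list(page))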
+ + The columns are passed through when using the + :attr:`.Query.statement` accessor, however. + + :param \*criterion: optional column expressions. When present, + the PostgreSQL dialect will render a ``DISTINCT ON (<expressions>)`` + construct. + + """ + if not criterion: + self._distinct = True + else: + criterion = self._adapt_col_list(criterion) + if isinstance(self._distinct, list): + self._distinct += criterion + else: + self._distinct = criterion + + @_generative() + def prefix_with(self, *prefixes): + r"""Apply the prefixes to the query and return the newly resulting + ``Query``. + + :param \*prefixes: optional prefixes, typically strings, + not using any commas. This is particularly useful for MySQL keywords. + + e.g.:: + + query = sess.query(User.name).\ + prefix_with('HIGH_PRIORITY').\ + prefix_with('SQL_SMALL_RESULT', 'ALL') + + Would render:: + + SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name + FROM users + + .. versionadded:: 0.7.7 + + .. seealso:: + + :meth:`.HasPrefixes.prefix_with` + + """ + if self._prefixes: + self._prefixes += prefixes + else: + self._prefixes = prefixes + + @_generative() + def suffix_with(self, *suffixes): + r"""Apply the suffix to the query and return the newly resulting + ``Query``. + + :param \*suffixes: optional suffixes, typically strings, + not using any commas. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :meth:`.Query.prefix_with` + + :meth:`.HasSuffixes.suffix_with` + + """ + if self._suffixes: + self._suffixes += suffixes + else: + self._suffixes = suffixes + + def all(self): + """Return the results represented by this ``Query`` as a list. + + This results in an execution of the underlying query. + + """ + return list(self) + + @_generative(_no_clauseelement_condition) + def from_statement(self, statement): + """Execute the given SELECT statement and return results. + + This method bypasses all internal statement compilation, and the + statement is executed without modification. + + The statement is typically either a :func:`~.expression.text` + or :func:`~.expression.select` construct, and should return the set + of columns + appropriate to the entity class represented by this :class:`.Query`. + + .. seealso:: + + :ref:`orm_tutorial_literal_sql` - usage examples in the + ORM tutorial + + """ + statement = expression._expression_literal_as_text(statement) + + if not isinstance(statement, + (expression.TextClause, + expression.SelectBase)): + raise sa_exc.ArgumentError( + "from_statement accepts text(), select(), " + "and union() objects only.") + + self._statement = statement + + def first(self): + """Return the first result of this ``Query`` or + None if the result doesn't contain any row. + + first() applies a limit of one within the generated SQL, so that + only one primary entity row is generated on the server side + (note this may consist of multiple result rows if join-loaded + collections are present). + + Calling :meth:`.Query.first` results in an execution of the underlying query. + + .. seealso:: + + :meth:`.Query.one` + + :meth:`.Query.one_or_none` + + """ + if self._statement is not None: + ret = list(self)[0:1] + else: + ret = list(self[0:1]) + if len(ret) > 0: + return ret[0] + else: + return None + + def one_or_none(self): + """Return at most one result or raise an exception. + + Returns ``None`` if the query selects + no rows.
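To make the ``from_statement()`` / ``first()`` combination above concrete, a sketch (the textual statement's columns must line up with the ``User`` entity, which is assumed for illustration):

.. code:: python

    from sqlalchemy import Column, Integer, String, create_engine, text
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    # the raw statement is executed as-is; no compilation is applied
    stmt = text("SELECT * FROM users WHERE name = :name")
    user = (session.query(User)
            .from_statement(stmt)
            .params(name='ed')
            .first())
    print(user)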
Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` + if multiple object identities are returned, or if multiple + rows are returned for a query that returns only scalar values + as opposed to full identity-mapped entities. + + Calling :meth:`.Query.one_or_none` results in an execution of the + underlying query. + + .. versionadded:: 1.0.9 + + Added :meth:`.Query.one_or_none` + + .. seealso:: + + :meth:`.Query.first` + + :meth:`.Query.one` + + """ + ret = list(self) + + l = len(ret) + if l == 1: + return ret[0] + elif l == 0: + return None + else: + raise orm_exc.MultipleResultsFound( + "Multiple rows were found for one_or_none()") + + def one(self): + """Return exactly one result or raise an exception. + + Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects + no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` + if multiple object identities are returned, or if multiple + rows are returned for a query that returns only scalar values + as opposed to full identity-mapped entities. + + Calling :meth:`.one` results in an execution of the underlying query. + + .. seealso:: + + :meth:`.Query.first` + + :meth:`.Query.one_or_none` + + """ + try: + ret = self.one_or_none() + except orm_exc.MultipleResultsFound: + raise orm_exc.MultipleResultsFound( + "Multiple rows were found for one()") + else: + if ret is None: + raise orm_exc.NoResultFound("No row was found for one()") + return ret + + def scalar(self): + """Return the first element of the first result or None + if no rows present. If multiple rows are returned, + raises MultipleResultsFound. + + >>> session.query(Item).scalar() + <Item> + >>> session.query(Item.id).scalar() + 1 + >>> session.query(Item.id).filter(Item.id < 0).scalar() + None + >>> session.query(Item.id, Item.name).scalar() + 1 + >>> session.query(func.count(Parent.id)).scalar() + 20 + + This results in an execution of the underlying query. + + """ + try: + ret = self.one() + if not isinstance(ret, tuple): + return ret + return ret[0] + except orm_exc.NoResultFound: + return None + + def __iter__(self): + context = self._compile_context() + context.statement.use_labels = True + if self._autoflush and not self._populate_existing: + self.session._autoflush() + return self._execute_and_instances(context) + + def __str__(self): + context = self._compile_context() + try: + bind = self._get_bind_args( + context, self.session.get_bind) if self.session else None + except sa_exc.UnboundExecutionError: + bind = None + return str(context.statement.compile(bind)) + + def _connection_from_session(self, **kw): + conn = self.session.connection(**kw) + if self._execution_options: + conn = conn.execution_options(**self._execution_options) + return conn + + def _execute_and_instances(self, querycontext): + conn = self._get_bind_args( + querycontext, + self._connection_from_session, + close_with_result=True) + + result = conn.execute(querycontext.statement, self._params) + return loading.instances(querycontext.query, result, querycontext) + + def _get_bind_args(self, querycontext, fn, **kw): + return fn( + mapper=self._bind_mapper(), + clause=querycontext.statement, + **kw + ) + + @property + def column_descriptions(self): + """Return metadata about the columns which would be + returned by this :class:`.Query`.
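A sketch of the result-fetching trio documented above, ``scalar()`` / ``one_or_none()`` / ``one()``, including the exceptions the latter raises (models, engine, and sample rows are illustrative assumptions):

.. code:: python

    from sqlalchemy import Column, Integer, String, create_engine, func
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add_all([User(name='ed'), User(name='ed')])

    print(session.query(func.count(User.id)).scalar())                # 2
    print(session.query(User).filter(User.id == 99).one_or_none())   # None
    try:
        session.query(User).filter(User.name == 'ed').one()
    except MultipleResultsFound:
        print("two 'ed' rows matched")
    except NoResultFound:
        pass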
+ + Format is a list of dictionaries:: + + user_alias = aliased(User, name='user2') + q = sess.query(User, User.id, user_alias) + + # this expression: + q.column_descriptions + + # would return: + [ + { + 'name':'User', + 'type':User, + 'aliased':False, + 'expr':User, + 'entity': User + }, + { + 'name':'id', + 'type':Integer(), + 'aliased':False, + 'expr':User.id, + 'entity': User + }, + { + 'name':'user2', + 'type':User, + 'aliased':True, + 'expr':user_alias, + 'entity': user_alias + } + ] + + """ + + return [ + { + 'name': ent._label_name, + 'type': ent.type, + 'aliased': getattr(insp_ent, 'is_aliased_class', False), + 'expr': ent.expr, + 'entity': + getattr(insp_ent, "entity", None) + if ent.entity_zero is not None + and not insp_ent.is_clause_element + else None + } + for ent, insp_ent in [ + ( + _ent, + (inspect(_ent.entity_zero) + if _ent.entity_zero is not None else None) + ) + for _ent in self._entities + ] + ] + + def instances(self, cursor, __context=None): + """Given a ResultProxy cursor as returned by connection.execute(), + return an ORM result as an iterator. + + e.g.:: + + result = engine.execute("select * from users") + for u in session.query(User).instances(result): + print u + """ + context = __context + if context is None: + context = QueryContext(self) + + return loading.instances(self, cursor, context) + + def merge_result(self, iterator, load=True): + """Merge a result into this :class:`.Query` object's Session. + + Given an iterator returned by a :class:`.Query` of the same structure + as this one, return an identical iterator of results, with all mapped + instances merged into the session using :meth:`.Session.merge`. This + is an optimized method which will merge all mapped instances, + preserving the structure of the result rows and unmapped columns with + less method overhead than that of calling :meth:`.Session.merge` + explicitly for each value. + + The structure of the results is determined based on the column list of + this :class:`.Query` - if these do not correspond, unchecked errors + will occur. + + The 'load' argument is the same as that of :meth:`.Session.merge`. + + For an example of how :meth:`~.Query.merge_result` is used, see + the source code for the example :ref:`examples_caching`, where + :meth:`~.Query.merge_result` is used to efficiently restore state + from a cache back into a target :class:`.Session`. + + """ + + return loading.merge_result(self, iterator, load) + + @property + def _select_args(self): + return { + 'limit': self._limit, + 'offset': self._offset, + 'distinct': self._distinct, + 'prefixes': self._prefixes, + 'suffixes': self._suffixes, + 'group_by': self._group_by or None, + 'having': self._having + } + + @property + def _should_nest_selectable(self): + kwargs = self._select_args + return (kwargs.get('limit') is not None or + kwargs.get('offset') is not None or + kwargs.get('distinct', False)) + + def exists(self): + """A convenience method that turns a query into an EXISTS subquery + of the form EXISTS (SELECT 1 FROM ... WHERE ...). + + e.g.:: + + q = session.query(User).filter(User.name == 'fred') + session.query(q.exists()) + + Producing SQL similar to:: + + SELECT EXISTS ( + SELECT 1 FROM users WHERE users.name = :name_1 + ) AS anon_1 + + The EXISTS construct is usually used in the WHERE clause:: + + session.query(User.id).filter(q.exists()).scalar() + + Note that some databases such as SQL Server don't allow an + EXISTS expression to be present in the columns clause of a + SELECT. 
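As a runnable companion to the ``column_descriptions`` structure shown above, a sketch; no statement is executed, so only the assumed ``User`` mapping is required:

.. code:: python

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import aliased, sessionmaker

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String)

    engine = create_engine('sqlite://')
    session = sessionmaker(bind=engine)()

    user_alias = aliased(User, name='user2')
    q = session.query(User, User.id, user_alias)
    for desc in q.column_descriptions:
        # each dict carries name/type/aliased/expr/entity as shown above
        print(desc['name'], desc['aliased'])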
To select a simple boolean value based on the exists + as a WHERE, use :func:`.literal`:: + + from sqlalchemy import literal + + session.query(literal(True)).filter(q.exists()).scalar() + + .. versionadded:: 0.8.1 + + """ + + # .add_columns() for the case that we are a query().select_from(X), + # so that ".statement" can be produced (#2995) but also without + # omitting the FROM clause from a query(X) (#2818); + # .with_only_columns() after we have a core select() so that + # we get just "SELECT 1" without any entities. + return sql.exists(self.add_columns('1').with_labels(). + statement.with_only_columns([1])) + + def count(self): + r"""Return a count of rows this Query would return. + + This generates the SQL for this Query as follows:: + + SELECT count(1) AS count_1 FROM ( + SELECT <rest of query follows...> + ) AS anon_1 + + .. versionchanged:: 0.7 + The above scheme is newly refined as of 0.7b3. + + For fine grained control over specific columns + to count, to skip the usage of a subquery or + otherwise control the FROM clause, + or to use other aggregate functions, + use :attr:`~sqlalchemy.sql.expression.func` + expressions in conjunction + with :meth:`~.Session.query`, i.e.:: + + from sqlalchemy import func + + # count User records, without + # using a subquery. + session.query(func.count(User.id)) + + # return count of user "id" grouped + # by "name" + session.query(func.count(User.id)).\ + group_by(User.name) + + from sqlalchemy import distinct + + # count distinct "name" values + session.query(func.count(distinct(User.name))) + + """ + col = sql.func.count(sql.literal_column('*')) + return self.from_self(col).scalar() + + def delete(self, synchronize_session='evaluate'): + r"""Perform a bulk delete query. + + Deletes rows matched by this query from the database. + + E.g.:: + + sess.query(User).filter(User.age == 25).\ + delete(synchronize_session=False) + + sess.query(User).filter(User.age == 25).\ + delete(synchronize_session='evaluate') + + .. warning:: The :meth:`.Query.delete` method is a "bulk" operation, + which bypasses ORM unit-of-work automation in favor of greater + performance. **Please read all caveats and warnings below.** + + :param synchronize_session: chooses the strategy for the removal of + matched objects from the session. Valid values are: + + ``False`` - don't synchronize the session. This option is the most + efficient and is reliable once the session is expired, which + typically occurs after a commit(), or explicitly using + expire_all(). Before the expiration, objects may still remain in + the session which were in fact deleted which can lead to confusing + results if they are accessed via get() or already loaded + collections. + + ``'fetch'`` - performs a select query before the delete to find + objects that are matched by the delete query and need to be + removed from the session. Matched objects are removed from the + session. + + ``'evaluate'`` - Evaluate the query's criteria in Python straight + on the objects in the session. If evaluation of the criteria isn't + implemented, an error is raised. + + The expression evaluator currently doesn't account for differing + string collations between the database and Python. + + :return: the count of rows matched as returned by the database's + "row count" feature. + + ..
warning:: **Additional Caveats for bulk query deletes** + + * This method does **not work for joined + inheritance mappings**, since the **multiple table + deletes are not supported by SQL** as well as that the + **join condition of an inheritance mapper is not + automatically rendered**. Care must be taken in any + multiple-table delete to first accommodate via some other means + how the related table will be deleted, as well as to + explicitly include the joining + condition between those tables, even in mappings where + this is normally automatic. E.g. if a class ``Engineer`` + subclasses ``Employee``, a DELETE against the ``Employee`` + table would look like:: + + session.query(Engineer).\ + filter(Engineer.id == Employee.id).\ + filter(Employee.name == 'dilbert').\ + delete() + + However the above SQL will not delete from the Engineer table, + unless an ON DELETE CASCADE rule is established in the database + to handle it. + + Short story, **do not use this method for joined inheritance + mappings unless you have taken the additional steps to make + this feasible**. + + * The polymorphic identity WHERE criteria is **not** included + for single- or + joined- table deletes - this must be added **manually** even + for single table inheritance. + + * The method does **not** offer in-Python cascading of + relationships - it is assumed that ON DELETE CASCADE/SET + NULL/etc. is configured for any foreign key references + which require it, otherwise the database may emit an + integrity violation if foreign key references are being + enforced. + + After the DELETE, dependent objects in the + :class:`.Session` which were impacted by an ON DELETE + may not contain the current state, or may have been + deleted. This issue is resolved once the + :class:`.Session` is expired, which normally occurs upon + :meth:`.Session.commit` or can be forced by using + :meth:`.Session.expire_all`. Accessing an expired + object whose row has been deleted will invoke a SELECT + to locate the row; when the row is not found, an + :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is + raised. + + * The ``'fetch'`` strategy results in an additional + SELECT statement emitted and will significantly reduce + performance. + + * The ``'evaluate'`` strategy performs a scan of + all matching objects within the :class:`.Session`; if the + contents of the :class:`.Session` are expired, such as + via a preceding :meth:`.Session.commit` call, **this will + result in SELECT queries emitted for every matching object**. + + * The :meth:`.MapperEvents.before_delete` and + :meth:`.MapperEvents.after_delete` + events **are not invoked** from this method. Instead, the + :meth:`.SessionEvents.after_bulk_delete` method is provided to + act upon a mass DELETE of entity rows. + + .. seealso:: + + :meth:`.Query.update` + + :ref:`inserts_and_updates` - Core SQL tutorial + + """ + + delete_op = persistence.BulkDelete.factory( + self, synchronize_session) + delete_op.exec_() + return delete_op.rowcount + + def update(self, values, synchronize_session='evaluate', update_args=None): + r"""Perform a bulk update query. + + Updates rows matched by this query in the database. + + E.g.:: + + sess.query(User).filter(User.age == 25).\ + update({User.age: User.age - 10}, synchronize_session=False) + + sess.query(User).filter(User.age == 25).\ + update({"age": User.age - 10}, synchronize_session='evaluate') + + + ..
warning:: The :meth:`.Query.update` method is a "bulk" operation, + which bypasses ORM unit-of-work automation in favor of greater + performance. **Please read all caveats and warnings below.** + + + :param values: a dictionary with attribute names, or alternatively + mapped attributes or SQL expressions, as keys, and literal + values or SQL expressions as values. If :ref:`parameter-ordered + mode <updates_order_parameters>` is desired, the values can be + passed as a list of 2-tuples; + this requires that the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` + flag is passed to the :paramref:`.Query.update.update_args` dictionary + as well. + + .. versionchanged:: 1.0.0 - string names in the values dictionary + are now resolved against the mapped entity; previously, these + strings were passed as literal column names with no mapper-level + translation. + + :param synchronize_session: chooses the strategy to update the + attributes on objects in the session. Valid values are: + + ``False`` - don't synchronize the session. This option is the most + efficient and is reliable once the session is expired, which + typically occurs after a commit(), or explicitly using + expire_all(). Before the expiration, updated objects may still + remain in the session with stale values on their attributes, which + can lead to confusing results. + + ``'fetch'`` - performs a select query before the update to find + objects that are matched by the update query. The updated + attributes are expired on matched objects. + + ``'evaluate'`` - Evaluate the Query's criteria in Python straight + on the objects in the session. If evaluation of the criteria isn't + implemented, an exception is raised. + + The expression evaluator currently doesn't account for differing + string collations between the database and Python. + + :param update_args: Optional dictionary, if present will be passed + to the underlying :func:`.update` construct as the ``**kw`` for + the object. May be used to pass dialect-specific arguments such + as ``mysql_limit``, as well as other special arguments such as + :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`. + + .. versionadded:: 1.0.0 + + :return: the count of rows matched as returned by the database's + "row count" feature. + + .. warning:: **Additional Caveats for bulk query updates** + + * The method does **not** offer in-Python cascading of + relationships - it is assumed that ON UPDATE CASCADE is + configured for any foreign key references which require + it, otherwise the database may emit an integrity + violation if foreign key references are being enforced. + + After the UPDATE, dependent objects in the + :class:`.Session` which were impacted by an ON UPDATE + CASCADE may not contain the current state; this issue is + resolved once the :class:`.Session` is expired, which + normally occurs upon :meth:`.Session.commit` or can be + forced by using :meth:`.Session.expire_all`. + + * The ``'fetch'`` strategy results in an additional + SELECT statement emitted and will significantly reduce + performance. + + * The ``'evaluate'`` strategy performs a scan of + all matching objects within the :class:`.Session`; if the + contents of the :class:`.Session` are expired, such as + via a preceding :meth:`.Session.commit` call, **this will + result in SELECT queries emitted for every matching object**.
+ + * The method supports multiple table updates, as detailed + in :ref:`multi_table_updates`, and this behavior does + extend to support updates of joined-inheritance and + other multiple table mappings. However, the **join + condition of an inheritance mapper is not + automatically rendered**. Care must be taken in any + multiple-table update to explicitly include the joining + condition between those tables, even in mappings where + this is normally automatic. E.g. if a class ``Engineer`` + subclasses ``Employee``, an UPDATE of the ``Engineer`` + local table using criteria against the ``Employee`` + local table might look like:: + + session.query(Engineer).\ + filter(Engineer.id == Employee.id).\ + filter(Employee.name == 'dilbert').\ + update({"engineer_type": "programmer"}) + + * The polymorphic identity WHERE criteria is **not** included + for single- or + joined- table updates - this must be added **manually**, even + for single table inheritance. + + * The :meth:`.MapperEvents.before_update` and + :meth:`.MapperEvents.after_update` + events **are not invoked from this method**. Instead, the + :meth:`.SessionEvents.after_bulk_update` method is provided to + act upon a mass UPDATE of entity rows. + + .. seealso:: + + :meth:`.Query.delete` + + :ref:`inserts_and_updates` - Core SQL tutorial + + """ + + update_args = update_args or {} + update_op = persistence.BulkUpdate.factory( + self, synchronize_session, values, update_args) + update_op.exec_() + return update_op.rowcount + + def _compile_context(self, labels=True): + if self.dispatch.before_compile: + for fn in self.dispatch.before_compile: + new_query = fn(self) + if new_query is not None: + self = new_query + + context = QueryContext(self) + + if context.statement is not None: + return context + + context.labels = labels + + context._for_update_arg = self._for_update_arg + + for entity in self._entities: + entity.setup_context(self, context) + + for rec in context.create_eager_joins: + strategy = rec[0] + strategy(*rec[1:]) + + if context.from_clause: + # "load from explicit FROMs" mode, + # i.e. when select_from() or join() is used + context.froms = list(context.from_clause) + # else "load from discrete FROMs" mode, + # i.e. when each _MappedEntity has its own FROM + + if self._enable_single_crit: + self._adjust_for_single_inheritance(context) + + if not context.primary_columns: + if self._only_load_props: + raise sa_exc.InvalidRequestError( + "No column-based properties specified for " + "refresh operation. 
Use session.expire() " + "to reload collections and related items.") + else: + raise sa_exc.InvalidRequestError( + "Query contains no columns with which to " + "SELECT from.") + + if context.multi_row_eager_loaders and self._should_nest_selectable: + context.statement = self._compound_eager_statement(context) + else: + context.statement = self._simple_statement(context) + + return context + + def _compound_eager_statement(self, context): + # for eager joins present and LIMIT/OFFSET/DISTINCT, + # wrap the query inside a select, + # then append eager joins onto that + + if context.order_by: + order_by_col_expr = \ + sql_util.expand_column_list_from_order_by( + context.primary_columns, + context.order_by + ) + else: + context.order_by = None + order_by_col_expr = [] + + inner = sql.select( + context.primary_columns + order_by_col_expr, + context.whereclause, + from_obj=context.froms, + use_labels=context.labels, + # TODO: this order_by is only needed if + # LIMIT/OFFSET is present in self._select_args, + # else the application on the outside is enough + order_by=context.order_by, + **self._select_args + ) + + for hint in self._with_hints: + inner = inner.with_hint(*hint) + + if self._correlate: + inner = inner.correlate(*self._correlate) + + inner = inner.alias() + + equivs = self.__all_equivs() + + context.adapter = sql_util.ColumnAdapter(inner, equivs) + + statement = sql.select( + [inner] + context.secondary_columns, + use_labels=context.labels) + + statement._for_update_arg = context._for_update_arg + + from_clause = inner + for eager_join in context.eager_joins.values(): + # EagerLoader places a 'stop_on' attribute on the join, + # giving us a marker as to where the "splice point" of + # the join should be + from_clause = sql_util.splice_joins( + from_clause, + eager_join, eager_join.stop_on) + + statement.append_from(from_clause) + + if context.order_by: + statement.append_order_by( + *context.adapter.copy_and_process( + context.order_by + ) + ) + + statement.append_order_by(*context.eager_order_by) + return statement + + def _simple_statement(self, context): + if not context.order_by: + context.order_by = None + + if self._distinct is True and context.order_by: + context.primary_columns += \ + sql_util.expand_column_list_from_order_by( + context.primary_columns, + context.order_by + ) + context.froms += tuple(context.eager_joins.values()) + + statement = sql.select( + context.primary_columns + + context.secondary_columns, + context.whereclause, + from_obj=context.froms, + use_labels=context.labels, + order_by=context.order_by, + **self._select_args + ) + statement._for_update_arg = context._for_update_arg + + for hint in self._with_hints: + statement = statement.with_hint(*hint) + + if self._correlate: + statement = statement.correlate(*self._correlate) + + if context.eager_order_by: + statement.append_order_by(*context.eager_order_by) + return statement + + def _adjust_for_single_inheritance(self, context): + """Apply single-table-inheritance filtering. + + For all distinct single-table-inheritance mappers represented in + the columns clause of this query, add criterion to the WHERE + clause of the given QueryContext such that only the appropriate + subtypes are selected from the total results. 
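+
+        E.g. given a hypothetical ``Manager`` class mapped with single
+        table inheritance against an ``Employee`` base::
+
+            # the single-inheritance query...
+            session.query(Manager)
+
+            # ...has criteria along the lines of
+            # "employee.type IN ('manager')" appended to its
+            # WHERE clause by this step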
+
+        """
+        for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
+            if ext_info in self._join_entities:
+                continue
+            single_crit = ext_info.mapper._single_table_criterion
+            if single_crit is not None:
+                if adapter:
+                    single_crit = adapter.traverse(single_crit)
+                single_crit = self._adapt_clause(single_crit, False, False)
+                context.whereclause = sql.and_(
+                    sql.True_._ifnone(context.whereclause),
+                    single_crit)
+
+
+from ..sql.selectable import ForUpdateArg
+
+
+class LockmodeArg(ForUpdateArg):
+    @classmethod
+    def parse_legacy_query(self, mode):
+        if mode in (None, False):
+            return None
+
+        if mode == "read":
+            read = True
+            nowait = False
+        elif mode == "update":
+            read = nowait = False
+        elif mode == "update_nowait":
+            nowait = True
+            read = False
+        else:
+            raise sa_exc.ArgumentError(
+                "Unknown with_lockmode argument: %r" % mode)
+
+        return LockmodeArg(read=read, nowait=nowait)
+
+
+class _QueryEntity(object):
+    """represent an entity column returned within a Query result."""
+
+    def __new__(cls, *args, **kwargs):
+        if cls is _QueryEntity:
+            entity = args[1]
+            if not isinstance(entity, util.string_types) and \
+                    _is_mapped_class(entity):
+                cls = _MapperEntity
+            elif isinstance(entity, Bundle):
+                cls = _BundleEntity
+            else:
+                cls = _ColumnEntity
+        return object.__new__(cls)
+
+    def _clone(self):
+        q = self.__class__.__new__(self.__class__)
+        q.__dict__ = self.__dict__.copy()
+        return q
+
+
+class _MapperEntity(_QueryEntity):
+    """mapper/class/AliasedClass entity"""
+
+    def __init__(self, query, entity):
+        if not query._primary_entity:
+            query._primary_entity = self
+        query._entities.append(self)
+        query._has_mapper_entities = True
+        self.entities = [entity]
+        self.expr = entity
+
+    supports_single_entity = True
+
+    use_id_for_hash = True
+
+    def setup_entity(self, ext_info, aliased_adapter):
+        self.mapper = ext_info.mapper
+        self.aliased_adapter = aliased_adapter
+        self.selectable = ext_info.selectable
+        self.is_aliased_class = ext_info.is_aliased_class
+        self._with_polymorphic = ext_info.with_polymorphic_mappers
+        self._polymorphic_discriminator = \
+            ext_info.polymorphic_on
+        self.entity_zero = ext_info
+        if ext_info.is_aliased_class:
+            self._label_name = self.entity_zero.name
+        else:
+            self._label_name = self.mapper.class_.__name__
+        self.path = self.entity_zero._path_registry
+
+    def set_with_polymorphic(self, query, cls_or_mappers,
+                             selectable, polymorphic_on):
+        """Receive an update from a call to query.with_polymorphic().
+
+        Note the newer style of using a free-standing with_polymorphic()
+        construct doesn't make use of this method.
+
+
+        """
+        if self.is_aliased_class:
+            # TODO: invalidrequest ?
+ raise NotImplementedError( + "Can't use with_polymorphic() against " + "an Aliased object" + ) + + if cls_or_mappers is None: + query._reset_polymorphic_adapter(self.mapper) + return + + mappers, from_obj = self.mapper._with_polymorphic_args( + cls_or_mappers, selectable) + self._with_polymorphic = mappers + self._polymorphic_discriminator = polymorphic_on + + self.selectable = from_obj + query._mapper_loads_polymorphically_with( + self.mapper, sql_util.ColumnAdapter( + from_obj, self.mapper._equivalent_columns)) + + @property + def type(self): + return self.mapper.class_ + + @property + def entity_zero_or_selectable(self): + return self.entity_zero + + def corresponds_to(self, entity): + if entity.is_aliased_class: + if self.is_aliased_class: + if entity._base_alias is self.entity_zero._base_alias: + return True + return False + elif self.is_aliased_class: + if self.entity_zero._use_mapper_path: + return entity in self._with_polymorphic + else: + return entity is self.entity_zero + + return entity.common_parent(self.entity_zero) + + def adapt_to_selectable(self, query, sel): + query._entities.append(self) + + def _get_entity_clauses(self, query, context): + + adapter = None + + if not self.is_aliased_class: + if query._polymorphic_adapters: + adapter = query._polymorphic_adapters.get(self.mapper, None) + else: + adapter = self.aliased_adapter + + if adapter: + if query._from_obj_alias: + ret = adapter.wrap(query._from_obj_alias) + else: + ret = adapter + else: + ret = query._from_obj_alias + + return ret + + def row_processor(self, query, context, result): + adapter = self._get_entity_clauses(query, context) + + if context.adapter and adapter: + adapter = adapter.wrap(context.adapter) + elif not adapter: + adapter = context.adapter + + # polymorphic mappers which have concrete tables in + # their hierarchy usually + # require row aliasing unconditionally. + if not adapter and self.mapper._requires_row_aliasing: + adapter = sql_util.ColumnAdapter( + self.selectable, + self.mapper._equivalent_columns) + + if query._primary_entity is self: + only_load_props = query._only_load_props + refresh_state = context.refresh_state + else: + only_load_props = refresh_state = None + + _instance = loading._instance_processor( + self.mapper, + context, + result, + self.path, + adapter, + only_load_props=only_load_props, + refresh_state=refresh_state, + polymorphic_discriminator=self._polymorphic_discriminator + ) + + return _instance, self._label_name + + def setup_context(self, query, context): + adapter = self._get_entity_clauses(query, context) + + # if self._adapted_selectable is None: + context.froms += (self.selectable,) + + if context.order_by is False and self.mapper.order_by: + context.order_by = self.mapper.order_by + + # apply adaptation to the mapper's order_by if needed. + if adapter: + context.order_by = adapter.adapt_list( + util.to_list( + context.order_by + ) + ) + + loading._setup_entity_query( + context, self.mapper, self, + self.path, adapter, context.primary_columns, + with_polymorphic=self._with_polymorphic, + only_load_props=query._only_load_props, + polymorphic_discriminator=self._polymorphic_discriminator) + + def __str__(self): + return str(self.mapper) + + +@inspection._self_inspects +class Bundle(InspectionAttr): + """A grouping of SQL expressions that are returned by a :class:`.Query` + under one namespace. + + The :class:`.Bundle` essentially allows nesting of the tuple-based + results returned by a column-oriented :class:`.Query` object. 
It also + is extensible via simple subclassing, where the primary capability + to override is that of how the set of expressions should be returned, + allowing post-processing as well as custom return types, without + involving ORM identity-mapped classes. + + .. versionadded:: 0.9.0 + + .. seealso:: + + :ref:`bundles` + + """ + + single_entity = False + """If True, queries for a single Bundle will be returned as a single + entity, rather than an element within a keyed tuple.""" + + is_clause_element = False + + is_mapper = False + + is_aliased_class = False + + def __init__(self, name, *exprs, **kw): + r"""Construct a new :class:`.Bundle`. + + e.g.:: + + bn = Bundle("mybundle", MyClass.x, MyClass.y) + + for row in session.query(bn).filter( + bn.c.x == 5).filter(bn.c.y == 4): + print(row.mybundle.x, row.mybundle.y) + + :param name: name of the bundle. + :param \*exprs: columns or SQL expressions comprising the bundle. + :param single_entity=False: if True, rows for this :class:`.Bundle` + can be returned as a "single entity" outside of any enclosing tuple + in the same manner as a mapped entity. + + """ + self.name = self._label = name + self.exprs = exprs + self.c = self.columns = ColumnCollection() + self.columns.update((getattr(col, "key", col._label), col) + for col in exprs) + self.single_entity = kw.pop('single_entity', self.single_entity) + + columns = None + """A namespace of SQL expressions referred to by this :class:`.Bundle`. + + e.g.:: + + bn = Bundle("mybundle", MyClass.x, MyClass.y) + + q = sess.query(bn).filter(bn.c.x == 5) + + Nesting of bundles is also supported:: + + b1 = Bundle("b1", + Bundle('b2', MyClass.a, MyClass.b), + Bundle('b3', MyClass.x, MyClass.y) + ) + + q = sess.query(b1).filter( + b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9) + + .. seealso:: + + :attr:`.Bundle.c` + + """ + + c = None + """An alias for :attr:`.Bundle.columns`.""" + + def _clone(self): + cloned = self.__class__.__new__(self.__class__) + cloned.__dict__.update(self.__dict__) + return cloned + + def __clause_element__(self): + return expression.ClauseList(group=False, *self.c) + + @property + def clauses(self): + return self.__clause_element__().clauses + + def label(self, name): + """Provide a copy of this :class:`.Bundle` passing a new label.""" + + cloned = self._clone() + cloned.name = name + return cloned + + def create_row_processor(self, query, procs, labels): + """Produce the "row processing" function for this :class:`.Bundle`. + + May be overridden by subclasses. + + .. seealso:: + + :ref:`bundles` - includes an example of subclassing. 
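+
+        As a sketch, a subclass that returns each row as a dictionary
+        rather than a tuple (names here are illustrative only) might
+        look like::
+
+            class DictBundle(Bundle):
+                def create_row_processor(self, query, procs, labels):
+                    def proc(row):
+                        return dict(
+                            zip(labels, (proc(row) for proc in procs))
+                        )
+                    return proc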
+ + """ + keyed_tuple = util.lightweight_named_tuple('result', labels) + + def proc(row): + return keyed_tuple([proc(row) for proc in procs]) + return proc + + +class _BundleEntity(_QueryEntity): + use_id_for_hash = False + + def __init__(self, query, bundle, setup_entities=True): + query._entities.append(self) + self.bundle = self.expr = bundle + self.type = type(bundle) + self._label_name = bundle.name + self._entities = [] + + if setup_entities: + for expr in bundle.exprs: + if isinstance(expr, Bundle): + _BundleEntity(self, expr) + else: + _ColumnEntity(self, expr, namespace=self) + + self.supports_single_entity = self.bundle.single_entity + + @property + def entities(self): + entities = [] + for ent in self._entities: + entities.extend(ent.entities) + return entities + + @property + def entity_zero(self): + for ent in self._entities: + ezero = ent.entity_zero + if ezero is not None: + return ezero + else: + return None + + def corresponds_to(self, entity): + # TODO: this seems to have no effect for + # _ColumnEntity either + return False + + @property + def entity_zero_or_selectable(self): + for ent in self._entities: + ezero = ent.entity_zero_or_selectable + if ezero is not None: + return ezero + else: + return None + + def adapt_to_selectable(self, query, sel): + c = _BundleEntity(query, self.bundle, setup_entities=False) + # c._label_name = self._label_name + # c.entity_zero = self.entity_zero + # c.entities = self.entities + + for ent in self._entities: + ent.adapt_to_selectable(c, sel) + + def setup_entity(self, ext_info, aliased_adapter): + for ent in self._entities: + ent.setup_entity(ext_info, aliased_adapter) + + def setup_context(self, query, context): + for ent in self._entities: + ent.setup_context(query, context) + + def row_processor(self, query, context, result): + procs, labels = zip( + *[ent.row_processor(query, context, result) + for ent in self._entities] + ) + + proc = self.bundle.create_row_processor(query, procs, labels) + + return proc, self._label_name + + +class _ColumnEntity(_QueryEntity): + """Column/expression based entity.""" + + def __init__(self, query, column, namespace=None): + self.expr = column + self.namespace = namespace + search_entities = True + check_column = False + + if isinstance(column, util.string_types): + column = sql.literal_column(column) + self._label_name = column.name + search_entities = False + check_column = True + _entity = None + elif isinstance(column, ( + attributes.QueryableAttribute, + interfaces.PropComparator + )): + _entity = getattr(column, '_parententity', None) + if _entity is not None: + search_entities = False + self._label_name = column.key + column = column._query_clause_element() + check_column = True + if isinstance(column, Bundle): + _BundleEntity(query, column) + return + + if not isinstance(column, sql.ColumnElement): + if hasattr(column, '_select_iterable'): + # break out an object like Table into + # individual columns + for c in column._select_iterable: + if c is column: + break + _ColumnEntity(query, c, namespace=column) + else: + return + + raise sa_exc.InvalidRequestError( + "SQL expression, column, or mapped entity " + "expected - got '%r'" % (column, ) + ) + elif not check_column: + self._label_name = getattr(column, 'key', None) + search_entities = True + + self.type = type_ = column.type + self.use_id_for_hash = not type_.hashable + + # If the Column is unnamed, give it a + # label() so that mutable column expressions + # can be located in the result even + # if the expression's identity has been 
changed + # due to adaption. + + if not column._label and not getattr(column, 'is_literal', False): + column = column.label(self._label_name) + + query._entities.append(self) + + self.column = column + self.froms = set() + + # look for ORM entities represented within the + # given expression. Try to count only entities + # for columns whose FROM object is in the actual list + # of FROMs for the overall expression - this helps + # subqueries which were built from ORM constructs from + # leaking out their entities into the main select construct + self.actual_froms = actual_froms = set(column._from_objects) + + if not search_entities: + self.entity_zero = _entity + if _entity: + self.entities = [_entity] + self.mapper = _entity.mapper + else: + self.entities = [] + self.mapper = None + self._from_entities = set(self.entities) + else: + all_elements = [ + elem for elem in sql_util.surface_column_elements(column) + if 'parententity' in elem._annotations + ] + + self.entities = util.unique_list([ + elem._annotations['parententity'] + for elem in all_elements + if 'parententity' in elem._annotations + ]) + + self._from_entities = set([ + elem._annotations['parententity'] + for elem in all_elements + if 'parententity' in elem._annotations + and actual_froms.intersection(elem._from_objects) + ]) + if self.entities: + self.entity_zero = self.entities[0] + self.mapper = self.entity_zero.mapper + elif self.namespace is not None: + self.entity_zero = self.namespace + self.mapper = None + else: + self.entity_zero = None + self.mapper = None + + supports_single_entity = False + + @property + def entity_zero_or_selectable(self): + if self.entity_zero is not None: + return self.entity_zero + elif self.actual_froms: + return list(self.actual_froms)[0] + else: + return None + + def adapt_to_selectable(self, query, sel): + c = _ColumnEntity(query, sel.corresponding_column(self.column)) + c._label_name = self._label_name + c.entity_zero = self.entity_zero + c.entities = self.entities + + def setup_entity(self, ext_info, aliased_adapter): + if 'selectable' not in self.__dict__: + self.selectable = ext_info.selectable + + if self.actual_froms.intersection(ext_info.selectable._from_objects): + self.froms.add(ext_info.selectable) + + def corresponds_to(self, entity): + # TODO: just returning False here, + # no tests fail + if self.entity_zero is None: + return False + elif _is_aliased_class(entity): + # TODO: polymorphic subclasses ? 
+ return entity is self.entity_zero + else: + return not _is_aliased_class(self.entity_zero) and \ + entity.common_parent(self.entity_zero) + + def row_processor(self, query, context, result): + if ('fetch_column', self) in context.attributes: + column = context.attributes[('fetch_column', self)] + else: + column = query._adapt_clause(self.column, False, True) + + if context.adapter: + column = context.adapter.columns[column] + + getter = result._getter(column) + return getter, self._label_name + + def setup_context(self, query, context): + column = query._adapt_clause(self.column, False, True) + context.froms += tuple(self.froms) + context.primary_columns.append(column) + + context.attributes[('fetch_column', self)] = column + + def __str__(self): + return str(self.column) + + +class QueryContext(object): + __slots__ = ( + 'multi_row_eager_loaders', 'adapter', 'froms', 'for_update', + 'query', 'session', 'autoflush', 'populate_existing', + 'invoke_all_eagers', 'version_check', 'refresh_state', + 'primary_columns', 'secondary_columns', 'eager_order_by', + 'eager_joins', 'create_eager_joins', 'propagate_options', + 'attributes', 'statement', 'from_clause', 'whereclause', + 'order_by', 'labels', '_for_update_arg', 'runid', 'partials' + ) + + def __init__(self, query): + + if query._statement is not None: + if isinstance(query._statement, expression.SelectBase) and \ + not query._statement._textual and \ + not query._statement.use_labels: + self.statement = query._statement.apply_labels() + else: + self.statement = query._statement + else: + self.statement = None + self.from_clause = query._from_obj + self.whereclause = query._criterion + self.order_by = query._order_by + + self.multi_row_eager_loaders = False + self.adapter = None + self.froms = () + self.for_update = None + self.query = query + self.session = query.session + self.autoflush = query._autoflush + self.populate_existing = query._populate_existing + self.invoke_all_eagers = query._invoke_all_eagers + self.version_check = query._version_check + self.refresh_state = query._refresh_state + self.primary_columns = [] + self.secondary_columns = [] + self.eager_order_by = [] + self.eager_joins = {} + self.create_eager_joins = [] + self.propagate_options = set(o for o in query._with_options if + o.propagate_to_loaders) + self.attributes = query._attributes.copy() + + +class AliasOption(interfaces.MapperOption): + + def __init__(self, alias): + r"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query` + that the main table has been aliased. + + This is a seldom-used option to suit the + very rare case that :func:`.contains_eager` + is being used in conjunction with a user-defined SELECT + statement that aliases the parent table. E.g.:: + + # define an aliased UNION called 'ulist' + ulist = users.select(users.c.user_id==7).\ + union(users.select(users.c.user_id>7)).\ + alias('ulist') + + # add on an eager load of "addresses" + statement = ulist.outerjoin(addresses).\ + select().apply_labels() + + # create query, indicating "ulist" will be an + # alias for the main table, "addresses" + # property should be eager loaded + query = session.query(User).options( + contains_alias(ulist), + contains_eager(User.addresses)) + + # then get results via the statement + results = query.from_statement(statement).all() + + :param alias: is the string name of an alias, or a + :class:`~.sql.expression.Alias` object representing + the alias. 
+ + """ + self.alias = alias + + def process_query(self, query): + if isinstance(self.alias, util.string_types): + alias = query._mapper_zero().mapped_table.alias(self.alias) + else: + alias = self.alias + query._from_obj_alias = sql_util.ColumnAdapter(alias) diff --git a/app/lib/sqlalchemy/orm/relationships.py b/app/lib/sqlalchemy/orm/relationships.py new file mode 100644 index 0000000..1298e88 --- /dev/null +++ b/app/lib/sqlalchemy/orm/relationships.py @@ -0,0 +1,2875 @@ +# orm/relationships.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Heuristics related to join conditions as used in +:func:`.relationship`. + +Provides the :class:`.JoinCondition` object, which encapsulates +SQL annotation and aliasing behavior focused on the `primaryjoin` +and `secondaryjoin` aspects of :func:`.relationship`. + +""" +from __future__ import absolute_import +from .. import sql, util, exc as sa_exc, schema, log + +import weakref +from .util import CascadeOptions, _orm_annotate, _orm_deannotate +from . import dependency +from . import attributes +from ..sql.util import ( + ClauseAdapter, + join_condition, _shallow_annotate, visit_binary_product, + _deep_deannotate, selectables_overlap, adapt_criterion_to_null +) +from ..sql import operators, expression, visitors +from .interfaces import (MANYTOMANY, MANYTOONE, ONETOMANY, + StrategizedProperty, PropComparator) +from ..inspection import inspect +from . import mapper as mapperlib +import collections + + +def remote(expr): + """Annotate a portion of a primaryjoin expression + with a 'remote' annotation. + + See the section :ref:`relationship_custom_foreign` for a + description of use. + + .. versionadded:: 0.8 + + .. seealso:: + + :ref:`relationship_custom_foreign` + + :func:`.foreign` + + """ + return _annotate_columns(expression._clause_element_as_expr(expr), + {"remote": True}) + + +def foreign(expr): + """Annotate a portion of a primaryjoin expression + with a 'foreign' annotation. + + See the section :ref:`relationship_custom_foreign` for a + description of use. + + .. versionadded:: 0.8 + + .. seealso:: + + :ref:`relationship_custom_foreign` + + :func:`.remote` + + """ + + return _annotate_columns(expression._clause_element_as_expr(expr), + {"foreign": True}) + + +@log.class_logger +@util.langhelpers.dependency_for("sqlalchemy.orm.properties") +class RelationshipProperty(StrategizedProperty): + """Describes an object property that holds a single item or list + of items that correspond to a related database table. + + Public constructor is the :func:`.orm.relationship` function. 
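+
+    A minimal declarative sketch, using hypothetical ``Parent`` and
+    ``Child`` mapped classes::
+
+        class Parent(Base):
+            __tablename__ = 'parent'
+            id = Column(Integer, primary_key=True)
+            children = relationship("Child")
+
+        class Child(Base):
+            __tablename__ = 'child'
+            id = Column(Integer, primary_key=True)
+            parent_id = Column(Integer, ForeignKey('parent.id'))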
+ + See also: + + :ref:`relationship_config_toplevel` + + """ + + strategy_wildcard_key = 'relationship' + + _dependency_processor = None + + def __init__(self, argument, + secondary=None, primaryjoin=None, + secondaryjoin=None, + foreign_keys=None, + uselist=None, + order_by=False, + backref=None, + back_populates=None, + post_update=False, + cascade=False, extension=None, + viewonly=False, lazy=True, + collection_class=None, passive_deletes=False, + passive_updates=True, remote_side=None, + enable_typechecks=True, join_depth=None, + comparator_factory=None, + single_parent=False, innerjoin=False, + distinct_target_key=None, + doc=None, + active_history=False, + cascade_backrefs=True, + load_on_pending=False, + bake_queries=True, + _local_remote_pairs=None, + query_class=None, + info=None): + """Provide a relationship between two mapped classes. + + This corresponds to a parent-child or associative table relationship. + The constructed class is an instance of + :class:`.RelationshipProperty`. + + A typical :func:`.relationship`, used in a classical mapping:: + + mapper(Parent, properties={ + 'children': relationship(Child) + }) + + Some arguments accepted by :func:`.relationship` optionally accept a + callable function, which when called produces the desired value. + The callable is invoked by the parent :class:`.Mapper` at "mapper + initialization" time, which happens only when mappers are first used, + and is assumed to be after all mappings have been constructed. This + can be used to resolve order-of-declaration and other dependency + issues, such as if ``Child`` is declared below ``Parent`` in the same + file:: + + mapper(Parent, properties={ + "children":relationship(lambda: Child, + order_by=lambda: Child.id) + }) + + When using the :ref:`declarative_toplevel` extension, the Declarative + initializer allows string arguments to be passed to + :func:`.relationship`. These string arguments are converted into + callables that evaluate the string as Python code, using the + Declarative class-registry as a namespace. This allows the lookup of + related classes to be automatic via their string name, and removes the + need to import related classes at all into the local module space:: + + from sqlalchemy.ext.declarative import declarative_base + + Base = declarative_base() + + class Parent(Base): + __tablename__ = 'parent' + id = Column(Integer, primary_key=True) + children = relationship("Child", order_by="Child.id") + + .. seealso:: + + :ref:`relationship_config_toplevel` - Full introductory and + reference documentation for :func:`.relationship`. + + :ref:`orm_tutorial_relationship` - ORM tutorial introduction. + + :param argument: + a mapped class, or actual :class:`.Mapper` instance, representing + the target of the relationship. + + :paramref:`~.relationship.argument` may also be passed as a callable + function which is evaluated at mapper initialization time, and may + be passed as a Python-evaluable string when using Declarative. + + .. seealso:: + + :ref:`declarative_configuring_relationships` - further detail + on relationship configuration when using Declarative. + + :param secondary: + for a many-to-many relationship, specifies the intermediary + table, and is typically an instance of :class:`.Table`. + In less common circumstances, the argument may also be specified + as an :class:`.Alias` construct, or even a :class:`.Join` construct. + + :paramref:`~.relationship.secondary` may + also be passed as a callable function which is evaluated at + mapper initialization time. 
When using Declarative, it may also + be a string argument noting the name of a :class:`.Table` that is + present in the :class:`.MetaData` collection associated with the + parent-mapped :class:`.Table`. + + The :paramref:`~.relationship.secondary` keyword argument is + typically applied in the case where the intermediary :class:`.Table` + is not otherwise expressed in any direct class mapping. If the + "secondary" table is also explicitly mapped elsewhere (e.g. as in + :ref:`association_pattern`), one should consider applying the + :paramref:`~.relationship.viewonly` flag so that this + :func:`.relationship` is not used for persistence operations which + may conflict with those of the association object pattern. + + .. seealso:: + + :ref:`relationships_many_to_many` - Reference example of "many + to many". + + :ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to + many-to-many relationships. + + :ref:`self_referential_many_to_many` - Specifics on using + many-to-many in a self-referential case. + + :ref:`declarative_many_to_many` - Additional options when using + Declarative. + + :ref:`association_pattern` - an alternative to + :paramref:`~.relationship.secondary` when composing association + table relationships, allowing additional attributes to be + specified on the association table. + + :ref:`composite_secondary_join` - a lesser-used pattern which + in some cases can enable complex :func:`.relationship` SQL + conditions to be used. + + .. versionadded:: 0.9.2 :paramref:`~.relationship.secondary` works + more effectively when referring to a :class:`.Join` instance. + + :param active_history=False: + When ``True``, indicates that the "previous" value for a + many-to-one reference should be loaded when replaced, if + not already loaded. Normally, history tracking logic for + simple many-to-ones only needs to be aware of the "new" + value in order to perform a flush. This flag is available + for applications that make use of + :func:`.attributes.get_history` which also need to know + the "previous" value of the attribute. + + :param backref: + indicates the string name of a property to be placed on the related + mapper's class that will handle this relationship in the other + direction. The other property will be created automatically + when the mappers are configured. Can also be passed as a + :func:`.backref` object to control the configuration of the + new relationship. + + .. seealso:: + + :ref:`relationships_backref` - Introductory documentation and + examples. + + :paramref:`~.relationship.back_populates` - alternative form + of backref specification. + + :func:`.backref` - allows control over :func:`.relationship` + configuration when using :paramref:`~.relationship.backref`. + + + :param back_populates: + Takes a string name and has the same meaning as + :paramref:`~.relationship.backref`, except the complementing + property is **not** created automatically, and instead must be + configured explicitly on the other mapper. The complementing + property should also indicate + :paramref:`~.relationship.back_populates` to this relationship to + ensure proper functioning. + + .. seealso:: + + :ref:`relationships_backref` - Introductory documentation and + examples. + + :paramref:`~.relationship.backref` - alternative form + of backref specification. + + :param bake_queries=True: + Use the :class:`.BakedQuery` cache to cache the construction of SQL + used in lazy loads, when the :func:`.bake_lazy_loaders` function has + first been called. 
Defaults to True and is intended to provide an
+          "opt out" flag per-relationship when the baked query cache system is
+          in use.
+
+          .. warning::
+
+              This flag **only** has an effect when the application-wide
+              :func:`.bake_lazy_loaders` function has been called.  It
+              defaults to True, so it is an "opt out" flag.
+
+              Setting this flag to False when baked queries are otherwise in
+              use might be done to reduce
+              ORM memory use for this :func:`.relationship`, or to work around
+              unresolved stability issues observed within the baked query
+              cache system.
+
+          .. versionadded:: 1.0.0
+
+          .. seealso::
+
+              :ref:`baked_toplevel`
+
+        :param cascade:
+          a comma-separated list of cascade rules which determines how
+          Session operations should be "cascaded" from parent to child.
+          This defaults to ``False``, which means the default cascade
+          should be used - this default cascade is ``"save-update, merge"``.
+
+          The available cascades are ``save-update``, ``merge``,
+          ``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``.
+          An additional option, ``all``, indicates shorthand for
+          ``"save-update, merge, refresh-expire,
+          expunge, delete"``, and is often used as in ``"all, delete-orphan"``
+          to indicate that related objects should follow along with the
+          parent object in all cases, and be deleted when de-associated.
+
+          .. seealso::
+
+              :ref:`unitofwork_cascades` - Full detail on each of the available
+              cascade options.
+
+              :ref:`tutorial_delete_cascade` - Tutorial example describing
+              a delete cascade.
+
+        :param cascade_backrefs=True:
+          a boolean value indicating if the ``save-update`` cascade should
+          operate along an assignment event intercepted by a backref.
+          When set to ``False``, the attribute managed by this relationship
+          will not cascade an incoming transient object into the session of a
+          persistent parent, if the event is received via backref.
+
+          .. seealso::
+
+              :ref:`backref_cascade` - Full discussion and examples on how
+              the :paramref:`~.relationship.cascade_backrefs` option is used.
+
+        :param collection_class:
+          a class or callable that returns a new list-holding object.  It will
+          be used in place of a plain list for storing elements.
+
+          .. seealso::
+
+              :ref:`custom_collections` - Introductory documentation and
+              examples.
+
+        :param comparator_factory:
+          a class which extends :class:`.RelationshipProperty.Comparator`
+          which provides custom SQL clause generation for comparison
+          operations.
+
+          .. seealso::
+
+              :class:`.PropComparator` - some detail on redefining comparators
+              at this level.
+
+              :ref:`custom_comparators` - Brief intro to this feature.
+
+
+        :param distinct_target_key=None:
+          Indicate if a "subquery" eager load should apply the DISTINCT
+          keyword to the innermost SELECT statement.  When left as ``None``,
+          the DISTINCT keyword will be applied in those cases when the target
+          columns do not comprise the full primary key of the target table.
+          When set to ``True``, the DISTINCT keyword is applied to the
+          innermost SELECT unconditionally.
+
+          It may be desirable to set this flag to False when the DISTINCT is
+          reducing performance of the innermost subquery beyond that of what
+          duplicate innermost rows may be causing.
+
+          .. versionadded:: 0.8.3 -
+             :paramref:`~.relationship.distinct_target_key` allows the
+             subquery eager loader to apply a DISTINCT modifier to the
+             innermost SELECT.
+
+          .. versionchanged:: 0.9.0 -
+             :paramref:`~.relationship.distinct_target_key` now defaults to
+             ``None``, so that the feature enables itself automatically for
+             those cases where the innermost query targets a non-unique
+             key.
+
+          .. seealso::
+
+              :ref:`loading_toplevel` - includes an introduction to subquery
+              eager loading.
+
+        :param doc:
+          docstring which will be applied to the resulting descriptor.
+
+        :param extension:
+          an :class:`.AttributeExtension` instance, or list of extensions,
+          which will be prepended to the list of attribute listeners for
+          the resulting descriptor placed on the class.
+
+          .. deprecated:: 0.7 Please see :class:`.AttributeEvents`.
+
+        :param foreign_keys:
+
+          a list of columns which are to be used as "foreign key"
+          columns, or columns which refer to the value in a remote
+          column, within the context of this :func:`.relationship`
+          object's :paramref:`~.relationship.primaryjoin` condition.
+          That is, if the :paramref:`~.relationship.primaryjoin`
+          condition of this :func:`.relationship` is ``a.id ==
+          b.a_id``, and the values in ``b.a_id`` are required to be
+          present in ``a.id``, then the "foreign key" column of this
+          :func:`.relationship` is ``b.a_id``.
+
+          In normal cases, the :paramref:`~.relationship.foreign_keys`
+          parameter is **not required.** :func:`.relationship` will
+          automatically determine which columns in the
+          :paramref:`~.relationship.primaryjoin` condition are to be
+          considered "foreign key" columns based on those
+          :class:`.Column` objects that specify :class:`.ForeignKey`,
+          or are otherwise listed as referencing columns in a
+          :class:`.ForeignKeyConstraint` construct.
+          :paramref:`~.relationship.foreign_keys` is only needed when:
+
+            1. There is more than one way to construct a join from the local
+               table to the remote table, as there are multiple foreign key
+               references present.  Setting ``foreign_keys`` will limit the
+               :func:`.relationship` to consider just those columns specified
+               here as "foreign".
+
+               .. versionchanged:: 0.8
+                    A multiple-foreign key join ambiguity can be resolved by
+                    setting the :paramref:`~.relationship.foreign_keys`
+                    parameter alone, without the need to explicitly set
+                    :paramref:`~.relationship.primaryjoin` as well.
+
+            2. The :class:`.Table` being mapped does not actually have
+               :class:`.ForeignKey` or :class:`.ForeignKeyConstraint`
+               constructs present, often because the table
+               was reflected from a database that does not support foreign key
+               reflection (MySQL MyISAM).
+
+            3. The :paramref:`~.relationship.primaryjoin` argument is used to
+               construct a non-standard join condition, which makes use of
+               columns or expressions that do not normally refer to their
+               "parent" column, such as a join condition expressed by a
+               complex comparison using a SQL function.
+
+          The :func:`.relationship` construct will raise informative
+          error messages that suggest the use of the
+          :paramref:`~.relationship.foreign_keys` parameter when
+          presented with an ambiguous condition.  In typical cases,
+          if :func:`.relationship` doesn't raise any exceptions, the
+          :paramref:`~.relationship.foreign_keys` parameter is usually
+          not needed.
+
+          :paramref:`~.relationship.foreign_keys` may also be passed as a
+          callable function which is evaluated at mapper initialization time,
+          and may be passed as a Python-evaluable string when using
+          Declarative.
+
+          .. seealso::
+
+              :ref:`relationship_foreign_keys`
+
+              :ref:`relationship_custom_foreign`
+
+              :func:`.foreign` - allows direct annotation of the "foreign"
+              columns within a :paramref:`~.relationship.primaryjoin`
+              condition.
+
+          .. versionadded:: 0.8
+              The :func:`.foreign` annotation can also be applied
+              directly to the :paramref:`~.relationship.primaryjoin`
+              expression, which is an alternate, more specific system of
+              describing which columns in a particular
+              :paramref:`~.relationship.primaryjoin` should be considered
+              "foreign".
+
+        :param info: Optional data dictionary which will be populated into the
+            :attr:`.MapperProperty.info` attribute of this object.
+
+            .. versionadded:: 0.8
+
+        :param innerjoin=False:
+          when ``True``, joined eager loads will use an inner join to join
+          against related tables instead of an outer join.  The purpose
+          of this option is generally one of performance, as inner joins
+          generally perform better than outer joins.
+
+          This flag can be set to ``True`` when the relationship references an
+          object via many-to-one using local foreign keys that are not
+          nullable, or when the reference is one-to-one or a collection that
+          is guaranteed to have at least one entry.
+
+          The option supports the same "nested" and "unnested" options as
+          that of :paramref:`.joinedload.innerjoin`.  See that flag
+          for details on nested / unnested behaviors.
+
+          .. seealso::
+
+            :paramref:`.joinedload.innerjoin` - the option as specified by
+            loader option, including detail on nesting behavior.
+
+            :ref:`what_kind_of_loading` - Discussion of some details of
+            various loader options.
+
+
+        :param join_depth:
+          when non-``None``, an integer value indicating how many levels
+          deep "eager" loaders should join on a self-referring or cyclical
+          relationship.  The number counts how many times the same Mapper
+          shall be present in the loading condition along a particular join
+          branch.  When left at its default of ``None``, eager loaders
+          will stop chaining when they encounter the same target mapper
+          which is already higher up in the chain.  This option applies
+          both to joined- and subquery- eager loaders.
+
+          .. seealso::
+
+            :ref:`self_referential_eager_loading` - Introductory documentation
+            and examples.
+
+        :param lazy='select': specifies
+          how the related items should be loaded.  Default value is
+          ``select``.  Values include:
+
+          * ``select`` - items should be loaded lazily when the property is
+            first accessed, using a separate SELECT statement, or identity map
+            fetch for simple many-to-one references.
+
+          * ``immediate`` - items should be loaded as the parents are loaded,
+            using a separate SELECT statement, or identity map fetch for
+            simple many-to-one references.
+
+          * ``joined`` - items should be loaded "eagerly" in the same query as
+            that of the parent, using a JOIN or LEFT OUTER JOIN.  Whether
+            the join is "outer" or not is determined by the
+            :paramref:`~.relationship.innerjoin` parameter.
+
+          * ``subquery`` - items should be loaded "eagerly" as the parents are
+            loaded, using one additional SQL statement, which issues a JOIN to
+            a subquery of the original statement, for each collection
+            requested.
+
+          * ``noload`` - no loading should occur at any time.  This is to
+            support "write-only" attributes, or attributes which are
+            populated in some manner specific to the application.
+
+          * ``raise`` - lazy loading is disallowed; accessing
+            the attribute, if its value were not already loaded via eager
+            loading, will raise an :exc:`~sqlalchemy.exc.InvalidRequestError`.
+            This strategy can be used when objects are to be detached from
+            their attached :class:`.Session` after they are loaded.
+
+            .. versionadded:: 1.1
+
+          * ``raise_on_sql`` - lazy loading that emits SQL is disallowed;
+            accessing the attribute, if its value were not already loaded via
+            eager loading, will raise an
+            :exc:`~sqlalchemy.exc.InvalidRequestError`, **if the lazy load
+            needs to emit SQL**.  If the lazy load can pull the related value
+            from the identity map or determine that it should be None, the
+            value is loaded.  This strategy can be used when objects will
+            remain associated with the attached :class:`.Session`, however
+            additional SELECT statements should be blocked.
+
+            .. versionadded:: 1.1
+
+          * ``dynamic`` - the attribute will return a pre-configured
+            :class:`.Query` object for all read
+            operations, onto which further filtering operations can be
+            applied before iterating the results.  See
+            the section :ref:`dynamic_relationship` for more details.
+
+          * True - a synonym for 'select'
+
+          * False - a synonym for 'joined'
+
+          * None - a synonym for 'noload'
+
+          .. seealso::
+
+            :doc:`/orm/loading_relationships` - Full documentation on
+            relationship loader configuration.
+
+            :ref:`dynamic_relationship` - detail on the ``dynamic`` option.
+
+            :ref:`collections_noload_raiseload` - notes on "noload" and
+            "raise"
+
+        :param load_on_pending=False:
+          Indicates loading behavior for transient or pending parent objects.
+
+          When set to ``True``, causes the lazy-loader to
+          issue a query for a parent object that is not persistent, meaning it
+          has never been flushed.  This may take effect for a pending object
+          when autoflush is disabled, or for a transient object that has been
+          "attached" to a :class:`.Session` but is not part of its pending
+          collection.
+
+          The :paramref:`~.relationship.load_on_pending` flag does not improve
+          behavior when the ORM is used normally - object references should be
+          constructed at the object level, not at the foreign key level, so
+          that they are present in an ordinary way before a flush proceeds.
+          This flag is not intended for general use.
+
+          .. seealso::
+
+              :meth:`.Session.enable_relationship_loading` - this method
+              establishes "load on pending" behavior for the whole object, and
+              also allows loading on objects that remain transient or
+              detached.
+
+        :param order_by:
+          indicates the ordering that should be applied when loading these
+          items.  :paramref:`~.relationship.order_by` is expected to refer to
+          one of the :class:`.Column` objects to which the target class is
+          mapped, or the attribute itself bound to the target class which
+          refers to the column.
+
+          :paramref:`~.relationship.order_by` may also be passed as a callable
+          function which is evaluated at mapper initialization time, and may
+          be passed as a Python-evaluable string when using Declarative.
+
+        :param passive_deletes=False:
+          Indicates loading behavior during delete operations.
+
+          A value of True indicates that unloaded child items should not
+          be loaded during a delete operation on the parent.  Normally,
+          when a parent item is deleted, all child items are loaded so
+          that they can either be marked as deleted, or have their
+          foreign key to the parent set to NULL.  Marking this flag as
+          True usually implies an ON DELETE rule is in
+          place which will handle updating/deleting child rows on the
+          database side.
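+
+          A sketch of the typical arrangement, assuming hypothetical
+          ``Parent``/``Child`` mappings where the child table's foreign
+          key is created with ``ON DELETE CASCADE``::
+
+              class Parent(Base):
+                  __tablename__ = 'parent'
+                  id = Column(Integer, primary_key=True)
+                  children = relationship(
+                      "Child", cascade="all, delete",
+                      passive_deletes=True)
+
+              class Child(Base):
+                  __tablename__ = 'child'
+                  id = Column(Integer, primary_key=True)
+                  parent_id = Column(
+                      Integer,
+                      ForeignKey('parent.id', ondelete='CASCADE'))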
+ + Additionally, setting the flag to the string value 'all' will + disable the "nulling out" of the child foreign keys, when there + is no delete or delete-orphan cascade enabled. This is + typically used when a triggering or error raise scenario is in + place on the database side. Note that the foreign key + attributes on in-session child objects will not be changed + after a flush occurs so this is a very special use-case + setting. + + .. seealso:: + + :ref:`passive_deletes` - Introductory documentation + and examples. + + :param passive_updates=True: + Indicates the persistence behavior to take when a referenced + primary key value changes in place, indicating that the referencing + foreign key columns will also need their value changed. + + When True, it is assumed that ``ON UPDATE CASCADE`` is configured on + the foreign key in the database, and that the database will + handle propagation of an UPDATE from a source column to + dependent rows. When False, the SQLAlchemy :func:`.relationship` + construct will attempt to emit its own UPDATE statements to + modify related targets. However note that SQLAlchemy **cannot** + emit an UPDATE for more than one level of cascade. Also, + setting this flag to False is not compatible in the case where + the database is in fact enforcing referential integrity, unless + those constraints are explicitly "deferred", if the target backend + supports it. + + It is highly advised that an application which is employing + mutable primary keys keeps ``passive_updates`` set to True, + and instead uses the referential integrity features of the database + itself in order to handle the change efficiently and fully. + + .. seealso:: + + :ref:`passive_updates` - Introductory documentation and + examples. + + :paramref:`.mapper.passive_updates` - a similar flag which + takes effect for joined-table inheritance mappings. + + :param post_update: + this indicates that the relationship should be handled by a + second UPDATE statement after an INSERT or before a + DELETE. Currently, it also will issue an UPDATE after the + instance was UPDATEd as well, although this technically should + be improved. This flag is used to handle saving bi-directional + dependencies between two individual rows (i.e. each row + references the other), where it would otherwise be impossible to + INSERT or DELETE both rows fully since one row exists before the + other. Use this flag when a particular mapping arrangement will + incur two rows that are dependent on each other, such as a table + that has a one-to-many relationship to a set of child rows, and + also has a column that references a single child row within that + list (i.e. both tables contain a foreign key to each other). If + a flush operation returns an error that a "cyclical + dependency" was detected, this is a cue that you might want to + use :paramref:`~.relationship.post_update` to "break" the cycle. + + .. seealso:: + + :ref:`post_update` - Introductory documentation and examples. + + :param primaryjoin: + a SQL expression that will be used as the primary + join of this child object against the parent object, or in a + many-to-many relationship the join of the primary object to the + association table. By default, this value is computed based on the + foreign key relationships of the parent and child tables (or + association table). 
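+
+          An illustrative custom condition (hypothetical ``User`` /
+          ``Address`` mappings), limiting the collection to a subset of
+          the related rows::
+
+              boston_addresses = relationship(
+                  "Address",
+                  primaryjoin="and_(User.id == Address.user_id, "
+                              "Address.city == 'Boston')")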
+ + :paramref:`~.relationship.primaryjoin` may also be passed as a + callable function which is evaluated at mapper initialization time, + and may be passed as a Python-evaluable string when using + Declarative. + + .. seealso:: + + :ref:`relationship_primaryjoin` + + :param remote_side: + used for self-referential relationships, indicates the column or + list of columns that form the "remote side" of the relationship. + + :paramref:`.relationship.remote_side` may also be passed as a + callable function which is evaluated at mapper initialization time, + and may be passed as a Python-evaluable string when using + Declarative. + + .. versionchanged:: 0.8 + The :func:`.remote` annotation can also be applied + directly to the ``primaryjoin`` expression, which is an + alternate, more specific system of describing which columns in a + particular ``primaryjoin`` should be considered "remote". + + .. seealso:: + + :ref:`self_referential` - in-depth explanation of how + :paramref:`~.relationship.remote_side` + is used to configure self-referential relationships. + + :func:`.remote` - an annotation function that accomplishes the + same purpose as :paramref:`~.relationship.remote_side`, typically + when a custom :paramref:`~.relationship.primaryjoin` condition + is used. + + :param query_class: + a :class:`.Query` subclass that will be used as the base of the + "appender query" returned by a "dynamic" relationship, that + is, a relationship that specifies ``lazy="dynamic"`` or was + otherwise constructed using the :func:`.orm.dynamic_loader` + function. + + .. seealso:: + + :ref:`dynamic_relationship` - Introduction to "dynamic" + relationship loaders. + + :param secondaryjoin: + a SQL expression that will be used as the join of + an association table to the child object. By default, this value is + computed based on the foreign key relationships of the association + and child tables. + + :paramref:`~.relationship.secondaryjoin` may also be passed as a + callable function which is evaluated at mapper initialization time, + and may be passed as a Python-evaluable string when using + Declarative. + + .. seealso:: + + :ref:`relationship_primaryjoin` + + :param single_parent: + when True, installs a validator which will prevent objects + from being associated with more than one parent at a time. + This is used for many-to-one or many-to-many relationships that + should be treated either as one-to-one or one-to-many. Its usage + is optional, except for :func:`.relationship` constructs which + are many-to-one or many-to-many and also + specify the ``delete-orphan`` cascade option. The + :func:`.relationship` construct itself will raise an error + instructing when this option is required. + + .. seealso:: + + :ref:`unitofwork_cascades` - includes detail on when the + :paramref:`~.relationship.single_parent` flag may be appropriate. + + :param uselist: + a boolean that indicates if this property should be loaded as a + list or a scalar. In most cases, this value is determined + automatically by :func:`.relationship` at mapper configuration + time, based on the type and direction + of the relationship - one to many forms a list, many to one + forms a scalar, many to many is a list. If a scalar is desired + where normally a list would be present, such as a bi-directional + one-to-one relationship, set :paramref:`~.relationship.uselist` to + False. 
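+
+          E.g., a sketch of one side of a hypothetical bi-directional
+          one-to-one::
+
+              class Parent(Base):
+                  __tablename__ = 'parent'
+                  id = Column(Integer, primary_key=True)
+                  child = relationship(
+                      "Child", uselist=False,
+                      back_populates="parent")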
+ + The :paramref:`~.relationship.uselist` flag is also available on an + existing :func:`.relationship` construct as a read-only attribute, + which can be used to determine if this :func:`.relationship` deals + with collections or scalar attributes:: + + >>> User.addresses.property.uselist + True + + .. seealso:: + + :ref:`relationships_one_to_one` - Introduction to the "one to + one" relationship pattern, which is typically when the + :paramref:`~.relationship.uselist` flag is needed. + + :param viewonly=False: + when set to True, the relationship is used only for loading objects, + and not for any persistence operation. A :func:`.relationship` + which specifies :paramref:`~.relationship.viewonly` can work + with a wider range of SQL operations within the + :paramref:`~.relationship.primaryjoin` condition, including + operations that feature the use of a variety of comparison operators + as well as SQL functions such as :func:`~.sql.expression.cast`. The + :paramref:`~.relationship.viewonly` flag is also of general use when + defining any kind of :func:`~.relationship` that doesn't represent + the full set of related objects, to prevent modifications of the + collection from resulting in persistence operations. + + + """ + super(RelationshipProperty, self).__init__() + + self.uselist = uselist + self.argument = argument + self.secondary = secondary + self.primaryjoin = primaryjoin + self.secondaryjoin = secondaryjoin + self.post_update = post_update + self.direction = None + self.viewonly = viewonly + self.lazy = lazy + self.single_parent = single_parent + self._user_defined_foreign_keys = foreign_keys + self.collection_class = collection_class + self.passive_deletes = passive_deletes + self.cascade_backrefs = cascade_backrefs + self.passive_updates = passive_updates + self.remote_side = remote_side + self.enable_typechecks = enable_typechecks + self.query_class = query_class + self.innerjoin = innerjoin + self.distinct_target_key = distinct_target_key + self.doc = doc + self.active_history = active_history + self.join_depth = join_depth + self.local_remote_pairs = _local_remote_pairs + self.extension = extension + self.bake_queries = bake_queries + self.load_on_pending = load_on_pending + self.comparator_factory = comparator_factory or \ + RelationshipProperty.Comparator + self.comparator = self.comparator_factory(self, None) + util.set_creation_order(self) + + if info is not None: + self.info = info + + self.strategy_key = (("lazy", self.lazy), ) + + self._reverse_property = set() + + self.cascade = cascade if cascade is not False \ + else "save-update, merge" + + self.order_by = order_by + + self.back_populates = back_populates + + if self.back_populates: + if backref: + raise sa_exc.ArgumentError( + "backref and back_populates keyword arguments " + "are mutually exclusive") + self.backref = None + else: + self.backref = backref + + def instrument_class(self, mapper): + attributes.register_descriptor( + mapper.class_, + self.key, + comparator=self.comparator_factory(self, mapper), + parententity=mapper, + doc=self.doc, + ) + + class Comparator(PropComparator): + """Produce boolean, comparison, and other operators for + :class:`.RelationshipProperty` attributes. + + See the documentation for :class:`.PropComparator` for a brief + overview of ORM level operator definition. 
+ + See also: + + :class:`.PropComparator` + + :class:`.ColumnProperty.Comparator` + + :class:`.ColumnOperators` + + :ref:`types_operators` + + :attr:`.TypeEngine.comparator_factory` + + """ + + _of_type = None + + def __init__( + self, prop, parentmapper, adapt_to_entity=None, of_type=None): + """Construction of :class:`.RelationshipProperty.Comparator` + is internal to the ORM's attribute mechanics. + + """ + self.prop = prop + self._parententity = parentmapper + self._adapt_to_entity = adapt_to_entity + if of_type: + self._of_type = of_type + + def adapt_to_entity(self, adapt_to_entity): + return self.__class__(self.property, self._parententity, + adapt_to_entity=adapt_to_entity, + of_type=self._of_type) + + @util.memoized_property + def mapper(self): + """The target :class:`.Mapper` referred to by this + :class:`.RelationshipProperty.Comparator`. + + This is the "target" or "remote" side of the + :func:`.relationship`. + + """ + return self.property.mapper + + @util.memoized_property + def _parententity(self): + return self.property.parent + + def _source_selectable(self): + if self._adapt_to_entity: + return self._adapt_to_entity.selectable + else: + return self.property.parent._with_polymorphic_selectable + + def __clause_element__(self): + adapt_from = self._source_selectable() + if self._of_type: + of_type = inspect(self._of_type).mapper + else: + of_type = None + + pj, sj, source, dest, \ + secondary, target_adapter = self.property._create_joins( + source_selectable=adapt_from, + source_polymorphic=True, + of_type=of_type) + if sj is not None: + return pj & sj + else: + return pj + + def of_type(self, cls): + """Produce a construct that represents a particular 'subtype' of + attribute for the parent class. + + Currently this is usable in conjunction with :meth:`.Query.join` + and :meth:`.Query.outerjoin`. + + """ + return RelationshipProperty.Comparator( + self.property, + self._parententity, + adapt_to_entity=self._adapt_to_entity, + of_type=cls) + + def in_(self, other): + """Produce an IN clause - this is not implemented + for :func:`~.orm.relationship`-based attributes at this time. + + """ + raise NotImplementedError('in_() not yet supported for ' + 'relationships. For a simple ' + 'many-to-one, use in_() against ' + 'the set of foreign key values.') + + __hash__ = None + + def __eq__(self, other): + """Implement the ``==`` operator. + + In a many-to-one context, such as:: + + MyClass.some_prop == + + this will typically produce a + clause such as:: + + mytable.related_id == + + Where ```` is the primary key of the given + object. + + The ``==`` operator provides partial functionality for non- + many-to-one comparisons: + + * Comparisons against collections are not supported. + Use :meth:`~.RelationshipProperty.Comparator.contains`. + * Compared to a scalar one-to-many, will produce a + clause that compares the target columns in the parent to + the given target. + * Compared to a scalar many-to-many, an alias + of the association table will be rendered as + well, forming a natural join that is part of the + main body of the query. This will not work for + queries that go beyond simple AND conjunctions of + comparisons, such as those which use OR. Use + explicit joins, outerjoins, or + :meth:`~.RelationshipProperty.Comparator.has` for + more comprehensive non-many-to-one scalar + membership tests. + * Comparisons against ``None`` given in a one-to-many + or many-to-many context produce a NOT EXISTS clause. 
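+
+            For example, with hypothetical ``Address.user`` (many-to-one)
+            and ``User.addresses`` (one-to-many) relationships::
+
+                # renders address.user_id = :param
+                session.query(Address).filter(Address.user == someuser)
+
+                # renders NOT EXISTS (SELECT 1 FROM address ...)
+                session.query(User).filter(User.addresses == None)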
+ + """ + if isinstance(other, (util.NoneType, expression.Null)): + if self.property.direction in [ONETOMANY, MANYTOMANY]: + return ~self._criterion_exists() + else: + return _orm_annotate(self.property._optimized_compare( + None, adapt_source=self.adapter)) + elif self.property.uselist: + raise sa_exc.InvalidRequestError( + "Can't compare a collection to an object or collection; " + "use contains() to test for membership.") + else: + return _orm_annotate( + self.property._optimized_compare( + other, adapt_source=self.adapter)) + + def _criterion_exists(self, criterion=None, **kwargs): + if getattr(self, '_of_type', None): + info = inspect(self._of_type) + target_mapper, to_selectable, is_aliased_class = \ + info.mapper, info.selectable, info.is_aliased_class + if self.property._is_self_referential and not \ + is_aliased_class: + to_selectable = to_selectable.alias() + + single_crit = target_mapper._single_table_criterion + if single_crit is not None: + if criterion is not None: + criterion = single_crit & criterion + else: + criterion = single_crit + else: + is_aliased_class = False + to_selectable = None + + if self.adapter: + source_selectable = self._source_selectable() + else: + source_selectable = None + + pj, sj, source, dest, secondary, target_adapter = \ + self.property._create_joins( + dest_polymorphic=True, + dest_selectable=to_selectable, + source_selectable=source_selectable) + + for k in kwargs: + crit = getattr(self.property.mapper.class_, k) == kwargs[k] + if criterion is None: + criterion = crit + else: + criterion = criterion & crit + + # annotate the *local* side of the join condition, in the case + # of pj + sj this is the full primaryjoin, in the case of just + # pj its the local side of the primaryjoin. + if sj is not None: + j = _orm_annotate(pj) & sj + else: + j = _orm_annotate(pj, exclude=self.property.remote_side) + + if criterion is not None and target_adapter and not \ + is_aliased_class: + # limit this adapter to annotated only? + criterion = target_adapter.traverse(criterion) + + # only have the "joined left side" of what we + # return be subject to Query adaption. The right + # side of it is used for an exists() subquery and + # should not correlate or otherwise reach out + # to anything in the enclosing query. + if criterion is not None: + criterion = criterion._annotate( + {'no_replacement_traverse': True}) + + crit = j & sql.True_._ifnone(criterion) + + ex = sql.exists([1], crit, from_obj=dest).correlate_except(dest) + if secondary is not None: + ex = ex.correlate_except(secondary) + return ex + + def any(self, criterion=None, **kwargs): + """Produce an expression that tests a collection against + particular criterion, using EXISTS. + + An expression like:: + + session.query(MyClass).filter( + MyClass.somereference.any(SomeRelated.x==2) + ) + + + Will produce a query like:: + + SELECT * FROM my_table WHERE + EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id + AND related.x=2) + + Because :meth:`~.RelationshipProperty.Comparator.any` uses + a correlated subquery, its performance is not nearly as + good when compared against large target tables as that of + using a join. 
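+
+            For example, a join-based equivalent of the above query
+            (sketched against the same hypothetical mapping) might
+            read::
+
+                session.query(MyClass).join(MyClass.somereference).filter(
+                    SomeRelated.x == 2)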
+
+            :meth:`~.RelationshipProperty.Comparator.any` is particularly
+            useful for testing for empty collections::
+
+                session.query(MyClass).filter(
+                    ~MyClass.somereference.any()
+                )
+
+            will produce::
+
+                SELECT * FROM my_table WHERE
+                NOT EXISTS (SELECT 1 FROM related WHERE
+                related.my_id=my_table.id)
+
+            :meth:`~.RelationshipProperty.Comparator.any` is only
+            valid for collections, i.e. a :func:`.relationship`
+            that has ``uselist=True``.  For scalar references,
+            use :meth:`~.RelationshipProperty.Comparator.has`.
+
+            """
+            if not self.property.uselist:
+                raise sa_exc.InvalidRequestError(
+                    "'any()' not implemented for scalar "
+                    "attributes. Use has()."
+                )
+
+            return self._criterion_exists(criterion, **kwargs)
+
+        def has(self, criterion=None, **kwargs):
+            """Produce an expression that tests a scalar reference against
+            particular criterion, using EXISTS.
+
+            An expression like::
+
+                session.query(MyClass).filter(
+                    MyClass.somereference.has(SomeRelated.x==2)
+                )
+
+
+            Will produce a query like::
+
+                SELECT * FROM my_table WHERE
+                EXISTS (SELECT 1 FROM related WHERE
+                related.id==my_table.related_id AND related.x=2)
+
+            Because :meth:`~.RelationshipProperty.Comparator.has` uses
+            a correlated subquery, its performance is not nearly as
+            good when compared against large target tables as that of
+            using a join.
+
+            :meth:`~.RelationshipProperty.Comparator.has` is only
+            valid for scalar references, i.e. a :func:`.relationship`
+            that has ``uselist=False``.  For collection references,
+            use :meth:`~.RelationshipProperty.Comparator.any`.
+
+            """
+            if self.property.uselist:
+                raise sa_exc.InvalidRequestError(
+                    "'has()' not implemented for collections. "
+                    "Use any().")
+            return self._criterion_exists(criterion, **kwargs)
+
+        def contains(self, other, **kwargs):
+            """Return a simple expression that tests a collection for
+            containment of a particular item.
+
+            :meth:`~.RelationshipProperty.Comparator.contains` is
+            only valid for a collection, i.e. a
+            :func:`~.orm.relationship` that implements
+            one-to-many or many-to-many with ``uselist=True``.
+
+            When used in a simple one-to-many context, an
+            expression like::
+
+                MyClass.contains(other)
+
+            Produces a clause like::
+
+                mytable.id == <some id>
+
+            Where ``<some id>`` is the value of the foreign key
+            attribute on ``other`` which refers to the primary
+            key of its parent object. From this it follows that
+            :meth:`~.RelationshipProperty.Comparator.contains` is
+            very useful when used with simple one-to-many
+            operations.
+
+            For many-to-many operations, the behavior of
+            :meth:`~.RelationshipProperty.Comparator.contains`
+            has more caveats. The association table will be
+            rendered in the statement, producing an "implicit"
+            join, that is, includes multiple tables in the FROM
+            clause which are equated in the WHERE clause::
+
+                query(MyClass).filter(MyClass.contains(other))
+
+            Produces a query like::
+
+                SELECT * FROM my_table, my_association_table AS
+                my_association_table_1 WHERE
+                my_table.id = my_association_table_1.parent_id
+                AND my_association_table_1.child_id = <some id>
+
+            Where ``<some id>`` would be the primary key of
+            ``other``. From the above, it is clear that
+            :meth:`~.RelationshipProperty.Comparator.contains`
+            will **not** work with many-to-many collections when
+            used in queries that move beyond simple AND
+            conjunctions, such as multiple
+            :meth:`~.RelationshipProperty.Comparator.contains`
+            expressions joined by OR. In such cases subqueries or
+            explicit "outer joins" will need to be used instead.
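+
+            For example, a disjunction such as the following
+            (illustrative only) is subject to this limitation::
+
+                query(MyClass).filter(
+                    MyClass.contains(other_a) | MyClass.contains(other_b))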
+            See :meth:`~.RelationshipProperty.Comparator.any` for
+            a less-performant alternative using EXISTS, or refer
+            to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins`
+            for more details on constructing outer joins.
+
+            """
+            if not self.property.uselist:
+                raise sa_exc.InvalidRequestError(
+                    "'contains' not implemented for scalar "
+                    "attributes. Use ==")
+            clause = self.property._optimized_compare(
+                other, adapt_source=self.adapter)
+
+            if self.property.secondaryjoin is not None:
+                clause.negation_clause = \
+                    self.__negated_contains_or_equals(other)
+
+            return clause
+
+        def __negated_contains_or_equals(self, other):
+            if self.property.direction == MANYTOONE:
+                state = attributes.instance_state(other)
+
+                def state_bindparam(x, state, col):
+                    dict_ = state.dict
+                    return sql.bindparam(
+                        x, unique=True,
+                        callable_=self.property._get_attr_w_warn_on_none(
+                            col,
+                            self.property.mapper._get_state_attr_by_column,
+                            state, dict_, col, passive=attributes.PASSIVE_OFF
+                        )
+                    )
+
+                def adapt(col):
+                    if self.adapter:
+                        return self.adapter(col)
+                    else:
+                        return col
+
+                if self.property._use_get:
+                    return sql.and_(*[
+                        sql.or_(
+                            adapt(x) != state_bindparam(adapt(x), state, y),
+                            adapt(x) == None)
+                        for (x, y) in self.property.local_remote_pairs])
+
+            criterion = sql.and_(*[
+                x == y for (x, y) in
+                zip(
+                    self.property.mapper.primary_key,
+                    self.property.mapper.primary_key_from_instance(other)
+                )
+            ])
+
+            return ~self._criterion_exists(criterion)
+
+        def __ne__(self, other):
+            """Implement the ``!=`` operator.
+
+            In a many-to-one context, such as::
+
+              MyClass.some_prop != <some object>
+
+            This will typically produce a clause such as::
+
+              mytable.related_id != <some id>
+
+            Where ``<some id>`` is the primary key of the
+            given object.
+
+            The ``!=`` operator provides partial functionality for non-
+            many-to-one comparisons:
+
+            * Comparisons against collections are not supported.
+              Use
+              :meth:`~.RelationshipProperty.Comparator.contains`
+              in conjunction with :func:`~.expression.not_`.
+            * Compared to a scalar one-to-many, will produce a
+              clause that compares the target columns in the parent to
+              the given target.
+            * Compared to a scalar many-to-many, an alias
+              of the association table will be rendered as
+              well, forming a natural join that is part of the
+              main body of the query. This will not work for
+              queries that go beyond simple AND conjunctions of
+              comparisons, such as those which use OR. Use
+              explicit joins, outerjoins, or
+              :meth:`~.RelationshipProperty.Comparator.has` in
+              conjunction with :func:`~.expression.not_` for
+              more comprehensive non-many-to-one scalar
+              membership tests.
+            * Comparisons against ``None`` given in a one-to-many
+              or many-to-many context produce an EXISTS clause.
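+
+            For example, with a hypothetical one-to-many
+            ``Parent.children`` relationship (illustrative names
+            only)::
+
+                session.query(Parent).filter(Parent.children != None)
+
+            renders an EXISTS subquery selecting those ``Parent``
+            rows which refer to at least one child.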
+ + """ + if isinstance(other, (util.NoneType, expression.Null)): + if self.property.direction == MANYTOONE: + return _orm_annotate(~self.property._optimized_compare( + None, adapt_source=self.adapter)) + + else: + return self._criterion_exists() + elif self.property.uselist: + raise sa_exc.InvalidRequestError( + "Can't compare a collection" + " to an object or collection; use " + "contains() to test for membership.") + else: + return _orm_annotate(self.__negated_contains_or_equals(other)) + + @util.memoized_property + def property(self): + if mapperlib.Mapper._new_mappers: + mapperlib.Mapper._configure_all() + return self.prop + + def _with_parent(self, instance, alias_secondary=True): + assert instance is not None + return self._optimized_compare( + instance, value_is_parent=True, alias_secondary=alias_secondary) + + def _optimized_compare(self, state, value_is_parent=False, + adapt_source=None, + alias_secondary=True): + if state is not None: + state = attributes.instance_state(state) + + reverse_direction = not value_is_parent + + if state is None: + return self._lazy_none_clause( + reverse_direction, + adapt_source=adapt_source) + + if not reverse_direction: + criterion, bind_to_col = \ + self._lazy_strategy._lazywhere, \ + self._lazy_strategy._bind_to_col + else: + criterion, bind_to_col = \ + self._lazy_strategy._rev_lazywhere, \ + self._lazy_strategy._rev_bind_to_col + + if reverse_direction: + mapper = self.mapper + else: + mapper = self.parent + + dict_ = attributes.instance_dict(state.obj()) + + def visit_bindparam(bindparam): + if bindparam._identifying_key in bind_to_col: + bindparam.callable = self._get_attr_w_warn_on_none( + bind_to_col[bindparam._identifying_key], + mapper._get_state_attr_by_column, + state, dict_, + bind_to_col[bindparam._identifying_key], + passive=attributes.PASSIVE_OFF) + + if self.secondary is not None and alias_secondary: + criterion = ClauseAdapter( + self.secondary.alias()).\ + traverse(criterion) + + criterion = visitors.cloned_traverse( + criterion, {}, {'bindparam': visit_bindparam}) + + if adapt_source: + criterion = adapt_source(criterion) + return criterion + + def _get_attr_w_warn_on_none(self, column, fn, *arg, **kw): + def _go(): + value = fn(*arg, **kw) + if value is None: + util.warn( + "Got None for value of column %s; this is unsupported " + "for a relationship comparison and will not " + "currently produce an IS comparison " + "(but may in a future release)" % column) + return value + return _go + + def _lazy_none_clause(self, reverse_direction=False, adapt_source=None): + if not reverse_direction: + criterion, bind_to_col = \ + self._lazy_strategy._lazywhere, \ + self._lazy_strategy._bind_to_col + else: + criterion, bind_to_col = \ + self._lazy_strategy._rev_lazywhere, \ + self._lazy_strategy._rev_bind_to_col + + criterion = adapt_criterion_to_null(criterion, bind_to_col) + + if adapt_source: + criterion = adapt_source(criterion) + return criterion + + def __str__(self): + return str(self.parent.class_.__name__) + "." 
+ self.key
+
+    def merge(self,
+              session,
+              source_state,
+              source_dict,
+              dest_state,
+              dest_dict,
+              load, _recursive, _resolve_conflict_map):
+
+        if load:
+            for r in self._reverse_property:
+                if (source_state, r) in _recursive:
+                    return
+
+        if "merge" not in self._cascade:
+            return
+
+        if self.key not in source_dict:
+            return
+
+        if self.uselist:
+            instances = source_state.get_impl(self.key).\
+                get(source_state, source_dict)
+            if hasattr(instances, '_sa_adapter'):
+                # convert collections to adapters to get a true iterator
+                instances = instances._sa_adapter
+
+            if load:
+                # for a full merge, pre-load the destination collection,
+                # so that individual _merge of each item pulls from identity
+                # map for those already present.
+                # also assumes CollectionAttributeImpl behavior of loading
+                # "old" list in any case
+                dest_state.get_impl(self.key).get(dest_state, dest_dict)
+
+            dest_list = []
+            for current in instances:
+                current_state = attributes.instance_state(current)
+                current_dict = attributes.instance_dict(current)
+                _recursive[(current_state, self)] = True
+                obj = session._merge(
+                    current_state, current_dict,
+                    load=load, _recursive=_recursive,
+                    _resolve_conflict_map=_resolve_conflict_map)
+                if obj is not None:
+                    dest_list.append(obj)
+
+            if not load:
+                coll = attributes.init_state_collection(dest_state,
+                                                        dest_dict, self.key)
+                for c in dest_list:
+                    coll.append_without_event(c)
+            else:
+                dest_state.get_impl(self.key).set(
+                    dest_state, dest_dict, dest_list,
+                    _adapt=False)
+        else:
+            current = source_dict[self.key]
+            if current is not None:
+                current_state = attributes.instance_state(current)
+                current_dict = attributes.instance_dict(current)
+                _recursive[(current_state, self)] = True
+                obj = session._merge(
+                    current_state, current_dict,
+                    load=load, _recursive=_recursive,
+                    _resolve_conflict_map=_resolve_conflict_map)
+            else:
+                obj = None
+
+            if not load:
+                dest_dict[self.key] = obj
+            else:
+                dest_state.get_impl(self.key).set(dest_state,
+                                                  dest_dict, obj, None)
+
+    def _value_as_iterable(self, state, dict_, key,
+                           passive=attributes.PASSIVE_OFF):
+        """Return a list of tuples (state, obj) for the given
+        key.
+
+        returns an empty list if the value is None/empty/PASSIVE_NO_RESULT
+        """
+
+        impl = state.manager[key].impl
+        x = impl.get(state, dict_, passive=passive)
+        if x is attributes.PASSIVE_NO_RESULT or x is None:
+            return []
+        elif hasattr(impl, 'get_collection'):
+            return [
+                (attributes.instance_state(o), o) for o in
+                impl.get_collection(state, dict_, x, passive=passive)
+            ]
+        else:
+            return [(attributes.instance_state(x), x)]
+
+    def cascade_iterator(self, type_, state, dict_,
+                         visited_states, halt_on=None):
+        # assert type_ in self._cascade
+
+        # only actively lazy load on the 'delete' cascade
+        if type_ != 'delete' or self.passive_deletes:
+            passive = attributes.PASSIVE_NO_INITIALIZE
+        else:
+            passive = attributes.PASSIVE_OFF
+
+        if type_ == 'save-update':
+            tuples = state.manager[self.key].impl.\
+                get_all_pending(state, dict_)
+
+        else:
+            tuples = self._value_as_iterable(state, dict_, self.key,
+                                             passive=passive)
+
+        skip_pending = type_ == 'refresh-expire' and 'delete-orphan' \
+            not in self._cascade
+
+        for instance_state, c in tuples:
+            if instance_state in visited_states:
+                continue
+
+            if c is None:
+                # would like to emit a warning here, but
+                # would not be consistent with collection.append(None)
+                # current behavior of silently skipping.
+ # see [ticket:2229] + continue + + instance_dict = attributes.instance_dict(c) + + if halt_on and halt_on(instance_state): + continue + + if skip_pending and not instance_state.key: + continue + + instance_mapper = instance_state.manager.mapper + + if not instance_mapper.isa(self.mapper.class_manager.mapper): + raise AssertionError("Attribute '%s' on class '%s' " + "doesn't handle objects " + "of type '%s'" % ( + self.key, + self.parent.class_, + c.__class__ + )) + + visited_states.add(instance_state) + + yield c, instance_mapper, instance_state, instance_dict + + def _add_reverse_property(self, key): + other = self.mapper.get_property(key, _configure_mappers=False) + self._reverse_property.add(other) + other._reverse_property.add(self) + + if not other.mapper.common_parent(self.parent): + raise sa_exc.ArgumentError( + 'reverse_property %r on ' + 'relationship %s references relationship %s, which ' + 'does not reference mapper %s' % + (key, self, other, self.parent)) + + if self.direction in (ONETOMANY, MANYTOONE) and self.direction \ + == other.direction: + raise sa_exc.ArgumentError( + '%s and back-reference %s are ' + 'both of the same direction %r. Did you mean to ' + 'set remote_side on the many-to-one side ?' % + (other, self, self.direction)) + + @util.memoized_property + def mapper(self): + """Return the targeted :class:`.Mapper` for this + :class:`.RelationshipProperty`. + + This is a lazy-initializing static attribute. + + """ + if util.callable(self.argument) and \ + not isinstance(self.argument, (type, mapperlib.Mapper)): + argument = self.argument() + else: + argument = self.argument + + if isinstance(argument, type): + mapper_ = mapperlib.class_mapper(argument, + configure=False) + elif isinstance(self.argument, mapperlib.Mapper): + mapper_ = argument + else: + raise sa_exc.ArgumentError( + "relationship '%s' expects " + "a class or a mapper argument (received: %s)" + % (self.key, type(argument))) + return mapper_ + + @util.memoized_property + @util.deprecated("0.7", "Use .target") + def table(self): + """Return the selectable linked to this + :class:`.RelationshipProperty` object's target + :class:`.Mapper`. + """ + return self.target + + def do_init(self): + self._check_conflicts() + self._process_dependent_arguments() + self._setup_join_conditions() + self._check_cascade_settings(self._cascade) + self._post_init() + self._generate_backref() + self._join_condition._warn_for_conflicting_sync_targets() + super(RelationshipProperty, self).do_init() + self._lazy_strategy = self._get_strategy((("lazy", "select"),)) + + def _process_dependent_arguments(self): + """Convert incoming configuration arguments to their + proper form. + + Callables are resolved, ORM annotations removed. + + """ + # accept callables for other attributes which may require + # deferred initialization. This technique is used + # by declarative "string configs" and some recipes. + for attr in ( + 'order_by', 'primaryjoin', 'secondaryjoin', + 'secondary', '_user_defined_foreign_keys', 'remote_side', + ): + attr_value = getattr(self, attr) + if util.callable(attr_value): + setattr(self, attr, attr_value()) + + # remove "annotations" which are present if mapped class + # descriptors are used to create the join expression. + for attr in 'primaryjoin', 'secondaryjoin': + val = getattr(self, attr) + if val is not None: + setattr(self, attr, _orm_deannotate( + expression._only_column_elements(val, attr)) + ) + + # ensure expressions in self.order_by, foreign_keys, + # remote_side are all columns, not strings. 
+ if self.order_by is not False and self.order_by is not None: + self.order_by = [ + expression._only_column_elements(x, "order_by") + for x in + util.to_list(self.order_by)] + + self._user_defined_foreign_keys = \ + util.column_set( + expression._only_column_elements(x, "foreign_keys") + for x in util.to_column_set( + self._user_defined_foreign_keys + )) + + self.remote_side = \ + util.column_set( + expression._only_column_elements(x, "remote_side") + for x in + util.to_column_set(self.remote_side)) + + self.target = self.mapper.mapped_table + + def _setup_join_conditions(self): + self._join_condition = jc = JoinCondition( + parent_selectable=self.parent.mapped_table, + child_selectable=self.mapper.mapped_table, + parent_local_selectable=self.parent.local_table, + child_local_selectable=self.mapper.local_table, + primaryjoin=self.primaryjoin, + secondary=self.secondary, + secondaryjoin=self.secondaryjoin, + parent_equivalents=self.parent._equivalent_columns, + child_equivalents=self.mapper._equivalent_columns, + consider_as_foreign_keys=self._user_defined_foreign_keys, + local_remote_pairs=self.local_remote_pairs, + remote_side=self.remote_side, + self_referential=self._is_self_referential, + prop=self, + support_sync=not self.viewonly, + can_be_synced_fn=self._columns_are_mapped + ) + self.primaryjoin = jc.deannotated_primaryjoin + self.secondaryjoin = jc.deannotated_secondaryjoin + self.direction = jc.direction + self.local_remote_pairs = jc.local_remote_pairs + self.remote_side = jc.remote_columns + self.local_columns = jc.local_columns + self.synchronize_pairs = jc.synchronize_pairs + self._calculated_foreign_keys = jc.foreign_key_columns + self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs + + def _check_conflicts(self): + """Test that this relationship is legal, warn about + inheritance conflicts.""" + + if self.parent.non_primary and not mapperlib.class_mapper( + self.parent.class_, + configure=False).has_property(self.key): + raise sa_exc.ArgumentError( + "Attempting to assign a new " + "relationship '%s' to a non-primary mapper on " + "class '%s'. New relationships can only be added " + "to the primary mapper, i.e. the very first mapper " + "created for class '%s' " % + (self.key, self.parent.class_.__name__, + self.parent.class_.__name__)) + + def _get_cascade(self): + """Return the current cascade setting for this + :class:`.RelationshipProperty`. + """ + return self._cascade + + def _set_cascade(self, cascade): + cascade = CascadeOptions(cascade) + if 'mapper' in self.__dict__: + self._check_cascade_settings(cascade) + self._cascade = cascade + + if self._dependency_processor: + self._dependency_processor.cascade = cascade + + cascade = property(_get_cascade, _set_cascade) + + def _check_cascade_settings(self, cascade): + if cascade.delete_orphan and not self.single_parent \ + and (self.direction is MANYTOMANY or self.direction + is MANYTOONE): + raise sa_exc.ArgumentError( + 'On %s, delete-orphan cascade is not supported ' + 'on a many-to-many or many-to-one relationship ' + 'when single_parent is not set. Set ' + 'single_parent=True on the relationship().' + % self) + if self.direction is MANYTOONE and self.passive_deletes: + util.warn("On %s, 'passive_deletes' is normally configured " + "on one-to-many, one-to-one, many-to-many " + "relationships only." 
+ % self) + + if self.passive_deletes == 'all' and \ + ("delete" in cascade or + "delete-orphan" in cascade): + raise sa_exc.ArgumentError( + "On %s, can't set passive_deletes='all' in conjunction " + "with 'delete' or 'delete-orphan' cascade" % self) + + if cascade.delete_orphan: + self.mapper.primary_mapper()._delete_orphans.append( + (self.key, self.parent.class_) + ) + + def _columns_are_mapped(self, *cols): + """Return True if all columns in the given collection are + mapped by the tables referenced by this :class:`.Relationship`. + + """ + for c in cols: + if self.secondary is not None \ + and self.secondary.c.contains_column(c): + continue + if not self.parent.mapped_table.c.contains_column(c) and \ + not self.target.c.contains_column(c): + return False + return True + + def _generate_backref(self): + """Interpret the 'backref' instruction to create a + :func:`.relationship` complementary to this one.""" + + if self.parent.non_primary: + return + if self.backref is not None and not self.back_populates: + if isinstance(self.backref, util.string_types): + backref_key, kwargs = self.backref, {} + else: + backref_key, kwargs = self.backref + mapper = self.mapper.primary_mapper() + + if not mapper.concrete: + check = set(mapper.iterate_to_root()).\ + union(mapper.self_and_descendants) + for m in check: + if m.has_property(backref_key) and not m.concrete: + raise sa_exc.ArgumentError( + "Error creating backref " + "'%s' on relationship '%s': property of that " + "name exists on mapper '%s'" % + (backref_key, self, m)) + + # determine primaryjoin/secondaryjoin for the + # backref. Use the one we had, so that + # a custom join doesn't have to be specified in + # both directions. + if self.secondary is not None: + # for many to many, just switch primaryjoin/ + # secondaryjoin. use the annotated + # pj/sj on the _join_condition. + pj = kwargs.pop( + 'primaryjoin', + self._join_condition.secondaryjoin_minus_local) + sj = kwargs.pop( + 'secondaryjoin', + self._join_condition.primaryjoin_minus_local) + else: + pj = kwargs.pop( + 'primaryjoin', + self._join_condition.primaryjoin_reverse_remote) + sj = kwargs.pop('secondaryjoin', None) + if sj: + raise sa_exc.InvalidRequestError( + "Can't assign 'secondaryjoin' on a backref " + "against a non-secondary relationship." 
+ ) + + foreign_keys = kwargs.pop('foreign_keys', + self._user_defined_foreign_keys) + parent = self.parent.primary_mapper() + kwargs.setdefault('viewonly', self.viewonly) + kwargs.setdefault('post_update', self.post_update) + kwargs.setdefault('passive_updates', self.passive_updates) + self.back_populates = backref_key + relationship = RelationshipProperty( + parent, self.secondary, + pj, sj, + foreign_keys=foreign_keys, + back_populates=self.key, + **kwargs) + mapper._configure_property(backref_key, relationship) + + if self.back_populates: + self._add_reverse_property(self.back_populates) + + def _post_init(self): + if self.uselist is None: + self.uselist = self.direction is not MANYTOONE + if not self.viewonly: + self._dependency_processor = \ + dependency.DependencyProcessor.from_relationship(self) + + @util.memoized_property + def _use_get(self): + """memoize the 'use_get' attribute of this RelationshipLoader's + lazyloader.""" + + strategy = self._lazy_strategy + return strategy.use_get + + @util.memoized_property + def _is_self_referential(self): + return self.mapper.common_parent(self.parent) + + def _create_joins(self, source_polymorphic=False, + source_selectable=None, dest_polymorphic=False, + dest_selectable=None, of_type=None): + if source_selectable is None: + if source_polymorphic and self.parent.with_polymorphic: + source_selectable = self.parent._with_polymorphic_selectable + + aliased = False + if dest_selectable is None: + if dest_polymorphic and self.mapper.with_polymorphic: + dest_selectable = self.mapper._with_polymorphic_selectable + aliased = True + else: + dest_selectable = self.mapper.mapped_table + + if self._is_self_referential and source_selectable is None: + dest_selectable = dest_selectable.alias() + aliased = True + else: + aliased = True + + dest_mapper = of_type or self.mapper + + single_crit = dest_mapper._single_table_criterion + aliased = aliased or (source_selectable is not None) + + primaryjoin, secondaryjoin, secondary, target_adapter, dest_selectable = \ + self._join_condition.join_targets( + source_selectable, dest_selectable, aliased, single_crit + ) + if source_selectable is None: + source_selectable = self.parent.local_table + if dest_selectable is None: + dest_selectable = self.mapper.local_table + return (primaryjoin, secondaryjoin, source_selectable, + dest_selectable, secondary, target_adapter) + + +def _annotate_columns(element, annotations): + def clone(elem): + if isinstance(elem, expression.ColumnClause): + elem = elem._annotate(annotations.copy()) + elem._copy_internals(clone=clone) + return elem + + if element is not None: + element = clone(element) + return element + + +class JoinCondition(object): + def __init__(self, + parent_selectable, + child_selectable, + parent_local_selectable, + child_local_selectable, + primaryjoin=None, + secondary=None, + secondaryjoin=None, + parent_equivalents=None, + child_equivalents=None, + consider_as_foreign_keys=None, + local_remote_pairs=None, + remote_side=None, + self_referential=False, + prop=None, + support_sync=True, + can_be_synced_fn=lambda *c: True + ): + self.parent_selectable = parent_selectable + self.parent_local_selectable = parent_local_selectable + self.child_selectable = child_selectable + self.child_local_selectable = child_local_selectable + self.parent_equivalents = parent_equivalents + self.child_equivalents = child_equivalents + self.primaryjoin = primaryjoin + self.secondaryjoin = secondaryjoin + self.secondary = secondary + self.consider_as_foreign_keys = 
consider_as_foreign_keys + self._local_remote_pairs = local_remote_pairs + self._remote_side = remote_side + self.prop = prop + self.self_referential = self_referential + self.support_sync = support_sync + self.can_be_synced_fn = can_be_synced_fn + self._determine_joins() + self._annotate_fks() + self._annotate_remote() + self._annotate_local() + self._setup_pairs() + self._check_foreign_cols(self.primaryjoin, True) + if self.secondaryjoin is not None: + self._check_foreign_cols(self.secondaryjoin, False) + self._determine_direction() + self._check_remote_side() + self._log_joins() + + def _log_joins(self): + if self.prop is None: + return + log = self.prop.logger + log.info('%s setup primary join %s', self.prop, + self.primaryjoin) + log.info('%s setup secondary join %s', self.prop, + self.secondaryjoin) + log.info('%s synchronize pairs [%s]', self.prop, + ','.join('(%s => %s)' % (l, r) for (l, r) in + self.synchronize_pairs)) + log.info('%s secondary synchronize pairs [%s]', self.prop, + ','.join('(%s => %s)' % (l, r) for (l, r) in + self.secondary_synchronize_pairs or [])) + log.info('%s local/remote pairs [%s]', self.prop, + ','.join('(%s / %s)' % (l, r) for (l, r) in + self.local_remote_pairs)) + log.info('%s remote columns [%s]', self.prop, + ','.join('%s' % col for col in self.remote_columns) + ) + log.info('%s local columns [%s]', self.prop, + ','.join('%s' % col for col in self.local_columns) + ) + log.info('%s relationship direction %s', self.prop, + self.direction) + + def _determine_joins(self): + """Determine the 'primaryjoin' and 'secondaryjoin' attributes, + if not passed to the constructor already. + + This is based on analysis of the foreign key relationships + between the parent and target mapped selectables. + + """ + if self.secondaryjoin is not None and self.secondary is None: + raise sa_exc.ArgumentError( + "Property %s specified with secondary " + "join condition but " + "no secondary argument" % self.prop) + + # find a join between the given mapper's mapped table and + # the given table. will try the mapper's local table first + # for more specificity, then if not found will try the more + # general mapped table, which in the case of inheritance is + # a join. + try: + consider_as_foreign_keys = self.consider_as_foreign_keys or None + if self.secondary is not None: + if self.secondaryjoin is None: + self.secondaryjoin = \ + join_condition( + self.child_selectable, + self.secondary, + a_subset=self.child_local_selectable, + consider_as_foreign_keys=consider_as_foreign_keys + ) + if self.primaryjoin is None: + self.primaryjoin = \ + join_condition( + self.parent_selectable, + self.secondary, + a_subset=self.parent_local_selectable, + consider_as_foreign_keys=consider_as_foreign_keys + ) + else: + if self.primaryjoin is None: + self.primaryjoin = \ + join_condition( + self.parent_selectable, + self.child_selectable, + a_subset=self.parent_local_selectable, + consider_as_foreign_keys=consider_as_foreign_keys + ) + except sa_exc.NoForeignKeysError: + if self.secondary is not None: + raise sa_exc.NoForeignKeysError( + "Could not determine join " + "condition between parent/child tables on " + "relationship %s - there are no foreign keys " + "linking these tables via secondary table '%s'. " + "Ensure that referencing columns are associated " + "with a ForeignKey or ForeignKeyConstraint, or " + "specify 'primaryjoin' and 'secondaryjoin' " + "expressions." 
% (self.prop, self.secondary)) + else: + raise sa_exc.NoForeignKeysError( + "Could not determine join " + "condition between parent/child tables on " + "relationship %s - there are no foreign keys " + "linking these tables. " + "Ensure that referencing columns are associated " + "with a ForeignKey or ForeignKeyConstraint, or " + "specify a 'primaryjoin' expression." % self.prop) + except sa_exc.AmbiguousForeignKeysError: + if self.secondary is not None: + raise sa_exc.AmbiguousForeignKeysError( + "Could not determine join " + "condition between parent/child tables on " + "relationship %s - there are multiple foreign key " + "paths linking the tables via secondary table '%s'. " + "Specify the 'foreign_keys' " + "argument, providing a list of those columns which " + "should be counted as containing a foreign key " + "reference from the secondary table to each of the " + "parent and child tables." + % (self.prop, self.secondary)) + else: + raise sa_exc.AmbiguousForeignKeysError( + "Could not determine join " + "condition between parent/child tables on " + "relationship %s - there are multiple foreign key " + "paths linking the tables. Specify the " + "'foreign_keys' argument, providing a list of those " + "columns which should be counted as containing a " + "foreign key reference to the parent table." + % self.prop) + + @property + def primaryjoin_minus_local(self): + return _deep_deannotate(self.primaryjoin, values=("local", "remote")) + + @property + def secondaryjoin_minus_local(self): + return _deep_deannotate(self.secondaryjoin, + values=("local", "remote")) + + @util.memoized_property + def primaryjoin_reverse_remote(self): + """Return the primaryjoin condition suitable for the + "reverse" direction. + + If the primaryjoin was delivered here with pre-existing + "remote" annotations, the local/remote annotations + are reversed. Otherwise, the local/remote annotations + are removed. + + """ + if self._has_remote_annotations: + def replace(element): + if "remote" in element._annotations: + v = element._annotations.copy() + del v['remote'] + v['local'] = True + return element._with_annotations(v) + elif "local" in element._annotations: + v = element._annotations.copy() + del v['local'] + v['remote'] = True + return element._with_annotations(v) + return visitors.replacement_traverse( + self.primaryjoin, {}, replace) + else: + if self._has_foreign_annotations: + # TODO: coverage + return _deep_deannotate(self.primaryjoin, + values=("local", "remote")) + else: + return _deep_deannotate(self.primaryjoin) + + def _has_annotation(self, clause, annotation): + for col in visitors.iterate(clause, {}): + if annotation in col._annotations: + return True + else: + return False + + @util.memoized_property + def _has_foreign_annotations(self): + return self._has_annotation(self.primaryjoin, "foreign") + + @util.memoized_property + def _has_remote_annotations(self): + return self._has_annotation(self.primaryjoin, "remote") + + def _annotate_fks(self): + """Annotate the primaryjoin and secondaryjoin + structures with 'foreign' annotations marking columns + considered as foreign. 
+ + """ + if self._has_foreign_annotations: + return + + if self.consider_as_foreign_keys: + self._annotate_from_fk_list() + else: + self._annotate_present_fks() + + def _annotate_from_fk_list(self): + def check_fk(col): + if col in self.consider_as_foreign_keys: + return col._annotate({"foreign": True}) + self.primaryjoin = visitors.replacement_traverse( + self.primaryjoin, + {}, + check_fk + ) + if self.secondaryjoin is not None: + self.secondaryjoin = visitors.replacement_traverse( + self.secondaryjoin, + {}, + check_fk + ) + + def _annotate_present_fks(self): + if self.secondary is not None: + secondarycols = util.column_set(self.secondary.c) + else: + secondarycols = set() + + def is_foreign(a, b): + if isinstance(a, schema.Column) and \ + isinstance(b, schema.Column): + if a.references(b): + return a + elif b.references(a): + return b + + if secondarycols: + if a in secondarycols and b not in secondarycols: + return a + elif b in secondarycols and a not in secondarycols: + return b + + def visit_binary(binary): + if not isinstance(binary.left, sql.ColumnElement) or \ + not isinstance(binary.right, sql.ColumnElement): + return + + if "foreign" not in binary.left._annotations and \ + "foreign" not in binary.right._annotations: + col = is_foreign(binary.left, binary.right) + if col is not None: + if col.compare(binary.left): + binary.left = binary.left._annotate( + {"foreign": True}) + elif col.compare(binary.right): + binary.right = binary.right._annotate( + {"foreign": True}) + + self.primaryjoin = visitors.cloned_traverse( + self.primaryjoin, + {}, + {"binary": visit_binary} + ) + if self.secondaryjoin is not None: + self.secondaryjoin = visitors.cloned_traverse( + self.secondaryjoin, + {}, + {"binary": visit_binary} + ) + + def _refers_to_parent_table(self): + """Return True if the join condition contains column + comparisons where both columns are in both tables. + + """ + pt = self.parent_selectable + mt = self.child_selectable + result = [False] + + def visit_binary(binary): + c, f = binary.left, binary.right + if ( + isinstance(c, expression.ColumnClause) and + isinstance(f, expression.ColumnClause) and + pt.is_derived_from(c.table) and + pt.is_derived_from(f.table) and + mt.is_derived_from(c.table) and + mt.is_derived_from(f.table) + ): + result[0] = True + visitors.traverse( + self.primaryjoin, + {}, + {"binary": visit_binary} + ) + return result[0] + + def _tables_overlap(self): + """Return True if parent/child tables have some overlap.""" + + return selectables_overlap( + self.parent_selectable, self.child_selectable) + + def _annotate_remote(self): + """Annotate the primaryjoin and secondaryjoin + structures with 'remote' annotations marking columns + considered as part of the 'remote' side. + + """ + if self._has_remote_annotations: + return + + if self.secondary is not None: + self._annotate_remote_secondary() + elif self._local_remote_pairs or self._remote_side: + self._annotate_remote_from_args() + elif self._refers_to_parent_table(): + self._annotate_selfref(lambda col: "foreign" in col._annotations, False) + elif self._tables_overlap(): + self._annotate_remote_with_overlap() + else: + self._annotate_remote_distinct_selectables() + + def _annotate_remote_secondary(self): + """annotate 'remote' in primaryjoin, secondaryjoin + when 'secondary' is present. 
+ + """ + def repl(element): + if self.secondary.c.contains_column(element): + return element._annotate({"remote": True}) + self.primaryjoin = visitors.replacement_traverse( + self.primaryjoin, {}, repl) + self.secondaryjoin = visitors.replacement_traverse( + self.secondaryjoin, {}, repl) + + def _annotate_selfref(self, fn, remote_side_given): + """annotate 'remote' in primaryjoin, secondaryjoin + when the relationship is detected as self-referential. + + """ + def visit_binary(binary): + equated = binary.left.compare(binary.right) + if isinstance(binary.left, expression.ColumnClause) and \ + isinstance(binary.right, expression.ColumnClause): + # assume one to many - FKs are "remote" + if fn(binary.left): + binary.left = binary.left._annotate({"remote": True}) + if fn(binary.right) and not equated: + binary.right = binary.right._annotate( + {"remote": True}) + elif not remote_side_given: + self._warn_non_column_elements() + + self.primaryjoin = visitors.cloned_traverse( + self.primaryjoin, {}, + {"binary": visit_binary}) + + def _annotate_remote_from_args(self): + """annotate 'remote' in primaryjoin, secondaryjoin + when the 'remote_side' or '_local_remote_pairs' + arguments are used. + + """ + if self._local_remote_pairs: + if self._remote_side: + raise sa_exc.ArgumentError( + "remote_side argument is redundant " + "against more detailed _local_remote_side " + "argument.") + + remote_side = [r for (l, r) in self._local_remote_pairs] + else: + remote_side = self._remote_side + + if self._refers_to_parent_table(): + self._annotate_selfref(lambda col: col in remote_side, True) + else: + def repl(element): + if element in remote_side: + return element._annotate({"remote": True}) + self.primaryjoin = visitors.replacement_traverse( + self.primaryjoin, {}, repl) + + def _annotate_remote_with_overlap(self): + """annotate 'remote' in primaryjoin, secondaryjoin + when the parent/child tables have some set of + tables in common, though is not a fully self-referential + relationship. + + """ + def visit_binary(binary): + binary.left, binary.right = proc_left_right(binary.left, + binary.right) + binary.right, binary.left = proc_left_right(binary.right, + binary.left) + + check_entities = self.prop is not None and \ + self.prop.mapper is not self.prop.parent + + def proc_left_right(left, right): + if isinstance(left, expression.ColumnClause) and \ + isinstance(right, expression.ColumnClause): + if self.child_selectable.c.contains_column(right) and \ + self.parent_selectable.c.contains_column(left): + right = right._annotate({"remote": True}) + elif check_entities and \ + right._annotations.get('parentmapper') is self.prop.mapper: + right = right._annotate({"remote": True}) + elif check_entities and \ + left._annotations.get('parentmapper') is self.prop.mapper: + left = left._annotate({"remote": True}) + else: + self._warn_non_column_elements() + + return left, right + + self.primaryjoin = visitors.cloned_traverse( + self.primaryjoin, {}, + {"binary": visit_binary}) + + def _annotate_remote_distinct_selectables(self): + """annotate 'remote' in primaryjoin, secondaryjoin + when the parent/child tables are entirely + separate. + + """ + def repl(element): + if self.child_selectable.c.contains_column(element) and \ + (not self.parent_local_selectable.c. + contains_column(element) or + self.child_local_selectable.c. 
+ contains_column(element)): + return element._annotate({"remote": True}) + self.primaryjoin = visitors.replacement_traverse( + self.primaryjoin, {}, repl) + + def _warn_non_column_elements(self): + util.warn( + "Non-simple column elements in primary " + "join condition for property %s - consider using " + "remote() annotations to mark the remote side." + % self.prop + ) + + def _annotate_local(self): + """Annotate the primaryjoin and secondaryjoin + structures with 'local' annotations. + + This annotates all column elements found + simultaneously in the parent table + and the join condition that don't have a + 'remote' annotation set up from + _annotate_remote() or user-defined. + + """ + if self._has_annotation(self.primaryjoin, "local"): + return + + if self._local_remote_pairs: + local_side = util.column_set([l for (l, r) + in self._local_remote_pairs]) + else: + local_side = util.column_set(self.parent_selectable.c) + + def locals_(elem): + if "remote" not in elem._annotations and \ + elem in local_side: + return elem._annotate({"local": True}) + self.primaryjoin = visitors.replacement_traverse( + self.primaryjoin, {}, locals_ + ) + + def _check_remote_side(self): + if not self.local_remote_pairs: + raise sa_exc.ArgumentError( + 'Relationship %s could ' + 'not determine any unambiguous local/remote column ' + 'pairs based on join condition and remote_side ' + 'arguments. ' + 'Consider using the remote() annotation to ' + 'accurately mark those elements of the join ' + 'condition that are on the remote side of ' + 'the relationship.' % (self.prop, )) + + def _check_foreign_cols(self, join_condition, primary): + """Check the foreign key columns collected and emit error + messages.""" + + can_sync = False + + foreign_cols = self._gather_columns_with_annotation( + join_condition, "foreign") + + has_foreign = bool(foreign_cols) + + if primary: + can_sync = bool(self.synchronize_pairs) + else: + can_sync = bool(self.secondary_synchronize_pairs) + + if self.support_sync and can_sync or \ + (not self.support_sync and has_foreign): + return + + # from here below is just determining the best error message + # to report. Check for a join condition using any operator + # (not just ==), perhaps they need to turn on "viewonly=True". + if self.support_sync and has_foreign and not can_sync: + err = "Could not locate any simple equality expressions "\ + "involving locally mapped foreign key columns for "\ + "%s join condition "\ + "'%s' on relationship %s." % ( + primary and 'primary' or 'secondary', + join_condition, + self.prop + ) + err += \ + " Ensure that referencing columns are associated "\ + "with a ForeignKey or ForeignKeyConstraint, or are "\ + "annotated in the join condition with the foreign() "\ + "annotation. To allow comparison operators other than "\ + "'==', the relationship can be marked as viewonly=True." + + raise sa_exc.ArgumentError(err) + else: + err = "Could not locate any relevant foreign key columns "\ + "for %s join condition '%s' on relationship %s." % ( + primary and 'primary' or 'secondary', + join_condition, + self.prop + ) + err += \ + ' Ensure that referencing columns are associated '\ + 'with a ForeignKey or ForeignKeyConstraint, or are '\ + 'annotated in the join condition with the foreign() '\ + 'annotation.' + raise sa_exc.ArgumentError(err) + + def _determine_direction(self): + """Determine if this relationship is one to many, many to one, + many to many. 
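+
+        As a rough sketch of the heuristic applied below (illustrative,
+        not exhaustive): a non-None ``secondaryjoin`` indicates
+        MANYTOMANY; otherwise, foreign key columns found on the child
+        selectable suggest ONETOMANY, while foreign key columns found
+        on the parent selectable suggest MANYTOONE.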
+ + """ + if self.secondaryjoin is not None: + self.direction = MANYTOMANY + else: + parentcols = util.column_set(self.parent_selectable.c) + targetcols = util.column_set(self.child_selectable.c) + + # fk collection which suggests ONETOMANY. + onetomany_fk = targetcols.intersection( + self.foreign_key_columns) + + # fk collection which suggests MANYTOONE. + + manytoone_fk = parentcols.intersection( + self.foreign_key_columns) + + if onetomany_fk and manytoone_fk: + # fks on both sides. test for overlap of local/remote + # with foreign key. + # we will gather columns directly from their annotations + # without deannotating, so that we can distinguish on a column + # that refers to itself. + + # 1. columns that are both remote and FK suggest + # onetomany. + onetomany_local = self._gather_columns_with_annotation( + self.primaryjoin, "remote", "foreign") + + # 2. columns that are FK but are not remote (e.g. local) + # suggest manytoone. + manytoone_local = set([c for c in + self._gather_columns_with_annotation( + self.primaryjoin, + "foreign") + if "remote" not in c._annotations]) + + # 3. if both collections are present, remove columns that + # refer to themselves. This is for the case of + # and_(Me.id == Me.remote_id, Me.version == Me.version) + if onetomany_local and manytoone_local: + self_equated = self.remote_columns.intersection( + self.local_columns + ) + onetomany_local = onetomany_local.difference(self_equated) + manytoone_local = manytoone_local.difference(self_equated) + + # at this point, if only one or the other collection is + # present, we know the direction, otherwise it's still + # ambiguous. + + if onetomany_local and not manytoone_local: + self.direction = ONETOMANY + elif manytoone_local and not onetomany_local: + self.direction = MANYTOONE + else: + raise sa_exc.ArgumentError( + "Can't determine relationship" + " direction for relationship '%s' - foreign " + "key columns within the join condition are present " + "in both the parent and the child's mapped tables. " + "Ensure that only those columns referring " + "to a parent column are marked as foreign, " + "either via the foreign() annotation or " + "via the foreign_keys argument." % self.prop) + elif onetomany_fk: + self.direction = ONETOMANY + elif manytoone_fk: + self.direction = MANYTOONE + else: + raise sa_exc.ArgumentError( + "Can't determine relationship " + "direction for relationship '%s' - foreign " + "key columns are present in neither the parent " + "nor the child's mapped tables" % self.prop) + + def _deannotate_pairs(self, collection): + """provide deannotation for the various lists of + pairs, so that using them in hashes doesn't incur + high-overhead __eq__() comparisons against + original columns mapped. 
+ + """ + return [(x._deannotate(), y._deannotate()) + for x, y in collection] + + def _setup_pairs(self): + sync_pairs = [] + lrp = util.OrderedSet([]) + secondary_sync_pairs = [] + + def go(joincond, collection): + def visit_binary(binary, left, right): + if "remote" in right._annotations and \ + "remote" not in left._annotations and \ + self.can_be_synced_fn(left): + lrp.add((left, right)) + elif "remote" in left._annotations and \ + "remote" not in right._annotations and \ + self.can_be_synced_fn(right): + lrp.add((right, left)) + if binary.operator is operators.eq and \ + self.can_be_synced_fn(left, right): + if "foreign" in right._annotations: + collection.append((left, right)) + elif "foreign" in left._annotations: + collection.append((right, left)) + visit_binary_product(visit_binary, joincond) + + for joincond, collection in [ + (self.primaryjoin, sync_pairs), + (self.secondaryjoin, secondary_sync_pairs) + ]: + if joincond is None: + continue + go(joincond, collection) + + self.local_remote_pairs = self._deannotate_pairs(lrp) + self.synchronize_pairs = self._deannotate_pairs(sync_pairs) + self.secondary_synchronize_pairs = \ + self._deannotate_pairs(secondary_sync_pairs) + + _track_overlapping_sync_targets = weakref.WeakKeyDictionary() + + def _warn_for_conflicting_sync_targets(self): + if not self.support_sync: + return + + # we would like to detect if we are synchronizing any column + # pairs in conflict with another relationship that wishes to sync + # an entirely different column to the same target. This is a + # very rare edge case so we will try to minimize the memory/overhead + # impact of this check + for from_, to_ in [ + (from_, to_) for (from_, to_) in self.synchronize_pairs + ] + [ + (from_, to_) for (from_, to_) in self.secondary_synchronize_pairs + ]: + # save ourselves a ton of memory and overhead by only + # considering columns that are subject to a overlapping + # FK constraints at the core level. This condition can arise + # if multiple relationships overlap foreign() directly, but + # we're going to assume it's typically a ForeignKeyConstraint- + # level configuration that benefits from this warning. + if len(to_.foreign_keys) < 2: + continue + + if to_ not in self._track_overlapping_sync_targets: + self._track_overlapping_sync_targets[to_] = \ + weakref.WeakKeyDictionary({self.prop: from_}) + else: + other_props = [] + prop_to_from = self._track_overlapping_sync_targets[to_] + for pr, fr_ in prop_to_from.items(): + if pr.mapper in mapperlib._mapper_registry and \ + fr_ is not from_ and \ + pr not in self.prop._reverse_property: + other_props.append((pr, fr_)) + + if other_props: + util.warn( + "relationship '%s' will copy column %s to column %s, " + "which conflicts with relationship(s): %s. " + "Consider applying " + "viewonly=True to read-only relationships, or provide " + "a primaryjoin condition marking writable columns " + "with the foreign() annotation." 
% ( + self.prop, + from_, to_, + ", ".join( + "'%s' (copies %s to %s)" % (pr, fr_, to_) + for (pr, fr_) in other_props) + ) + ) + self._track_overlapping_sync_targets[to_][self.prop] = from_ + + @util.memoized_property + def remote_columns(self): + return self._gather_join_annotations("remote") + + @util.memoized_property + def local_columns(self): + return self._gather_join_annotations("local") + + @util.memoized_property + def foreign_key_columns(self): + return self._gather_join_annotations("foreign") + + @util.memoized_property + def deannotated_primaryjoin(self): + return _deep_deannotate(self.primaryjoin) + + @util.memoized_property + def deannotated_secondaryjoin(self): + if self.secondaryjoin is not None: + return _deep_deannotate(self.secondaryjoin) + else: + return None + + def _gather_join_annotations(self, annotation): + s = set( + self._gather_columns_with_annotation( + self.primaryjoin, annotation) + ) + if self.secondaryjoin is not None: + s.update( + self._gather_columns_with_annotation( + self.secondaryjoin, annotation) + ) + return set([x._deannotate() for x in s]) + + def _gather_columns_with_annotation(self, clause, *annotation): + annotation = set(annotation) + return set([ + col for col in visitors.iterate(clause, {}) + if annotation.issubset(col._annotations) + ]) + + def join_targets(self, source_selectable, + dest_selectable, + aliased, + single_crit=None): + """Given a source and destination selectable, create a + join between them. + + This takes into account aliasing the join clause + to reference the appropriate corresponding columns + in the target objects, as well as the extra child + criterion, equivalent column sets, etc. + + """ + + # place a barrier on the destination such that + # replacement traversals won't ever dig into it. + # its internal structure remains fixed + # regardless of context. + dest_selectable = _shallow_annotate( + dest_selectable, + {'no_replacement_traverse': True}) + + primaryjoin, secondaryjoin, secondary = self.primaryjoin, \ + self.secondaryjoin, self.secondary + + # adjust the join condition for single table inheritance, + # in the case that the join is to a subclass + # this is analogous to the + # "_adjust_for_single_table_inheritance()" method in Query. 
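+        # For example, for a relationship targeting a subclass mapped
+        # via single table inheritance, single_crit would carry a
+        # discriminator criterion along the lines of
+        # "employees.type = 'manager'" (a hypothetical illustration).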
+
+        if single_crit is not None:
+            if secondaryjoin is not None:
+                secondaryjoin = secondaryjoin & single_crit
+            else:
+                primaryjoin = primaryjoin & single_crit
+
+        if aliased:
+            if secondary is not None:
+                secondary = secondary.alias(flat=True)
+                primary_aliasizer = ClauseAdapter(secondary)
+                secondary_aliasizer = \
+                    ClauseAdapter(dest_selectable,
+                                  equivalents=self.child_equivalents).\
+                    chain(primary_aliasizer)
+                if source_selectable is not None:
+                    primary_aliasizer = \
+                        ClauseAdapter(secondary).\
+                        chain(ClauseAdapter(
+                            source_selectable,
+                            equivalents=self.parent_equivalents))
+                secondaryjoin = \
+                    secondary_aliasizer.traverse(secondaryjoin)
+            else:
+                primary_aliasizer = ClauseAdapter(
+                    dest_selectable,
+                    exclude_fn=_ColInAnnotations("local"),
+                    equivalents=self.child_equivalents)
+                if source_selectable is not None:
+                    primary_aliasizer.chain(
+                        ClauseAdapter(source_selectable,
+                                      exclude_fn=_ColInAnnotations("remote"),
+                                      equivalents=self.parent_equivalents))
+                secondary_aliasizer = None
+
+            primaryjoin = primary_aliasizer.traverse(primaryjoin)
+            target_adapter = secondary_aliasizer or primary_aliasizer
+            target_adapter.exclude_fn = None
+        else:
+            target_adapter = None
+        return primaryjoin, secondaryjoin, secondary, \
+            target_adapter, dest_selectable
+
+    def create_lazy_clause(self, reverse_direction=False):
+        binds = util.column_dict()
+        equated_columns = util.column_dict()
+
+        has_secondary = self.secondaryjoin is not None
+
+        if has_secondary:
+            lookup = collections.defaultdict(list)
+            for l, r in self.local_remote_pairs:
+                lookup[l].append((l, r))
+                equated_columns[r] = l
+        elif not reverse_direction:
+            for l, r in self.local_remote_pairs:
+                equated_columns[r] = l
+        else:
+            for l, r in self.local_remote_pairs:
+                equated_columns[l] = r
+
+        def col_to_bind(col):
+
+            if (
+                (not reverse_direction and 'local' in col._annotations) or
+                reverse_direction and (
+                    (has_secondary and col in lookup) or
+                    (not has_secondary and 'remote' in col._annotations)
+                )
+            ):
+                if col not in binds:
+                    binds[col] = sql.bindparam(
+                        None, None, type_=col.type, unique=True)
+                return binds[col]
+            return None
+
+        lazywhere = self.primaryjoin
+        if self.secondaryjoin is None or not reverse_direction:
+            lazywhere = visitors.replacement_traverse(
+                lazywhere, {}, col_to_bind)
+
+        if self.secondaryjoin is not None:
+            secondaryjoin = self.secondaryjoin
+            if reverse_direction:
+                secondaryjoin = visitors.replacement_traverse(
+                    secondaryjoin, {}, col_to_bind)
+            lazywhere = sql.and_(lazywhere, secondaryjoin)
+
+        bind_to_col = dict((binds[col].key, col) for col in binds)
+
+        # this is probably not necessary
+        lazywhere = _deep_deannotate(lazywhere)
+
+        return lazywhere, bind_to_col, equated_columns
+
+
+class _ColInAnnotations(object):
+    """Serializable equivalent to:
+
+    lambda c: "name" in c._annotations
+    """
+
+    def __init__(self, name):
+        self.name = name
+
+    def __call__(self, c):
+        return self.name in c._annotations
diff --git a/app/lib/sqlalchemy/orm/scoping.py b/app/lib/sqlalchemy/orm/scoping.py
new file mode 100644
index 0000000..05b8813
--- /dev/null
+++ b/app/lib/sqlalchemy/orm/scoping.py
@@ -0,0 +1,184 @@
+# orm/scoping.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from .. import exc as sa_exc
+from ..util import ScopedRegistry, ThreadLocalRegistry, warn
+from .
import class_mapper, exc as orm_exc +from .session import Session + + +__all__ = ['scoped_session'] + + +class scoped_session(object): + """Provides scoped management of :class:`.Session` objects. + + See :ref:`unitofwork_contextual` for a tutorial. + + """ + + session_factory = None + """The `session_factory` provided to `__init__` is stored in this + attribute and may be accessed at a later time. This can be useful when + a new non-scoped :class:`.Session` or :class:`.Connection` to the + database is needed.""" + + def __init__(self, session_factory, scopefunc=None): + """Construct a new :class:`.scoped_session`. + + :param session_factory: a factory to create new :class:`.Session` + instances. This is usually, but not necessarily, an instance + of :class:`.sessionmaker`. + :param scopefunc: optional function which defines + the current scope. If not passed, the :class:`.scoped_session` + object assumes "thread-local" scope, and will use + a Python ``threading.local()`` in order to maintain the current + :class:`.Session`. If passed, the function should return + a hashable token; this token will be used as the key in a + dictionary in order to store and retrieve the current + :class:`.Session`. + + """ + self.session_factory = session_factory + + if scopefunc: + self.registry = ScopedRegistry(session_factory, scopefunc) + else: + self.registry = ThreadLocalRegistry(session_factory) + + def __call__(self, **kw): + r"""Return the current :class:`.Session`, creating it + using the :attr:`.scoped_session.session_factory` if not present. + + :param \**kw: Keyword arguments will be passed to the + :attr:`.scoped_session.session_factory` callable, if an existing + :class:`.Session` is not present. If the :class:`.Session` is present + and keyword arguments have been passed, + :exc:`~sqlalchemy.exc.InvalidRequestError` is raised. + + """ + if kw: + scope = kw.pop('scope', False) + if scope is not None: + if self.registry.has(): + raise sa_exc.InvalidRequestError( + "Scoped session is already present; " + "no new arguments may be specified.") + else: + sess = self.session_factory(**kw) + self.registry.set(sess) + return sess + else: + return self.session_factory(**kw) + else: + return self.registry() + + def remove(self): + """Dispose of the current :class:`.Session`, if present. + + This will first call :meth:`.Session.close` method + on the current :class:`.Session`, which releases any existing + transactional/connection resources still being held; transactions + specifically are rolled back. The :class:`.Session` is then + discarded. Upon next usage within the same scope, + the :class:`.scoped_session` will produce a new + :class:`.Session` object. + + """ + + if self.registry.has(): + self.registry().close() + self.registry.clear() + + def configure(self, **kwargs): + """reconfigure the :class:`.sessionmaker` used by this + :class:`.scoped_session`. + + See :meth:`.sessionmaker.configure`. + + """ + + if self.registry.has(): + warn('At least one scoped session is already present. ' + ' configure() can not affect sessions that have ' + 'already been created.') + + self.session_factory.configure(**kwargs) + + def query_property(self, query_cls=None): + """return a class property which produces a :class:`.Query` object + against the class and the current :class:`.Session` when called. 
+ + e.g.:: + + Session = scoped_session(sessionmaker()) + + class MyClass(object): + query = Session.query_property() + + # after mappers are defined + result = MyClass.query.filter(MyClass.name=='foo').all() + + Produces instances of the session's configured query class by + default. To override and use a custom implementation, provide + a ``query_cls`` callable. The callable will be invoked with + the class's mapper as a positional argument and a session + keyword argument. + + There is no limit to the number of query properties placed on + a class. + + """ + class query(object): + def __get__(s, instance, owner): + try: + mapper = class_mapper(owner) + if mapper: + if query_cls: + # custom query class + return query_cls(mapper, session=self.registry()) + else: + # session's configured query class + return self.registry().query(mapper) + except orm_exc.UnmappedClassError: + return None + return query() + +ScopedSession = scoped_session +"""Old name for backwards compatibility.""" + + +def instrument(name): + def do(self, *args, **kwargs): + return getattr(self.registry(), name)(*args, **kwargs) + return do + +for meth in Session.public_methods: + setattr(scoped_session, meth, instrument(meth)) + + +def makeprop(name): + def set(self, attr): + setattr(self.registry(), name, attr) + + def get(self): + return getattr(self.registry(), name) + + return property(get, set) + +for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map', + 'is_active', 'autoflush', 'no_autoflush', 'info'): + setattr(scoped_session, prop, makeprop(prop)) + + +def clslevel(name): + def do(cls, *args, **kwargs): + return getattr(Session, name)(*args, **kwargs) + return classmethod(do) + +for prop in ('close_all', 'object_session', 'identity_key'): + setattr(scoped_session, prop, clslevel(prop)) diff --git a/app/lib/sqlalchemy/orm/session.py b/app/lib/sqlalchemy/orm/session.py new file mode 100644 index 0000000..0819204 --- /dev/null +++ b/app/lib/sqlalchemy/orm/session.py @@ -0,0 +1,2970 @@ +# orm/session.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +"""Provides the Session class and related utilities.""" + + +import weakref +from .. import util, sql, engine, exc as sa_exc +from ..sql import util as sql_util, expression +from . import ( + SessionExtension, attributes, exc, query, + loading, identity +) +from ..inspection import inspect +from .base import ( + object_mapper, class_mapper, + _class_to_mapper, _state_mapper, object_state, + _none_set, state_str, instance_str +) +import itertools +from . import persistence +from .unitofwork import UOWTransaction +from . import state as statelib +import sys + +__all__ = ['Session', 'SessionTransaction', + 'SessionExtension', 'sessionmaker'] + +_sessions = weakref.WeakValueDictionary() +"""Weak-referencing dictionary of :class:`.Session` objects. +""" + + +def _state_session(state): + """Given an :class:`.InstanceState`, return the :class:`.Session` + associated, if any. 
+ """ + if state.session_id: + try: + return _sessions[state.session_id] + except KeyError: + pass + return None + + +class _SessionClassMethods(object): + """Class-level methods for :class:`.Session`, :class:`.sessionmaker`.""" + + @classmethod + def close_all(cls): + """Close *all* sessions in memory.""" + + for sess in _sessions.values(): + sess.close() + + @classmethod + @util.dependencies("sqlalchemy.orm.util") + def identity_key(cls, orm_util, *args, **kwargs): + """Return an identity key. + + This is an alias of :func:`.util.identity_key`. + + """ + return orm_util.identity_key(*args, **kwargs) + + @classmethod + def object_session(cls, instance): + """Return the :class:`.Session` to which an object belongs. + + This is an alias of :func:`.object_session`. + + """ + + return object_session(instance) + + +ACTIVE = util.symbol('ACTIVE') +PREPARED = util.symbol('PREPARED') +COMMITTED = util.symbol('COMMITTED') +DEACTIVE = util.symbol('DEACTIVE') +CLOSED = util.symbol('CLOSED') + + +class SessionTransaction(object): + """A :class:`.Session`-level transaction. + + :class:`.SessionTransaction` is a mostly behind-the-scenes object + not normally referenced directly by application code. It coordinates + among multiple :class:`.Connection` objects, maintaining a database + transaction for each one individually, committing or rolling them + back all at once. It also provides optional two-phase commit behavior + which can augment this coordination operation. + + The :attr:`.Session.transaction` attribute of :class:`.Session` + refers to the current :class:`.SessionTransaction` object in use, if any. + The :attr:`.SessionTransaction.parent` attribute refers to the parent + :class:`.SessionTransaction` in the stack of :class:`.SessionTransaction` + objects. If this attribute is ``None``, then this is the top of the stack. + If non-``None``, then this :class:`.SessionTransaction` refers either + to a so-called "subtransaction" or a "nested" transaction. A + "subtransaction" is a scoping concept that demarcates an inner portion + of the outermost "real" transaction. A nested transaction, which + is indicated when the :attr:`.SessionTransaction.nested` + attribute is also True, indicates that this :class:`.SessionTransaction` + corresponds to a SAVEPOINT. + + **Life Cycle** + + A :class:`.SessionTransaction` is associated with a :class:`.Session` + in its default mode of ``autocommit=False`` immediately, associated + with no database connections. As the :class:`.Session` is called upon + to emit SQL on behalf of various :class:`.Engine` or :class:`.Connection` + objects, a corresponding :class:`.Connection` and associated + :class:`.Transaction` is added to a collection within the + :class:`.SessionTransaction` object, becoming one of the + connection/transaction pairs maintained by the + :class:`.SessionTransaction`. The start of a :class:`.SessionTransaction` + can be tracked using the :meth:`.SessionEvents.after_transaction_create` + event. + + The lifespan of the :class:`.SessionTransaction` ends when the + :meth:`.Session.commit`, :meth:`.Session.rollback` or + :meth:`.Session.close` methods are called. At this point, the + :class:`.SessionTransaction` removes its association with its parent + :class:`.Session`. 
A :class:`.Session` that is in ``autocommit=False``
+    mode will create a new :class:`.SessionTransaction` to replace it
+    immediately, whereas a :class:`.Session` that's in ``autocommit=True``
+    mode will remain without a :class:`.SessionTransaction` until the
+    :meth:`.Session.begin` method is called.  The end of a
+    :class:`.SessionTransaction` can be tracked using the
+    :meth:`.SessionEvents.after_transaction_end` event.
+
+    **Nesting and Subtransactions**
+
+    Another detail of :class:`.SessionTransaction` behavior is that it is
+    capable of "nesting".  This means that the :meth:`.Session.begin` method
+    can be called while an existing :class:`.SessionTransaction` is already
+    present, producing a new :class:`.SessionTransaction` that temporarily
+    replaces the parent :class:`.SessionTransaction`.  When a
+    :class:`.SessionTransaction` is produced as nested, it assigns itself to
+    the :attr:`.Session.transaction` attribute, and it additionally will assign
+    the previous :class:`.SessionTransaction` to its
+    :attr:`.SessionTransaction.parent` attribute.  The behavior is effectively a
+    stack, where :attr:`.Session.transaction` refers to the current head of
+    the stack, and the :attr:`.SessionTransaction.parent` attribute allows
+    traversal up the stack until :attr:`.SessionTransaction.parent` is
+    ``None``, indicating the top of the stack.
+
+    When the scope of :class:`.SessionTransaction` is ended via
+    :meth:`.Session.commit` or :meth:`.Session.rollback`, it restores its
+    parent :class:`.SessionTransaction` back onto the
+    :attr:`.Session.transaction` attribute.
+
+    The purpose of this stack is to allow nesting of
+    :meth:`.Session.rollback` or :meth:`.Session.commit` calls in context
+    with various flavors of :meth:`.Session.begin`. This nesting behavior
+    applies when :meth:`.Session.begin_nested` is used to emit a
+    SAVEPOINT transaction, and is also used to produce a so-called
+    "subtransaction" which allows a block of code to use a
+    begin/rollback/commit sequence regardless of whether or not its enclosing
+    code block has begun a transaction.  The :meth:`.flush` method, whether
+    called explicitly or via autoflush, is the primary consumer of the
+    "subtransaction" feature, in that it wishes to guarantee that it works
+    within a transaction block regardless of whether or not the
+    :class:`.Session` is in transactional mode when the method is called.
+
+    Note that the flush process that occurs within the "autoflush" feature
+    as well as when the :meth:`.Session.flush` method is used **always**
+    creates a :class:`.SessionTransaction` object.  This object is normally
+    a subtransaction, unless the :class:`.Session` is in autocommit mode
+    and no transaction exists at all, in which case it's the outermost
+    transaction.   Any event-handling logic or other inspection logic
+    needs to take into account whether a :class:`.SessionTransaction`
+    is the outermost transaction, a subtransaction, or a "nested" / SAVEPOINT
+    transaction.
+
+    .. 
seealso:: + + :meth:`.Session.rollback` + + :meth:`.Session.commit` + + :meth:`.Session.begin` + + :meth:`.Session.begin_nested` + + :attr:`.Session.is_active` + + :meth:`.SessionEvents.after_transaction_create` + + :meth:`.SessionEvents.after_transaction_end` + + :meth:`.SessionEvents.after_commit` + + :meth:`.SessionEvents.after_rollback` + + :meth:`.SessionEvents.after_soft_rollback` + + """ + + _rollback_exception = None + + def __init__(self, session, parent=None, nested=False): + self.session = session + self._connections = {} + self._parent = parent + self.nested = nested + self._state = ACTIVE + if not parent and nested: + raise sa_exc.InvalidRequestError( + "Can't start a SAVEPOINT transaction when no existing " + "transaction is in progress") + + if self.session._enable_transaction_accounting: + self._take_snapshot() + + self.session.dispatch.after_transaction_create(self.session, self) + + @property + def parent(self): + """The parent :class:`.SessionTransaction` of this + :class:`.SessionTransaction`. + + If this attribute is ``None``, indicates this + :class:`.SessionTransaction` is at the top of the stack, and + corresponds to a real "COMMIT"/"ROLLBACK" + block. If non-``None``, then this is either a "subtransaction" + or a "nested" / SAVEPOINT transaction. If the + :attr:`.SessionTransaction.nested` attribute is ``True``, then + this is a SAVEPOINT, and if ``False``, indicates this a subtransaction. + + .. versionadded:: 1.0.16 - use ._parent for previous versions + + """ + return self._parent + + nested = False + """Indicates if this is a nested, or SAVEPOINT, transaction. + + When :attr:`.SessionTransaction.nested` is True, it is expected + that :attr:`.SessionTransaction.parent` will be True as well. + + """ + + @property + def is_active(self): + return self.session is not None and self._state is ACTIVE + + def _assert_active(self, prepared_ok=False, + rollback_ok=False, + deactive_ok=False, + closed_msg="This transaction is closed"): + if self._state is COMMITTED: + raise sa_exc.InvalidRequestError( + "This session is in 'committed' state; no further " + "SQL can be emitted within this transaction." + ) + elif self._state is PREPARED: + if not prepared_ok: + raise sa_exc.InvalidRequestError( + "This session is in 'prepared' state; no further " + "SQL can be emitted within this transaction." + ) + elif self._state is DEACTIVE: + if not deactive_ok and not rollback_ok: + if self._rollback_exception: + raise sa_exc.InvalidRequestError( + "This Session's transaction has been rolled back " + "due to a previous exception during flush." + " To begin a new transaction with this Session, " + "first issue Session.rollback()." + " Original exception was: %s" + % self._rollback_exception + ) + elif not deactive_ok: + raise sa_exc.InvalidRequestError( + "This Session's transaction has been rolled back " + "by a nested rollback() call. To begin a new " + "transaction, issue Session.rollback() first." 
+ ) + elif self._state is CLOSED: + raise sa_exc.ResourceClosedError(closed_msg) + + @property + def _is_transaction_boundary(self): + return self.nested or not self._parent + + def connection(self, bindkey, execution_options=None, **kwargs): + self._assert_active() + bind = self.session.get_bind(bindkey, **kwargs) + return self._connection_for_bind(bind, execution_options) + + def _begin(self, nested=False): + self._assert_active() + return SessionTransaction( + self.session, self, nested=nested) + + def _iterate_self_and_parents(self, upto=None): + + current = self + result = () + while current: + result += (current, ) + if current._parent is upto: + break + elif current._parent is None: + raise sa_exc.InvalidRequestError( + "Transaction %s is not on the active transaction list" % ( + upto)) + else: + current = current._parent + + return result + + def _take_snapshot(self): + if not self._is_transaction_boundary: + self._new = self._parent._new + self._deleted = self._parent._deleted + self._dirty = self._parent._dirty + self._key_switches = self._parent._key_switches + return + + if not self.session._flushing: + self.session.flush() + + self._new = weakref.WeakKeyDictionary() + self._deleted = weakref.WeakKeyDictionary() + self._dirty = weakref.WeakKeyDictionary() + self._key_switches = weakref.WeakKeyDictionary() + + def _restore_snapshot(self, dirty_only=False): + """Restore the restoration state taken before a transaction began. + + Corresponds to a rollback. + + """ + assert self._is_transaction_boundary + + self.session._expunge_states( + set(self._new).union(self.session._new), + to_transient=True) + + for s, (oldkey, newkey) in self._key_switches.items(): + self.session.identity_map.safe_discard(s) + s.key = oldkey + self.session.identity_map.replace(s) + + for s in set(self._deleted).union(self.session._deleted): + self.session._update_impl(s, revert_deletion=True) + + assert not self.session._deleted + + for s in self.session.identity_map.all_states(): + if not dirty_only or s.modified or s in self._dirty: + s._expire(s.dict, self.session.identity_map._modified) + + def _remove_snapshot(self): + """Remove the restoration state taken before a transaction began. + + Corresponds to a commit. 
+ + """ + assert self._is_transaction_boundary + + if not self.nested and self.session.expire_on_commit: + for s in self.session.identity_map.all_states(): + s._expire(s.dict, self.session.identity_map._modified) + + statelib.InstanceState._detach_states( + list(self._deleted), self.session) + self._deleted.clear() + elif self.nested: + self._parent._new.update(self._new) + self._parent._dirty.update(self._dirty) + self._parent._deleted.update(self._deleted) + self._parent._key_switches.update(self._key_switches) + + def _connection_for_bind(self, bind, execution_options): + self._assert_active() + + if bind in self._connections: + if execution_options: + util.warn( + "Connection is already established for the " + "given bind; execution_options ignored") + return self._connections[bind][0] + + if self._parent: + conn = self._parent._connection_for_bind(bind, execution_options) + if not self.nested: + return conn + else: + if isinstance(bind, engine.Connection): + conn = bind + if conn.engine in self._connections: + raise sa_exc.InvalidRequestError( + "Session already has a Connection associated for the " + "given Connection's Engine") + else: + conn = bind.contextual_connect() + + if execution_options: + conn = conn.execution_options(**execution_options) + + if self.session.twophase and self._parent is None: + transaction = conn.begin_twophase() + elif self.nested: + transaction = conn.begin_nested() + else: + transaction = conn.begin() + + self._connections[conn] = self._connections[conn.engine] = \ + (conn, transaction, conn is not bind) + self.session.dispatch.after_begin(self.session, self, conn) + return conn + + def prepare(self): + if self._parent is not None or not self.session.twophase: + raise sa_exc.InvalidRequestError( + "'twophase' mode not enabled, or not root transaction; " + "can't prepare.") + self._prepare_impl() + + def _prepare_impl(self): + self._assert_active() + if self._parent is None or self.nested: + self.session.dispatch.before_commit(self.session) + + stx = self.session.transaction + if stx is not self: + for subtransaction in stx._iterate_self_and_parents(upto=self): + subtransaction.commit() + + if not self.session._flushing: + for _flush_guard in range(100): + if self.session._is_clean(): + break + self.session.flush() + else: + raise exc.FlushError( + "Over 100 subsequent flushes have occurred within " + "session.commit() - is an after_flush() hook " + "creating new objects?") + + if self._parent is None and self.session.twophase: + try: + for t in set(self._connections.values()): + t[1].prepare() + except: + with util.safe_reraise(): + self.rollback() + + self._state = PREPARED + + def commit(self): + self._assert_active(prepared_ok=True) + if self._state is not PREPARED: + self._prepare_impl() + + if self._parent is None or self.nested: + for t in set(self._connections.values()): + t[1].commit() + + self._state = COMMITTED + self.session.dispatch.after_commit(self.session) + + if self.session._enable_transaction_accounting: + self._remove_snapshot() + + self.close() + return self._parent + + def rollback(self, _capture_exception=False): + self._assert_active(prepared_ok=True, rollback_ok=True) + + stx = self.session.transaction + if stx is not self: + for subtransaction in stx._iterate_self_and_parents(upto=self): + subtransaction.close() + + boundary = self + rollback_err = None + if self._state in (ACTIVE, PREPARED): + for transaction in self._iterate_self_and_parents(): + if transaction._parent is None or transaction.nested: + try: + 
transaction._rollback_impl() + except: + rollback_err = sys.exc_info() + transaction._state = DEACTIVE + boundary = transaction + break + else: + transaction._state = DEACTIVE + + sess = self.session + + if not rollback_err and sess._enable_transaction_accounting and \ + not sess._is_clean(): + + # if items were added, deleted, or mutated + # here, we need to re-restore the snapshot + util.warn( + "Session's state has been changed on " + "a non-active transaction - this state " + "will be discarded.") + boundary._restore_snapshot(dirty_only=boundary.nested) + + self.close() + + if self._parent and _capture_exception: + self._parent._rollback_exception = sys.exc_info()[1] + + if rollback_err: + util.reraise(*rollback_err) + + sess.dispatch.after_soft_rollback(sess, self) + + return self._parent + + def _rollback_impl(self): + try: + for t in set(self._connections.values()): + t[1].rollback() + finally: + if self.session._enable_transaction_accounting: + self._restore_snapshot(dirty_only=self.nested) + + self.session.dispatch.after_rollback(self.session) + + def close(self, invalidate=False): + self.session.transaction = self._parent + if self._parent is None: + for connection, transaction, autoclose in \ + set(self._connections.values()): + if invalidate: + connection.invalidate() + if autoclose: + connection.close() + else: + transaction.close() + + self._state = CLOSED + self.session.dispatch.after_transaction_end(self.session, self) + + if self._parent is None: + if not self.session.autocommit: + self.session.begin() + self.session = None + self._connections = None + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self._assert_active(deactive_ok=True, prepared_ok=True) + if self.session.transaction is None: + return + if type is None: + try: + self.commit() + except: + with util.safe_reraise(): + self.rollback() + else: + self.rollback() + + +class Session(_SessionClassMethods): + """Manages persistence operations for ORM-mapped objects. + + The Session's usage paradigm is described at :doc:`/orm/session`. + + + """ + + public_methods = ( + '__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested', + 'close', 'commit', 'connection', 'delete', 'execute', 'expire', + 'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind', + 'is_modified', 'bulk_save_objects', 'bulk_insert_mappings', + 'bulk_update_mappings', + 'merge', 'query', 'refresh', 'rollback', + 'scalar') + + def __init__(self, bind=None, autoflush=True, expire_on_commit=True, + _enable_transaction_accounting=True, + autocommit=False, twophase=False, + weak_identity_map=True, binds=None, extension=None, + info=None, + query_cls=query.Query): + r"""Construct a new Session. + + See also the :class:`.sessionmaker` function which is used to + generate a :class:`.Session`-producing callable with a given + set of arguments. + + :param autocommit: + + .. warning:: + + The autocommit flag is **not for general use**, and if it is + used, queries should only be invoked within the span of a + :meth:`.Session.begin` / :meth:`.Session.commit` pair. Executing + queries outside of a demarcated transaction is a legacy mode + of usage, and can in some cases lead to concurrent connection + checkouts. + + Defaults to ``False``. When ``True``, the + :class:`.Session` does not keep a persistent transaction running, + and will acquire connections from the engine on an as-needed basis, + returning them immediately after their use. 
Flushes will begin and
+          commit (or possibly rollback) their own transaction if no
+          transaction is present.  When using this mode, the
+          :meth:`.Session.begin` method is used to explicitly start
+          transactions.
+
+          .. seealso::
+
+            :ref:`session_autocommit`
+
+        :param autoflush: When ``True``, all query operations will issue a
+           :meth:`~.Session.flush` call to this ``Session`` before proceeding.
+           This is a convenience feature so that :meth:`~.Session.flush` need
+           not be called repeatedly in order for database queries to retrieve
+           results. It's typical that ``autoflush`` is used in conjunction
+           with ``autocommit=False``. In this scenario, explicit calls to
+           :meth:`~.Session.flush` are rarely needed; you usually only need to
+           call :meth:`~.Session.commit` (which flushes) to finalize changes.
+
+        :param bind: An optional :class:`.Engine` or :class:`.Connection` to
+           which this ``Session`` should be bound. When specified, all SQL
+           operations performed by this session will execute via this
+           connectable.
+
+        :param binds: An optional dictionary which contains more granular
+           "bind" information than the ``bind`` parameter provides. This
+           dictionary can map individual :class:`.Table`
+           instances as well as :class:`~.Mapper` instances to individual
+           :class:`.Engine` or :class:`.Connection` objects. Operations which
+           proceed relative to a particular :class:`.Mapper` will consult this
+           dictionary for the direct :class:`.Mapper` instance as
+           well as the mapper's ``mapped_table`` attribute in order to locate
+           a connectable to use. The full resolution is described in
+           :meth:`.Session.get_bind`.
+           Usage looks like::
+
+            Session = sessionmaker(binds={
+                SomeMappedClass: create_engine('postgresql://engine1'),
+                somemapper: create_engine('postgresql://engine2'),
+                some_table: create_engine('postgresql://engine3'),
+                })
+
+           Also see the :meth:`.Session.bind_mapper`
+           and :meth:`.Session.bind_table` methods.
+
+        :param \class_: Specify an alternate class other than
+           ``sqlalchemy.orm.session.Session`` which should be used by the
+           returned class. This is the only argument that is local to the
+           :class:`.sessionmaker` function, and is not sent directly to the
+           constructor for ``Session``.
+
+        :param _enable_transaction_accounting:  Defaults to ``True``.  A
+           legacy-only flag which when ``False`` disables *all* 0.5-style
+           object accounting on transaction boundaries, including auto-expiry
+           of instances on rollback and commit, maintenance of the "new" and
+           "deleted" lists upon rollback, and autoflush of pending changes
+           upon :meth:`~.Session.begin`, all of which are interdependent.
+
+        :param expire_on_commit:  Defaults to ``True``. When ``True``, all
+           instances will be fully expired after each :meth:`~.commit`,
+           so that all attribute/object access subsequent to a completed
+           transaction will load from the most recent database state.
+
+        :param extension: An optional
+           :class:`~.SessionExtension` instance, or a list
+           of such instances, which will receive pre- and post- commit and
+           flush events, as well as a post-rollback event. **Deprecated.**
+           Please see :class:`.SessionEvents`.
+
+        :param info: optional dictionary of arbitrary data to be associated
+           with this :class:`.Session`.  Is available via the
+           :attr:`.Session.info` attribute.  Note the dictionary is copied at
+           construction time so that modifications to the per-
+           :class:`.Session` dictionary will be local to that
+           :class:`.Session`.
+
+        .. 
versionadded:: 0.9.0 + + :param query_cls: Class which should be used to create new Query + objects, as returned by the :meth:`~.Session.query` method. + Defaults to :class:`.Query`. + + :param twophase: When ``True``, all transactions will be started as + a "two phase" transaction, i.e. using the "two phase" semantics + of the database in use along with an XID. During a + :meth:`~.commit`, after :meth:`~.flush` has been issued for all + attached databases, the :meth:`~.TwoPhaseTransaction.prepare` + method on each database's :class:`.TwoPhaseTransaction` will be + called. This allows each database to roll back the entire + transaction, before each transaction is committed. + + :param weak_identity_map: Defaults to ``True`` - when set to + ``False``, objects placed in the :class:`.Session` will be + strongly referenced until explicitly removed or the + :class:`.Session` is closed. **Deprecated** - The strong + reference identity map is legacy. See the + recipe at :ref:`session_referencing_behavior` for + an event-based approach to maintaining strong identity + references. + + """ + + if weak_identity_map: + self._identity_cls = identity.WeakInstanceDict + else: + util.warn_deprecated( + "weak_identity_map=False is deprecated. " + "See the documentation on 'Session Referencing Behavior' " + "for an event-based approach to maintaining strong identity " + "references.") + + self._identity_cls = identity.StrongInstanceDict + self.identity_map = self._identity_cls() + + self._new = {} # InstanceState->object, strong refs object + self._deleted = {} # same + self.bind = bind + self.__binds = {} + self._flushing = False + self._warn_on_events = False + self.transaction = None + self.hash_key = _new_sessionid() + self.autoflush = autoflush + self.autocommit = autocommit + self.expire_on_commit = expire_on_commit + self._enable_transaction_accounting = _enable_transaction_accounting + self.twophase = twophase + self._query_cls = query_cls + if info: + self.info.update(info) + + if extension: + for ext in util.to_list(extension): + SessionExtension._adapt_listener(self, ext) + + if binds is not None: + for key, bind in binds.items(): + self._add_bind(key, bind) + + if not self.autocommit: + self.begin() + _sessions[self.hash_key] = self + + connection_callable = None + + transaction = None + """The current active or inactive :class:`.SessionTransaction`.""" + + @util.memoized_property + def info(self): + """A user-modifiable dictionary. + + The initial value of this dictionary can be populated using the + ``info`` argument to the :class:`.Session` constructor or + :class:`.sessionmaker` constructor or factory methods. The dictionary + here is always local to this :class:`.Session` and can be modified + independently of all other :class:`.Session` objects. + + .. versionadded:: 0.9.0 + + """ + return {} + + def begin(self, subtransactions=False, nested=False): + """Begin a transaction on this :class:`.Session`. + + If this Session is already within a transaction, either a plain + transaction or nested transaction, an error is raised, unless + ``subtransactions=True`` or ``nested=True`` is specified. + + The ``subtransactions=True`` flag indicates that this + :meth:`~.Session.begin` can create a subtransaction if a transaction + is already in progress. For documentation on subtransactions, please + see :ref:`session_subtransactions`. + + The ``nested`` flag begins a SAVEPOINT transaction and is equivalent + to calling :meth:`~.Session.begin_nested`. 
For documentation on + SAVEPOINT transactions, please see :ref:`session_begin_nested`. + + """ + if self.transaction is not None: + if subtransactions or nested: + self.transaction = self.transaction._begin( + nested=nested) + else: + raise sa_exc.InvalidRequestError( + "A transaction is already begun. Use " + "subtransactions=True to allow subtransactions.") + else: + self.transaction = SessionTransaction( + self, nested=nested) + return self.transaction # needed for __enter__/__exit__ hook + + def begin_nested(self): + """Begin a `nested` transaction on this Session. + + The target database(s) must support SQL SAVEPOINTs or a + SQLAlchemy-supported vendor implementation of the idea. + + For documentation on SAVEPOINT + transactions, please see :ref:`session_begin_nested`. + + """ + return self.begin(nested=True) + + def rollback(self): + """Rollback the current transaction in progress. + + If no transaction is in progress, this method is a pass-through. + + This method rolls back the current transaction or nested transaction + regardless of subtransactions being in effect. All subtransactions up + to the first real transaction are closed. Subtransactions occur when + :meth:`.begin` is called multiple times. + + .. seealso:: + + :ref:`session_rollback` + + """ + if self.transaction is None: + pass + else: + self.transaction.rollback() + + def commit(self): + """Flush pending changes and commit the current transaction. + + If no transaction is in progress, this method raises an + :exc:`~sqlalchemy.exc.InvalidRequestError`. + + By default, the :class:`.Session` also expires all database + loaded state on all ORM-managed attributes after transaction commit. + This so that subsequent operations load the most recent + data from the database. This behavior can be disabled using + the ``expire_on_commit=False`` option to :class:`.sessionmaker` or + the :class:`.Session` constructor. + + If a subtransaction is in effect (which occurs when begin() is called + multiple times), the subtransaction will be closed, and the next call + to ``commit()`` will operate on the enclosing transaction. + + When using the :class:`.Session` in its default mode of + ``autocommit=False``, a new transaction will + be begun immediately after the commit, but note that the newly begun + transaction does *not* use any connection resources until the first + SQL is actually emitted. + + .. seealso:: + + :ref:`session_committing` + + """ + if self.transaction is None: + if not self.autocommit: + self.begin() + else: + raise sa_exc.InvalidRequestError("No transaction is begun.") + + self.transaction.commit() + + def prepare(self): + """Prepare the current transaction in progress for two phase commit. + + If no transaction is in progress, this method raises an + :exc:`~sqlalchemy.exc.InvalidRequestError`. + + Only root transactions of two phase sessions can be prepared. If the + current transaction is not such, an + :exc:`~sqlalchemy.exc.InvalidRequestError` is raised. + + """ + if self.transaction is None: + if not self.autocommit: + self.begin() + else: + raise sa_exc.InvalidRequestError("No transaction is begun.") + + self.transaction.prepare() + + def connection(self, mapper=None, clause=None, + bind=None, + close_with_result=False, + execution_options=None, + **kw): + r"""Return a :class:`.Connection` object corresponding to this + :class:`.Session` object's transactional state. 
+ + If this :class:`.Session` is configured with ``autocommit=False``, + either the :class:`.Connection` corresponding to the current + transaction is returned, or if no transaction is in progress, a new + one is begun and the :class:`.Connection` returned (note that no + transactional state is established with the DBAPI until the first + SQL statement is emitted). + + Alternatively, if this :class:`.Session` is configured with + ``autocommit=True``, an ad-hoc :class:`.Connection` is returned + using :meth:`.Engine.contextual_connect` on the underlying + :class:`.Engine`. + + Ambiguity in multi-bind or unbound :class:`.Session` objects can be + resolved through any of the optional keyword arguments. This + ultimately makes usage of the :meth:`.get_bind` method for resolution. + + :param bind: + Optional :class:`.Engine` to be used as the bind. If + this engine is already involved in an ongoing transaction, + that connection will be used. This argument takes precedence + over ``mapper``, ``clause``. + + :param mapper: + Optional :func:`.mapper` mapped class, used to identify + the appropriate bind. This argument takes precedence over + ``clause``. + + :param clause: + A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`, + :func:`~.sql.expression.text`, + etc.) which will be used to locate a bind, if a bind + cannot otherwise be identified. + + :param close_with_result: Passed to :meth:`.Engine.connect`, + indicating the :class:`.Connection` should be considered + "single use", automatically closing when the first result set is + closed. This flag only has an effect if this :class:`.Session` is + configured with ``autocommit=True`` and does not already have a + transaction in progress. + + :param execution_options: a dictionary of execution options that will + be passed to :meth:`.Connection.execution_options`, **when the + connection is first procured only**. If the connection is already + present within the :class:`.Session`, a warning is emitted and + the arguments are ignored. + + .. versionadded:: 0.9.9 + + .. seealso:: + + :ref:`session_transaction_isolation` + + :param \**kw: + Additional keyword arguments are sent to :meth:`get_bind()`, + allowing additional arguments to be passed to custom + implementations of :meth:`get_bind`. + + """ + if bind is None: + bind = self.get_bind(mapper, clause=clause, **kw) + + return self._connection_for_bind(bind, + close_with_result=close_with_result, + execution_options=execution_options) + + def _connection_for_bind(self, engine, execution_options=None, **kw): + if self.transaction is not None: + return self.transaction._connection_for_bind( + engine, execution_options) + else: + conn = engine.contextual_connect(**kw) + if execution_options: + conn = conn.execution_options(**execution_options) + return conn + + def execute(self, clause, params=None, mapper=None, bind=None, **kw): + r"""Execute a SQL expression construct or string statement within + the current transaction. + + Returns a :class:`.ResultProxy` representing + results of the statement execution, in the same manner as that of an + :class:`.Engine` or + :class:`.Connection`. + + E.g.:: + + result = session.execute( + user_table.select().where(user_table.c.id == 5) + ) + + :meth:`~.Session.execute` accepts any executable clause construct, + such as :func:`~.sql.expression.select`, + :func:`~.sql.expression.insert`, + :func:`~.sql.expression.update`, + :func:`~.sql.expression.delete`, and + :func:`~.sql.expression.text`. 
Plain SQL strings can be passed + as well, which in the case of :meth:`.Session.execute` only + will be interpreted the same as if it were passed via a + :func:`~.expression.text` construct. That is, the following usage:: + + result = session.execute( + "SELECT * FROM user WHERE id=:param", + {"param":5} + ) + + is equivalent to:: + + from sqlalchemy import text + result = session.execute( + text("SELECT * FROM user WHERE id=:param"), + {"param":5} + ) + + The second positional argument to :meth:`.Session.execute` is an + optional parameter set. Similar to that of + :meth:`.Connection.execute`, whether this is passed as a single + dictionary, or a list of dictionaries, determines whether the DBAPI + cursor's ``execute()`` or ``executemany()`` is used to execute the + statement. An INSERT construct may be invoked for a single row:: + + result = session.execute( + users.insert(), {"id": 7, "name": "somename"}) + + or for multiple rows:: + + result = session.execute(users.insert(), [ + {"id": 7, "name": "somename7"}, + {"id": 8, "name": "somename8"}, + {"id": 9, "name": "somename9"} + ]) + + The statement is executed within the current transactional context of + this :class:`.Session`. The :class:`.Connection` which is used + to execute the statement can also be acquired directly by + calling the :meth:`.Session.connection` method. Both methods use + a rule-based resolution scheme in order to determine the + :class:`.Connection`, which in the average case is derived directly + from the "bind" of the :class:`.Session` itself, and in other cases + can be based on the :func:`.mapper` + and :class:`.Table` objects passed to the method; see the + documentation for :meth:`.Session.get_bind` for a full description of + this scheme. + + The :meth:`.Session.execute` method does *not* invoke autoflush. + + The :class:`.ResultProxy` returned by the :meth:`.Session.execute` + method is returned with the "close_with_result" flag set to true; + the significance of this flag is that if this :class:`.Session` is + autocommitting and does not have a transaction-dedicated + :class:`.Connection` available, a temporary :class:`.Connection` is + established for the statement execution, which is closed (meaning, + returned to the connection pool) when the :class:`.ResultProxy` has + consumed all available data. This applies *only* when the + :class:`.Session` is configured with autocommit=True and no + transaction has been started. + + :param clause: + An executable statement (i.e. an :class:`.Executable` expression + such as :func:`.expression.select`) or string SQL statement + to be executed. + + :param params: + Optional dictionary, or list of dictionaries, containing + bound parameter values. If a single dictionary, single-row + execution occurs; if a list of dictionaries, an + "executemany" will be invoked. The keys in each dictionary + must correspond to parameter names present in the statement. + + :param mapper: + Optional :func:`.mapper` or mapped class, used to identify + the appropriate bind. This argument takes precedence over + ``clause`` when locating a bind. See :meth:`.Session.get_bind` + for more details. + + :param bind: + Optional :class:`.Engine` to be used as the bind. If + this engine is already involved in an ongoing transaction, + that connection will be used. This argument takes + precedence over ``mapper`` and ``clause`` when locating + a bind. + + :param \**kw: + Additional keyword arguments are sent to :meth:`.Session.get_bind()` + to allow extensibility of "bind" schemes. + + .. 
seealso:: + + :ref:`sqlexpression_toplevel` - Tutorial on using Core SQL + constructs. + + :ref:`connections_toplevel` - Further information on direct + statement execution. + + :meth:`.Connection.execute` - core level statement execution + method, which is :meth:`.Session.execute` ultimately uses + in order to execute the statement. + + """ + clause = expression._literal_as_text(clause) + + if bind is None: + bind = self.get_bind(mapper, clause=clause, **kw) + + return self._connection_for_bind( + bind, close_with_result=True).execute(clause, params or {}) + + def scalar(self, clause, params=None, mapper=None, bind=None, **kw): + """Like :meth:`~.Session.execute` but return a scalar result.""" + + return self.execute( + clause, params=params, mapper=mapper, bind=bind, **kw).scalar() + + def close(self): + """Close this Session. + + This clears all items and ends any transaction in progress. + + If this session were created with ``autocommit=False``, a new + transaction is immediately begun. Note that this new transaction does + not use any connection resources until they are first needed. + + """ + self._close_impl(invalidate=False) + + def invalidate(self): + """Close this Session, using connection invalidation. + + This is a variant of :meth:`.Session.close` that will additionally + ensure that the :meth:`.Connection.invalidate` method will be called + on all :class:`.Connection` objects. This can be called when + the database is known to be in a state where the connections are + no longer safe to be used. + + E.g.:: + + try: + sess = Session() + sess.add(User()) + sess.commit() + except gevent.Timeout: + sess.invalidate() + raise + except: + sess.rollback() + raise + + This clears all items and ends any transaction in progress. + + If this session were created with ``autocommit=False``, a new + transaction is immediately begun. Note that this new transaction does + not use any connection resources until they are first needed. + + .. versionadded:: 0.9.9 + + """ + self._close_impl(invalidate=True) + + def _close_impl(self, invalidate): + self.expunge_all() + if self.transaction is not None: + for transaction in self.transaction._iterate_self_and_parents(): + transaction.close(invalidate) + + def expunge_all(self): + """Remove all object instances from this ``Session``. + + This is equivalent to calling ``expunge(obj)`` on all objects in this + ``Session``. + + """ + + all_states = self.identity_map.all_states() + list(self._new) + self.identity_map = self._identity_cls() + self._new = {} + self._deleted = {} + + statelib.InstanceState._detach_states( + all_states, self + ) + + def _add_bind(self, key, bind): + try: + insp = inspect(key) + except sa_exc.NoInspectionAvailable: + if not isinstance(key, type): + raise sa_exc.ArgumentError( + "Not an acceptable bind target: %s" % key) + else: + self.__binds[key] = bind + else: + if insp.is_selectable: + self.__binds[insp] = bind + elif insp.is_mapper: + self.__binds[insp.class_] = bind + for selectable in insp._all_tables: + self.__binds[selectable] = bind + else: + raise sa_exc.ArgumentError( + "Not an acceptable bind target: %s" % key) + + def bind_mapper(self, mapper, bind): + """Associate a :class:`.Mapper` with a "bind", e.g. a :class:`.Engine` + or :class:`.Connection`. + + The given mapper is added to a lookup used by the + :meth:`.Session.get_bind` method. + + """ + self._add_bind(mapper, bind) + + def bind_table(self, table, bind): + """Associate a :class:`.Table` with a "bind", e.g. a :class:`.Engine` + or :class:`.Connection`. 
+
+        The given table is added to a lookup used by the
+        :meth:`.Session.get_bind` method.
+
+        """
+        self._add_bind(table, bind)
+
+    def get_bind(self, mapper=None, clause=None):
+        """Return a "bind" to which this :class:`.Session` is bound.
+
+        The "bind" is usually an instance of :class:`.Engine`,
+        except in the case where the :class:`.Session` has been
+        explicitly bound directly to a :class:`.Connection`.
+
+        For a multiply-bound or unbound :class:`.Session`, the
+        ``mapper`` or ``clause`` arguments are used to determine the
+        appropriate bind to return.
+
+        Note that the "mapper" argument is usually present
+        when :meth:`.Session.get_bind` is called via an ORM
+        operation such as a :meth:`.Session.query`, each
+        individual INSERT/UPDATE/DELETE operation within a
+        :meth:`.Session.flush` call, etc.
+
+        The order of resolution is:
+
+        1. if mapper given and session.binds is present,
+           locate a bind based on mapper.
+        2. if clause given and session.binds is present,
+           locate a bind based on :class:`.Table` objects
+           found in the given clause present in session.binds.
+        3. if session.bind is present, return that.
+        4. if clause given, attempt to return a bind
+           linked to the :class:`.MetaData` ultimately
+           associated with the clause.
+        5. if mapper given, attempt to return a bind
+           linked to the :class:`.MetaData` ultimately
+           associated with the :class:`.Table` or other
+           selectable to which the mapper is mapped.
+        6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError`
+           is raised.
+
+        :param mapper:
+          Optional :func:`.mapper` mapped class or instance of
+          :class:`.Mapper`.   The bind can be derived from a :class:`.Mapper`
+          first by consulting the "binds" map associated with this
+          :class:`.Session`, and secondly by consulting the :class:`.MetaData`
+          associated with the :class:`.Table` to which the :class:`.Mapper`
+          is mapped for a bind.
+
+        :param clause:
+          A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
+          :func:`~.sql.expression.text`,
+          etc.).   If the ``mapper`` argument is not present or could not
+          produce a bind, the given expression construct will be searched
+          for a bound element, typically a :class:`.Table` associated with
+          bound :class:`.MetaData`. 
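+
+        For example, a short sketch of rules 1 and 3 above; ``User``,
+        ``engine_users`` and ``engine_default`` here are placeholder
+        names, not part of the library::
+
+            maker = sessionmaker(bind=engine_default,
+                                 binds={User: engine_users})
+            session = maker()
+            session.get_bind(User)    # rule 1: per-mapper bind
+            session.get_bind()        # rule 3: session.bind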
+ + """ + + if mapper is clause is None: + if self.bind: + return self.bind + else: + raise sa_exc.UnboundExecutionError( + "This session is not bound to a single Engine or " + "Connection, and no context was provided to locate " + "a binding.") + + if mapper is not None: + try: + mapper = inspect(mapper) + except sa_exc.NoInspectionAvailable: + if isinstance(mapper, type): + raise exc.UnmappedClassError(mapper) + else: + raise + + if self.__binds: + if mapper: + for cls in mapper.class_.__mro__: + if cls in self.__binds: + return self.__binds[cls] + if clause is None: + clause = mapper.mapped_table + + if clause is not None: + for t in sql_util.find_tables(clause, include_crud=True): + if t in self.__binds: + return self.__binds[t] + + if self.bind: + return self.bind + + if isinstance(clause, sql.expression.ClauseElement) and clause.bind: + return clause.bind + + if mapper and mapper.mapped_table.bind: + return mapper.mapped_table.bind + + context = [] + if mapper is not None: + context.append('mapper %s' % mapper) + if clause is not None: + context.append('SQL expression') + + raise sa_exc.UnboundExecutionError( + "Could not locate a bind configured on %s or this Session" % ( + ', '.join(context))) + + def query(self, *entities, **kwargs): + """Return a new :class:`.Query` object corresponding to this + :class:`.Session`.""" + + return self._query_cls(entities, self, **kwargs) + + @property + @util.contextmanager + def no_autoflush(self): + """Return a context manager that disables autoflush. + + e.g.:: + + with session.no_autoflush: + + some_object = SomeClass() + session.add(some_object) + # won't autoflush + some_object.related_thing = session.query(SomeRelated).first() + + Operations that proceed within the ``with:`` block + will not be subject to flushes occurring upon query + access. This is useful when initializing a series + of objects which involve existing database queries, + where the uncompleted object should not yet be flushed. + + .. versionadded:: 0.7.6 + + """ + autoflush = self.autoflush + self.autoflush = False + try: + yield self + finally: + self.autoflush = autoflush + + def _autoflush(self): + if self.autoflush and not self._flushing: + try: + self.flush() + except sa_exc.StatementError as e: + # note we are reraising StatementError as opposed to + # raising FlushError with "chaining" to remain compatible + # with code that catches StatementError, IntegrityError, + # etc. + e.add_detail( + "raised as a result of Query-invoked autoflush; " + "consider using a session.no_autoflush block if this " + "flush is occurring prematurely") + util.raise_from_cause(e) + + def refresh(self, instance, attribute_names=None, lockmode=None): + """Expire and refresh the attributes on the given instance. + + A query will be issued to the database and all attributes will be + refreshed with their current database value. + + Lazy-loaded relational attributes will remain lazily loaded, so that + the instance-wide refresh operation will be followed immediately by + the lazy load of that attribute. + + Eagerly-loaded relational attributes will eagerly load within the + single refresh operation. + + Note that a highly isolated transaction will return the same values as + were previously read in that same transaction, regardless of changes + in database state outside of that transaction - usage of + :meth:`~Session.refresh` usually only makes sense if non-ORM SQL + statement were emitted in the ongoing transaction, or if autocommit + mode is turned on. + + :param attribute_names: optional. 
An iterable collection of
+          string attribute names indicating a subset of attributes to
+          be refreshed.
+
+        :param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query`
+          as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`.
+
+        .. seealso::
+
+            :ref:`session_expire` - introductory material
+
+            :meth:`.Session.expire`
+
+            :meth:`.Session.expire_all`
+
+        """
+        try:
+            state = attributes.instance_state(instance)
+        except exc.NO_STATE:
+            raise exc.UnmappedInstanceError(instance)
+
+        self._expire_state(state, attribute_names)
+
+        if loading.load_on_ident(
+                self.query(object_mapper(instance)),
+                state.key, refresh_state=state,
+                lockmode=lockmode,
+                only_load_props=attribute_names) is None:
+            raise sa_exc.InvalidRequestError(
+                "Could not refresh instance '%s'" %
+                instance_str(instance))
+
+    def expire_all(self):
+        """Expires all persistent instances within this Session.
+
+        When any attributes on a persistent instance are next accessed,
+        a query will be issued using the
+        :class:`.Session` object's current transactional context in order to
+        load all expired attributes for the given instance.  Note that
+        a highly isolated transaction will return the same values as were
+        previously read in that same transaction, regardless of changes
+        in database state outside of that transaction.
+
+        To expire individual objects and individual attributes
+        on those objects, use :meth:`Session.expire`.
+
+        The :class:`.Session` object's default behavior is to
+        expire all state whenever the :meth:`Session.rollback`
+        or :meth:`Session.commit` methods are called, so that new
+        state can be loaded for the new transaction.   For this reason,
+        calling :meth:`Session.expire_all` should not be needed when
+        autocommit is ``False``, assuming the transaction is isolated.
+
+        .. seealso::
+
+            :ref:`session_expire` - introductory material
+
+            :meth:`.Session.expire`
+
+            :meth:`.Session.refresh`
+
+        """
+        for state in self.identity_map.all_states():
+            state._expire(state.dict, self.identity_map._modified)
+
+    def expire(self, instance, attribute_names=None):
+        """Expire the attributes on an instance.
+
+        Marks the attributes of an instance as out of date. When an expired
+        attribute is next accessed, a query will be issued to the
+        :class:`.Session` object's current transactional context in order to
+        load all expired attributes for the given instance.  Note that
+        a highly isolated transaction will return the same values as were
+        previously read in that same transaction, regardless of changes
+        in database state outside of that transaction.
+
+        To expire all objects in the :class:`.Session` simultaneously,
+        use :meth:`Session.expire_all`.
+
+        The :class:`.Session` object's default behavior is to
+        expire all state whenever the :meth:`Session.rollback`
+        or :meth:`Session.commit` methods are called, so that new
+        state can be loaded for the new transaction.   For this reason,
+        calling :meth:`Session.expire` only makes sense for the specific
+        case that a non-ORM SQL statement was emitted in the current
+        transaction.
+
+        :param instance: The instance to be refreshed.
+        :param attribute_names: optional list of string attribute names
+          indicating a subset of attributes to be expired.
+
+        .. 
seealso::
+
+            :ref:`session_expire` - introductory material
+
+            :meth:`.Session.expire`
+
+            :meth:`.Session.refresh`
+
+        """
+        try:
+            state = attributes.instance_state(instance)
+        except exc.NO_STATE:
+            raise exc.UnmappedInstanceError(instance)
+        self._expire_state(state, attribute_names)
+
+    def _expire_state(self, state, attribute_names):
+        self._validate_persistent(state)
+        if attribute_names:
+            state._expire_attributes(state.dict, attribute_names)
+        else:
+            # pre-fetch the full cascade since the expire is going to
+            # remove associations
+            cascaded = list(state.manager.mapper.cascade_iterator(
+                'refresh-expire', state))
+            self._conditional_expire(state)
+            for o, m, st_, dct_ in cascaded:
+                self._conditional_expire(st_)
+
+    def _conditional_expire(self, state):
+        """Expire a state if persistent, else expunge if pending"""
+
+        if state.key:
+            state._expire(state.dict, self.identity_map._modified)
+        elif state in self._new:
+            self._new.pop(state)
+            state._detach(self)
+
+    @util.deprecated("0.7", "The non-weak-referencing identity map "
+                     "feature is no longer needed.")
+    def prune(self):
+        """Remove unreferenced instances cached in the identity map.
+
+        Note that this method is only meaningful if "weak_identity_map" is set
+        to False.  The default weak identity map is self-pruning.
+
+        Removes any object in this Session's identity map that is not
+        referenced in user code, modified, new or scheduled for deletion.
+        Returns the number of objects pruned.
+
+        """
+        return self.identity_map.prune()
+
+    def expunge(self, instance):
+        """Remove the `instance` from this ``Session``.
+
+        This will free all internal references to the instance.  Cascading
+        will be applied according to the *expunge* cascade rule.
+
+        """
+        try:
+            state = attributes.instance_state(instance)
+        except exc.NO_STATE:
+            raise exc.UnmappedInstanceError(instance)
+        if state.session_id is not self.hash_key:
+            raise sa_exc.InvalidRequestError(
+                "Instance %s is not present in this Session" %
+                state_str(state))
+
+        cascaded = list(state.manager.mapper.cascade_iterator(
+            'expunge', state))
+        self._expunge_states(
+            [state] + [st_ for o, m, st_, dct_ in cascaded]
+        )
+
+    def _expunge_states(self, states, to_transient=False):
+        for state in states:
+            if state in self._new:
+                self._new.pop(state)
+            elif self.identity_map.contains_state(state):
+                self.identity_map.safe_discard(state)
+                self._deleted.pop(state, None)
+            elif self.transaction:
+                # state is "detached" from being deleted, but still present
+                # in the transaction snapshot
+                self.transaction._deleted.pop(state, None)
+        statelib.InstanceState._detach_states(
+            states, self, to_transient=to_transient)
+
+    def _register_newly_persistent(self, states):
+        pending_to_persistent = self.dispatch.pending_to_persistent or None
+        for state in states:
+            mapper = _state_mapper(state)
+
+            # guard against last-minute dereferences of the object
+            obj = state.obj()
+            if obj is not None:
+
+                instance_key = mapper._identity_key_from_state(state)
+
+                if _none_set.intersection(instance_key[1]) and \
+                        not mapper.allow_partial_pks or \
+                        _none_set.issuperset(instance_key[1]):
+                    raise exc.FlushError(
+                        "Instance %s has a NULL identity key.  If this is an "
+                        "auto-generated value, check that the database table "
+                        "allows generation of new primary key values, and "
+                        "that the mapped Column object is configured to "
+                        "expect these generated values.  Ensure also that "
+                        "this flush() is not occurring at an inappropriate "
+                        "time, such as within a load() event." 
+ % state_str(state) + ) + + if state.key is None: + state.key = instance_key + elif state.key != instance_key: + # primary key switch. use safe_discard() in case another + # state has already replaced this one in the identity + # map (see test/orm/test_naturalpks.py ReversePKsTest) + self.identity_map.safe_discard(state) + if state in self.transaction._key_switches: + orig_key = self.transaction._key_switches[state][0] + else: + orig_key = state.key + self.transaction._key_switches[state] = ( + orig_key, instance_key) + state.key = instance_key + + self.identity_map.replace(state) + + statelib.InstanceState._commit_all_states( + ((state, state.dict) for state in states), + self.identity_map + ) + + self._register_altered(states) + + if pending_to_persistent is not None: + for state in states: + pending_to_persistent(self, state.obj()) + + # remove from new last, might be the last strong ref + for state in set(states).intersection(self._new): + self._new.pop(state) + + def _register_altered(self, states): + if self._enable_transaction_accounting and self.transaction: + for state in states: + if state in self._new: + self.transaction._new[state] = True + else: + self.transaction._dirty[state] = True + + def _remove_newly_deleted(self, states): + persistent_to_deleted = self.dispatch.persistent_to_deleted or None + for state in states: + if self._enable_transaction_accounting and self.transaction: + self.transaction._deleted[state] = True + + if persistent_to_deleted is not None: + # get a strong reference before we pop out of + # self._deleted + obj = state.obj() + + self.identity_map.safe_discard(state) + self._deleted.pop(state, None) + state._deleted = True + # can't call state._detach() here, because this state + # is still in the transaction snapshot and needs to be + # tracked as part of that + if persistent_to_deleted is not None: + persistent_to_deleted(self, obj) + + def add(self, instance, _warn=True): + """Place an object in the ``Session``. + + Its state will be persisted to the database on the next flush + operation. + + Repeated calls to ``add()`` will be ignored. The opposite of ``add()`` + is ``expunge()``. + + """ + if _warn and self._warn_on_events: + self._flush_warning("Session.add()") + + try: + state = attributes.instance_state(instance) + except exc.NO_STATE: + raise exc.UnmappedInstanceError(instance) + + self._save_or_update_state(state) + + def add_all(self, instances): + """Add the given collection of instances to this ``Session``.""" + + if self._warn_on_events: + self._flush_warning("Session.add_all()") + + for instance in instances: + self.add(instance, _warn=False) + + def _save_or_update_state(self, state): + self._save_or_update_impl(state) + + mapper = _state_mapper(state) + for o, m, st_, dct_ in mapper.cascade_iterator( + 'save-update', + state, + halt_on=self._contains_state): + self._save_or_update_impl(st_) + + def delete(self, instance): + """Mark an instance as deleted. + + The database delete operation occurs upon ``flush()``. 
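+
+        A minimal sketch, assuming ``some_user`` is a hypothetical
+        persistent instance already present in this :class:`.Session`::
+
+            session.delete(some_user)
+            session.flush()    # the DELETE is emitted here, not above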
+ + """ + if self._warn_on_events: + self._flush_warning("Session.delete()") + + try: + state = attributes.instance_state(instance) + except exc.NO_STATE: + raise exc.UnmappedInstanceError(instance) + + self._delete_impl(state, instance, head=True) + + def _delete_impl(self, state, obj, head): + + if state.key is None: + if head: + raise sa_exc.InvalidRequestError( + "Instance '%s' is not persisted" % + state_str(state)) + else: + return + + to_attach = self._before_attach(state, obj) + + if state in self._deleted: + return + + self.identity_map.add(state) + + if to_attach: + self._after_attach(state, obj) + + if head: + # grab the cascades before adding the item to the deleted list + # so that autoflush does not delete the item + # the strong reference to the instance itself is significant here + cascade_states = list(state.manager.mapper.cascade_iterator( + 'delete', state)) + + self._deleted[state] = obj + + if head: + for o, m, st_, dct_ in cascade_states: + self._delete_impl(st_, o, False) + + def merge(self, instance, load=True): + """Copy the state of a given instance into a corresponding instance + within this :class:`.Session`. + + :meth:`.Session.merge` examines the primary key attributes of the + source instance, and attempts to reconcile it with an instance of the + same primary key in the session. If not found locally, it attempts + to load the object from the database based on primary key, and if + none can be located, creates a new instance. The state of each + attribute on the source instance is then copied to the target + instance. The resulting target instance is then returned by the + method; the original source instance is left unmodified, and + un-associated with the :class:`.Session` if not already. + + This operation cascades to associated instances if the association is + mapped with ``cascade="merge"``. + + See :ref:`unitofwork_merging` for a detailed discussion of merging. + + .. versionchanged:: 1.1 - :meth:`.Session.merge` will now reconcile + pending objects with overlapping primary keys in the same way + as persistent. See :ref:`change_3601` for discussion. + + :param instance: Instance to be merged. + :param load: Boolean, when False, :meth:`.merge` switches into + a "high performance" mode which causes it to forego emitting history + events as well as all database access. This flag is used for + cases such as transferring graphs of objects into a :class:`.Session` + from a second level cache, or to transfer just-loaded objects + into the :class:`.Session` owned by a worker thread or process + without re-querying the database. + + The ``load=False`` use case adds the caveat that the given + object has to be in a "clean" state, that is, has no pending changes + to be flushed - even if the incoming object is detached from any + :class:`.Session`. This is so that when + the merge operation populates local attributes and + cascades to related objects and + collections, the values can be "stamped" onto the + target object as is, without generating any history or attribute + events, and without the need to reconcile the incoming data with + any existing related objects or collections that might not + be loaded. The resulting objects from ``load=False`` are always + produced as "clean", so it is only appropriate that the given objects + should be "clean" as well, else this suggests a mis-use of the + method. 
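+
+        As an illustration only, assuming a hypothetical mapped class
+        ``User``, a typical round trip might look like::
+
+            detached = User(id=5, name='updated name')
+            merged = session.merge(detached)
+            # ``merged`` is the session-bound copy; ``detached`` itself
+            # remains un-associated with this Session.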
+ + + """ + + if self._warn_on_events: + self._flush_warning("Session.merge()") + + _recursive = {} + _resolve_conflict_map = {} + + if load: + # flush current contents if we expect to load data + self._autoflush() + + object_mapper(instance) # verify mapped + autoflush = self.autoflush + try: + self.autoflush = False + return self._merge( + attributes.instance_state(instance), + attributes.instance_dict(instance), + load=load, _recursive=_recursive, + _resolve_conflict_map=_resolve_conflict_map) + finally: + self.autoflush = autoflush + + def _merge(self, state, state_dict, load=True, _recursive=None, + _resolve_conflict_map=None): + mapper = _state_mapper(state) + if state in _recursive: + return _recursive[state] + + new_instance = False + key = state.key + + if key is None: + if not load: + raise sa_exc.InvalidRequestError( + "merge() with load=False option does not support " + "transient (i.e. unpersisted) objects. flush() " + "all changes on mapped instances before merging with " + "load=False.") + key = mapper._identity_key_from_state(state) + key_is_persistent = attributes.NEVER_SET not in key[1] + else: + key_is_persistent = True + + if key in self.identity_map: + merged = self.identity_map[key] + elif key_is_persistent and key in _resolve_conflict_map: + merged = _resolve_conflict_map[key] + + elif not load: + if state.modified: + raise sa_exc.InvalidRequestError( + "merge() with load=False option does not support " + "objects marked as 'dirty'. flush() all changes on " + "mapped instances before merging with load=False.") + merged = mapper.class_manager.new_instance() + merged_state = attributes.instance_state(merged) + merged_state.key = key + self._update_impl(merged_state) + new_instance = True + + elif key_is_persistent and ( + not _none_set.intersection(key[1]) or + (mapper.allow_partial_pks and + not _none_set.issuperset(key[1]))): + merged = self.query(mapper.class_).get(key[1]) + else: + merged = None + + if merged is None: + merged = mapper.class_manager.new_instance() + merged_state = attributes.instance_state(merged) + merged_dict = attributes.instance_dict(merged) + new_instance = True + self._save_or_update_state(merged_state) + else: + merged_state = attributes.instance_state(merged) + merged_dict = attributes.instance_dict(merged) + + _recursive[state] = merged + _resolve_conflict_map[key] = merged + + # check that we didn't just pull the exact same + # state out. + if state is not merged_state: + # version check if applicable + if mapper.version_id_col is not None: + existing_version = mapper._get_state_attr_by_column( + state, + state_dict, + mapper.version_id_col, + passive=attributes.PASSIVE_NO_INITIALIZE) + + merged_version = mapper._get_state_attr_by_column( + merged_state, + merged_dict, + mapper.version_id_col, + passive=attributes.PASSIVE_NO_INITIALIZE) + + if existing_version is not attributes.PASSIVE_NO_RESULT and \ + merged_version is not attributes.PASSIVE_NO_RESULT and \ + existing_version != merged_version: + raise exc.StaleDataError( + "Version id '%s' on merged state %s " + "does not match existing version '%s'. " + "Leave the version attribute unset when " + "merging to update the most recent version." + % ( + existing_version, + state_str(merged_state), + merged_version + )) + + merged_state.load_path = state.load_path + merged_state.load_options = state.load_options + + # since we are copying load_options, we need to copy + # the callables_ that would have been generated by those + # load_options. 
# assumes that the callables we put in state.callables_ + # are not instance-specific (which they should not be) + merged_state._copy_callables(state) + + for prop in mapper.iterate_properties: + prop.merge(self, state, state_dict, + merged_state, merged_dict, + load, _recursive, _resolve_conflict_map) + + if not load: + # remove any history + merged_state._commit_all(merged_dict, self.identity_map) + + if new_instance: + merged_state.manager.dispatch.load(merged_state, None) + return merged + + def _validate_persistent(self, state): + if not self.identity_map.contains_state(state): + raise sa_exc.InvalidRequestError( + "Instance '%s' is not persistent within this Session" % + state_str(state)) + + def _save_impl(self, state): + if state.key is not None: + raise sa_exc.InvalidRequestError( + "Object '%s' already has an identity - " + "it can't be registered as pending" % state_str(state)) + + obj = state.obj() + to_attach = self._before_attach(state, obj) + if state not in self._new: + self._new[state] = obj + state.insert_order = len(self._new) + if to_attach: + self._after_attach(state, obj) + + def _update_impl(self, state, revert_deletion=False): + if state.key is None: + raise sa_exc.InvalidRequestError( + "Instance '%s' is not persisted" % + state_str(state)) + + if state._deleted: + if revert_deletion: + if not state._attached: + return + del state._deleted + else: + raise sa_exc.InvalidRequestError( + "Instance '%s' has been deleted. " + "Use the make_transient() " + "function to send this object back " + "to the transient state." % + state_str(state) + ) + + obj = state.obj() + + # check for late gc + if obj is None: + return + + to_attach = self._before_attach(state, obj) + + self._deleted.pop(state, None) + if revert_deletion: + self.identity_map.replace(state) + else: + self.identity_map.add(state) + + if to_attach: + self._after_attach(state, obj) + elif revert_deletion: + self.dispatch.deleted_to_persistent(self, obj) + + def _save_or_update_impl(self, state): + if state.key is None: + self._save_impl(state) + else: + self._update_impl(state) + + def enable_relationship_loading(self, obj): + """Associate an object with this :class:`.Session` for related + object loading. + + .. warning:: + + :meth:`.enable_relationship_loading` exists to serve special + use cases and is not recommended for general use. + + Accesses of attributes mapped with :func:`.relationship` + will attempt to load a value from the database using this + :class:`.Session` as the source of connectivity. The values + will be loaded based on foreign key values present on this + object - it follows that this functionality + generally only works for many-to-one relationships. + + The object will be attached to this session, but will + **not** participate in any persistence operations; its state + for almost all purposes will remain either "transient" or + "detached", except for the case of relationship loading. + + Also note that backrefs will often not work as expected. + Altering a relationship-bound attribute on the target object + may not fire off a backref event, if the effective value + is what was already loaded from a foreign-key-holding value. + + The :meth:`.Session.enable_relationship_loading` method is + similar to the ``load_on_pending`` flag on :func:`.relationship`. + Unlike that flag, :meth:`.Session.enable_relationship_loading` allows + an object to remain transient while still being able to load + related items. 
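+
+        A minimal sketch, assuming hypothetical ``Address`` and ``User``
+        mappings where ``Address.user`` is a many-to-one relationship
+        against a locally present ``Address.user_id`` value::
+
+            address = Address(user_id=5)   # transient; FK value set manually
+            session.enable_relationship_loading(address)
+            address.user   # lazy loads the related User via this Session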
+ + To make a transient object associated with a :class:`.Session` + via :meth:`.Session.enable_relationship_loading` pending, add + it to the :class:`.Session` using :meth:`.Session.add` normally. + + :meth:`.Session.enable_relationship_loading` does not improve + behavior when the ORM is used normally - object references should be + constructed at the object level, not at the foreign key level, so + that they are present in an ordinary way before flush() + proceeds. This method is not intended for general use. + + .. versionadded:: 0.8 + + .. seealso:: + + ``load_on_pending`` at :func:`.relationship` - this flag + allows per-relationship loading of many-to-ones on items that + are pending. + + """ + state = attributes.instance_state(obj) + to_attach = self._before_attach(state, obj) + state._load_pending = True + if to_attach: + self._after_attach(state, obj) + + def _before_attach(self, state, obj): + if state.session_id == self.hash_key: + return False + + if state.session_id and state.session_id in _sessions: + raise sa_exc.InvalidRequestError( + "Object '%s' is already attached to session '%s' " + "(this is '%s')" % (state_str(state), + state.session_id, self.hash_key)) + + self.dispatch.before_attach(self, obj) + + return True + + def _after_attach(self, state, obj): + state.session_id = self.hash_key + if state.modified and state._strong_obj is None: + state._strong_obj = obj + self.dispatch.after_attach(self, obj) + + if state.key: + self.dispatch.detached_to_persistent(self, obj) + else: + self.dispatch.transient_to_pending(self, obj) + + def __contains__(self, instance): + """Return True if the instance is associated with this session. + + The instance may be pending or persistent within the Session for a + result of True. + + """ + try: + state = attributes.instance_state(instance) + except exc.NO_STATE: + raise exc.UnmappedInstanceError(instance) + return self._contains_state(state) + + def __iter__(self): + """Iterate over all pending or persistent instances within this + Session. + + """ + return iter( + list(self._new.values()) + list(self.identity_map.values())) + + def _contains_state(self, state): + return state in self._new or self.identity_map.contains_state(state) + + def flush(self, objects=None): + """Flush all the object changes to the database. + + Writes out all pending object creations, deletions and modifications + to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are + automatically ordered by the Session's unit of work dependency + solver. + + Database operations will be issued in the current transactional + context and do not affect the state of the transaction, unless an + error occurs, in which case the entire transaction is rolled back. + You may flush() as often as you like within a transaction to move + changes from Python to the database's transaction buffer. + + For ``autocommit`` Sessions with no active manual transaction, flush() + will create a transaction on the fly that surrounds the entire set of + operations into the flush. + + :param objects: Optional; restricts the flush operation to operate + only on elements that are in the given collection. + + This feature is for an extremely narrow set of use cases where + particular objects may need to be operated upon before the + full flush() occurs. It is not intended for general use. 
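+
+        E.g., a minimal sketch, using a hypothetical ``someobject``::
+
+            session.add(someobject)   # pending; no SQL emitted yet
+            session.flush()           # INSERT emitted within the transaction
+            session.commit()          # transaction committed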
+ + """ + + if self._flushing: + raise sa_exc.InvalidRequestError("Session is already flushing") + + if self._is_clean(): + return + try: + self._flushing = True + self._flush(objects) + finally: + self._flushing = False + + def _flush_warning(self, method): + util.warn( + "Usage of the '%s' operation is not currently supported " + "within the execution stage of the flush process. " + "Results may not be consistent. Consider using alternative " + "event listeners or connection-level operations instead." + % method) + + def _is_clean(self): + return not self.identity_map.check_modified() and \ + not self._deleted and \ + not self._new + + def _flush(self, objects=None): + + dirty = self._dirty_states + if not dirty and not self._deleted and not self._new: + self.identity_map._modified.clear() + return + + flush_context = UOWTransaction(self) + + if self.dispatch.before_flush: + self.dispatch.before_flush(self, flush_context, objects) + # re-establish "dirty states" in case the listeners + # added + dirty = self._dirty_states + + deleted = set(self._deleted) + new = set(self._new) + + dirty = set(dirty).difference(deleted) + + # create the set of all objects we want to operate upon + if objects: + # specific list passed in + objset = set() + for o in objects: + try: + state = attributes.instance_state(o) + except exc.NO_STATE: + raise exc.UnmappedInstanceError(o) + objset.add(state) + else: + objset = None + + # store objects whose fate has been decided + processed = set() + + # put all saves/updates into the flush context. detect top-level + # orphans and throw them into deleted. + if objset: + proc = new.union(dirty).intersection(objset).difference(deleted) + else: + proc = new.union(dirty).difference(deleted) + + for state in proc: + is_orphan = ( + _state_mapper(state)._is_orphan(state) and state.has_identity) + _reg = flush_context.register_object(state, isdelete=is_orphan) + assert _reg, "Failed to add object to the flush context!" + processed.add(state) + + # put all remaining deletes into the flush context. + if objset: + proc = deleted.intersection(objset).difference(processed) + else: + proc = deleted.difference(processed) + for state in proc: + _reg = flush_context.register_object(state, isdelete=True) + assert _reg, "Failed to add object to the flush context!" + + if not flush_context.has_work: + return + + flush_context.transaction = transaction = self.begin( + subtransactions=True) + try: + self._warn_on_events = True + try: + flush_context.execute() + finally: + self._warn_on_events = False + + self.dispatch.after_flush(self, flush_context) + + flush_context.finalize_flush_changes() + + if not objects and self.identity_map._modified: + len_ = len(self.identity_map._modified) + + statelib.InstanceState._commit_all_states( + [(state, state.dict) for state in + self.identity_map._modified], + instance_dict=self.identity_map) + util.warn("Attribute history events accumulated on %d " + "previously clean instances " + "within inner-flush event handlers have been " + "reset, and will not result in database updates. " + "Consider using set_committed_value() within " + "inner-flush event handlers to avoid this warning." 
+ % len_) + + # useful assertions: + # if not objects: + # assert not self.identity_map._modified + # else: + # assert self.identity_map._modified == \ + # self.identity_map._modified.difference(objects) + + self.dispatch.after_flush_postexec(self, flush_context) + + transaction.commit() + + except: + with util.safe_reraise(): + transaction.rollback(_capture_exception=True) + + def bulk_save_objects( + self, objects, return_defaults=False, update_changed_only=True): + """Perform a bulk save of the given list of objects. + + The bulk save feature allows mapped objects to be used as the + source of simple INSERT and UPDATE operations which can be more easily + grouped together into higher performing "executemany" + operations; the extraction of data from the objects is also performed + using a lower-latency process that ignores whether or not attributes + have actually been modified in the case of UPDATEs, and also ignores + SQL expressions. + + The objects as given are not added to the session and no additional + state is established on them, unless the ``return_defaults`` flag + is also set, in which case primary key attributes and server-side + default values will be populated. + + .. versionadded:: 1.0.0 + + .. warning:: + + The bulk save feature allows for a lower-latency INSERT/UPDATE + of rows at the expense of most other unit-of-work features. + Features such as object management, relationship handling, + and SQL clause support are **silently omitted** in favor of raw + INSERT/UPDATES of records. + + **Please read the list of caveats at** :ref:`bulk_operations` + **before using this method, and fully test and confirm the + functionality of all code developed using these systems.** + + :param objects: a list of mapped object instances. The mapped + objects are persisted as is, and are **not** associated with the + :class:`.Session` afterwards. + + For each object, whether the object is sent as an INSERT or an + UPDATE is dependent on the same rules used by the :class:`.Session` + in traditional operation; if the object has the + :attr:`.InstanceState.key` + attribute set, then the object is assumed to be "detached" and + will result in an UPDATE. Otherwise, an INSERT is used. + + In the case of an UPDATE, statements are grouped based on which + attributes have changed, and are thus to be the subject of each + SET clause. If ``update_changed_only`` is False, then all + attributes present within each object are applied to the UPDATE + statement, which may help in allowing the statements to be grouped + together into a larger executemany(), and will also reduce the + overhead of checking history on attributes. + + :param return_defaults: when True, rows that are missing values which + generate defaults, namely integer primary key defaults and sequences, + will be inserted **one at a time**, so that the primary key value + is available. In particular this will allow joined-inheritance + and other multi-table mappings to insert correctly without the need + to provide primary key values ahead of time; however, + :paramref:`.Session.bulk_save_objects.return_defaults` **greatly + reduces the performance gains** of the method overall. + + :param update_changed_only: when True, UPDATE statements are rendered + based on those attributes in each state that have logged changes. + When False, all attributes present are rendered into the SET clause + with the exception of primary key attributes. + + .. 
seealso:: + + :ref:`bulk_operations` + + :meth:`.Session.bulk_insert_mappings` + + :meth:`.Session.bulk_update_mappings` + + """ + for (mapper, isupdate), states in itertools.groupby( + (attributes.instance_state(obj) for obj in objects), + lambda state: (state.mapper, state.key is not None) + ): + self._bulk_save_mappings( + mapper, states, isupdate, True, + return_defaults, update_changed_only, False) + + def bulk_insert_mappings( + self, mapper, mappings, return_defaults=False, render_nulls=False): + """Perform a bulk insert of the given list of mapping dictionaries. + + The bulk insert feature allows plain Python dictionaries to be used as + the source of simple INSERT operations which can be more easily + grouped together into higher performing "executemany" + operations. Using dictionaries, there are no "history" or session + state management features in use, reducing latency when inserting + large numbers of simple rows. + + The values within the dictionaries as given are typically passed + without modification into Core :class:`.Insert` constructs, after + organizing the values within them across the tables to which + the given mapper is mapped. + + .. versionadded:: 1.0.0 + + .. warning:: + + The bulk insert feature allows for a lower-latency INSERT + of rows at the expense of most other unit-of-work features. + Features such as object management, relationship handling, + and SQL clause support are **silently omitted** in favor of raw + INSERT of records. + + **Please read the list of caveats at** :ref:`bulk_operations` + **before using this method, and fully test and confirm the + functionality of all code developed using these systems.** + + :param mapper: a mapped class, or the actual :class:`.Mapper` object, + representing the single kind of object represented within the mapping + list. + + :param mappings: a list of dictionaries, each one containing the state + of the mapped row to be inserted, in terms of the attribute names + on the mapped class. If the mapping refers to multiple tables, + such as a joined-inheritance mapping, each dictionary must contain + all keys to be populated into all tables. + + :param return_defaults: when True, rows that are missing values which + generate defaults, namely integer primary key defaults and sequences, + will be inserted **one at a time**, so that the primary key value + is available. In particular this will allow joined-inheritance + and other multi-table mappings to insert correctly without the need + to provide primary + key values ahead of time; however, + :paramref:`.Session.bulk_insert_mappings.return_defaults` + **greatly reduces the performance gains** of the method overall. + If the rows + to be inserted only refer to a single table, then there is no + reason this flag should be set as the returned default information + is not used. + + :param render_nulls: When True, a value of ``None`` will result + in a NULL value being included in the INSERT statement, rather + than the column being omitted from the INSERT. This allows all + the rows being INSERTed to have the identical set of columns which + allows the full set of rows to be batched to the DBAPI. Normally, + each column-set that contains a different combination of NULL values + than the previous row must omit a different series of columns from + the rendered INSERT statement, which means it must be emitted as a + separate statement. 
By passing this flag, the full set of rows + are guaranteed to be batchable into one batch; the cost however is + that server-side defaults which are invoked by an omitted column will + be skipped, so care must be taken to ensure that these are not + necessary. + + .. warning:: + + When this flag is set, **server side default SQL values will + not be invoked** for those columns that are inserted as NULL; + the NULL value will be sent explicitly. Care must be taken + to ensure that no server-side default functions need to be + invoked for the operation as a whole. + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`bulk_operations` + + :meth:`.Session.bulk_save_objects` + + :meth:`.Session.bulk_update_mappings` + + """ + self._bulk_save_mappings( + mapper, mappings, False, False, + return_defaults, False, render_nulls) + + def bulk_update_mappings(self, mapper, mappings): + """Perform a bulk update of the given list of mapping dictionaries. + + The bulk update feature allows plain Python dictionaries to be used as + the source of simple UPDATE operations which can be more easily + grouped together into higher performing "executemany" + operations. Using dictionaries, there are no "history" or session + state management features in use, reducing latency when updating + large numbers of simple rows. + + .. versionadded:: 1.0.0 + + .. warning:: + + The bulk update feature allows for a lower-latency UPDATE + of rows at the expense of most other unit-of-work features. + Features such as object management, relationship handling, + and SQL clause support are **silently omitted** in favor of raw + UPDATES of records. + + **Please read the list of caveats at** :ref:`bulk_operations` + **before using this method, and fully test and confirm the + functionality of all code developed using these systems.** + + :param mapper: a mapped class, or the actual :class:`.Mapper` object, + representing the single kind of object represented within the mapping + list. + + :param mappings: a list of dictionaries, each one containing the state + of the mapped row to be updated, in terms of the attribute names + on the mapped class. If the mapping refers to multiple tables, + such as a joined-inheritance mapping, each dictionary may contain + keys corresponding to all tables. All those keys which are present + and are not part of the primary key are applied to the SET clause + of the UPDATE statement; the primary key values, which are required, + are applied to the WHERE clause. + + + .. seealso:: + + :ref:`bulk_operations` + + :meth:`.Session.bulk_insert_mappings` + + :meth:`.Session.bulk_save_objects` + + """ + self._bulk_save_mappings( + mapper, mappings, True, False, False, False, False) + + def _bulk_save_mappings( + self, mapper, mappings, isupdate, isstates, + return_defaults, update_changed_only, render_nulls): + mapper = _class_to_mapper(mapper) + self._flushing = True + + transaction = self.begin( + subtransactions=True) + try: + if isupdate: + persistence._bulk_update( + mapper, mappings, transaction, + isstates, update_changed_only) + else: + persistence._bulk_insert( + mapper, mappings, transaction, + isstates, return_defaults, render_nulls) + transaction.commit() + + except: + with util.safe_reraise(): + transaction.rollback(_capture_exception=True) + finally: + self._flushing = False + + def is_modified(self, instance, include_collections=True, + passive=True): + r"""Return ``True`` if the given instance has locally + modified attributes. 
+ + This method retrieves the history for each instrumented + attribute on the instance and performs a comparison of the current + value to its previously committed value, if any. + + It is in effect a more expensive and accurate + version of checking for the given instance in the + :attr:`.Session.dirty` collection; a full test for + each attribute's net "dirty" status is performed. + + E.g.:: + + return session.is_modified(someobject) + + .. versionchanged:: 0.8 + When using SQLAlchemy 0.7 and earlier, the ``passive`` + flag should **always** be explicitly set to ``True``, + else SQL loads/autoflushes may proceed which can affect + the modified state itself: + ``session.is_modified(someobject, passive=True)``\ . + In 0.8 and above, the behavior is corrected and + this flag is ignored. + + A few caveats to this method apply: + + * Instances present in the :attr:`.Session.dirty` collection may + report ``False`` when tested with this method. This is because + the object may have received change events via attribute mutation, + thus placing it in :attr:`.Session.dirty`, but ultimately the state + is the same as that loaded from the database, resulting in no net + change here. + * Scalar attributes may not have recorded the previously set + value when a new value was applied, if the attribute was not loaded, + or was expired, at the time the new value was received - in these + cases, the attribute is assumed to have a change, even if there is + ultimately no net change against its database value. SQLAlchemy in + most cases does not need the "old" value when a set event occurs, so + it skips the expense of a SQL call if the old value isn't present, + based on the assumption that an UPDATE of the scalar value is + usually needed, and in those few cases where it isn't, is less + expensive on average than issuing a defensive SELECT. + + The "old" value is fetched unconditionally upon set only if the + attribute container has the ``active_history`` flag set to ``True``. + This flag is set typically for primary key attributes and scalar + object references that are not a simple many-to-one. To set this + flag for any arbitrary mapped column, use the ``active_history`` + argument with :func:`.column_property`. + + :param instance: mapped instance to be tested for pending changes. + :param include_collections: Indicates if multivalued collections + should be included in the operation. Setting this to ``False`` is a + way to detect only local-column based properties (i.e. scalar columns + or many-to-one foreign keys) that would result in an UPDATE for this + instance upon flush. + :param passive: + + .. versionchanged:: 0.8 + Ignored for backwards compatibility. + When using SQLAlchemy 0.7 and earlier, this flag should always + be set to ``True``. + + """ + state = object_state(instance) + + if not state.modified: + return False + + dict_ = state.dict + + for attr in state.manager.attributes: + if \ + ( + not include_collections and + hasattr(attr.impl, 'get_collection') + ) or not hasattr(attr.impl, 'get_history'): + continue + + (added, unchanged, deleted) = \ + attr.impl.get_history(state, dict_, + passive=attributes.NO_CHANGE) + + if added or deleted: + return True + else: + return False + + @property + def is_active(self): + """True if this :class:`.Session` is in "transaction mode" and + is not in "partial rollback" state. 
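+
+        As a quick sketch before the details below, a framework-level
+        error handler might consult this flag as follows (``do_some_work``
+        is hypothetical)::
+
+            try:
+                do_some_work(session)
+            except Exception:
+                if not session.is_active:
+                    session.rollback()   # close out the partial rollback
+                raise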
+ + The :class:`.Session` in its default mode of ``autocommit=False`` + is essentially always in "transaction mode", in that a + :class:`.SessionTransaction` is associated with it as soon as + it is instantiated. This :class:`.SessionTransaction` is immediately + replaced with a new one as soon as it is ended, due to a rollback, + commit, or close operation. + + "Transaction mode" does *not* indicate whether + or not actual database connection resources are in use; the + :class:`.SessionTransaction` object coordinates among zero or more + actual database transactions, and starts out with none, accumulating + individual DBAPI connections as different data sources are used + within its scope. The best way to track when a particular + :class:`.Session` has actually begun to use DBAPI resources is to + implement a listener using the :meth:`.SessionEvents.after_begin` + method, which will deliver both the :class:`.Session` as well as the + target :class:`.Connection` to a user-defined event listener. + + The "partial rollback" state refers to when an "inner" transaction, + typically used during a flush, encounters an error and emits a + rollback of the DBAPI connection. At this point, the + :class:`.Session` is in "partial rollback" and waits for the user to + call :meth:`.Session.rollback`, in order to close out the + transaction stack. It is in this "partial rollback" period that the + :attr:`.is_active` flag returns False. After the call to + :meth:`.Session.rollback`, the :class:`.SessionTransaction` is + replaced with a new one and :attr:`.is_active` returns ``True`` again. + + When a :class:`.Session` is used in ``autocommit=True`` mode, the + :class:`.SessionTransaction` is only instantiated within the scope + of a flush call, or when :meth:`.Session.begin` is called. So + :attr:`.is_active` will always be ``False`` outside of a flush or + :meth:`.Session.begin` block in this mode, and will be ``True`` + within the :meth:`.Session.begin` block as long as it doesn't enter + "partial rollback" state. + + From all the above, it follows that the only purpose of this flag is + for application frameworks that wish to detect if a "rollback" is + necessary within a generic error handling routine, for + :class:`.Session` objects that would otherwise be in + "partial rollback" mode. In a typical integration case, this is also + not necessary as it is standard practice to emit + :meth:`.Session.rollback` unconditionally within the outermost + exception catch. + + To track the transactional state of a :class:`.Session` fully, + use event listeners, primarily the :meth:`.SessionEvents.after_begin`, + :meth:`.SessionEvents.after_commit`, + :meth:`.SessionEvents.after_rollback` and related events. + + """ + return self.transaction and self.transaction.is_active + + identity_map = None + """A mapping of object identities to objects themselves. + + Iterating through ``Session.identity_map.values()`` provides + access to the full set of persistent objects (i.e., those + that have row identity) currently in the session. + + .. seealso:: + + :func:`.identity_key` - helper function to produce the keys used + in this dictionary. + + """ + + @property + def _dirty_states(self): + """The set of all persistent states considered dirty. + + This method returns all states that were modified including + those that were possibly deleted. + + """ + return self.identity_map._dirty_states() + + @property + def dirty(self): + """The set of all persistent instances considered dirty. 
+ + E.g.:: + + some_mapped_object in session.dirty + + Instances are considered dirty when they were modified but not + deleted. + + Note that this 'dirty' calculation is 'optimistic'; most + attribute-setting or collection modification operations will + mark an instance as 'dirty' and place it in this set, even if + there is no net change to the attribute's value. At flush + time, the value of each attribute is compared to its + previously saved value, and if there's no net change, no SQL + operation will occur (this is a more expensive operation so + it's only done at flush time). + + To check if an instance has actionable net changes to its + attributes, use the :meth:`.Session.is_modified` method. + + """ + return util.IdentitySet( + [state.obj() + for state in self._dirty_states + if state not in self._deleted]) + + @property + def deleted(self): + "The set of all instances marked as 'deleted' within this ``Session``." + + return util.IdentitySet(list(self._deleted.values())) + + @property + def new(self): + "The set of all instances marked as 'new' within this ``Session``." + + return util.IdentitySet(list(self._new.values())) + + +class sessionmaker(_SessionClassMethods): + """A configurable :class:`.Session` factory. + + The :class:`.sessionmaker` factory generates new + :class:`.Session` objects when called, creating them given + the configurational arguments established here. + + e.g.:: + + # global scope + Session = sessionmaker(autoflush=False) + + # later, in a local scope, create and use a session: + sess = Session() + + Any keyword arguments sent to the constructor itself will override the + "configured" keywords:: + + Session = sessionmaker() + + # bind an individual session to a connection + sess = Session(bind=connection) + + The class also includes a method :meth:`.configure`, which can + be used to specify additional keyword arguments to the factory, which + will take effect for subsequent :class:`.Session` objects generated. + This is usually used to associate one or more :class:`.Engine` objects + with an existing :class:`.sessionmaker` factory before it is first + used:: + + # application starts + Session = sessionmaker() + + # ... later + engine = create_engine('sqlite:///foo.db') + Session.configure(bind=engine) + + sess = Session() + + .. seealso:: + + :ref:`session_getting` - introductory text on creating + sessions using :class:`.sessionmaker`. + + """ + + def __init__(self, bind=None, class_=Session, autoflush=True, + autocommit=False, + expire_on_commit=True, + info=None, **kw): + r"""Construct a new :class:`.sessionmaker`. + + All arguments here except for ``class_`` correspond to arguments + accepted by :class:`.Session` directly. See the + :meth:`.Session.__init__` docstring for more details on parameters. + + :param bind: a :class:`.Engine` or other :class:`.Connectable` with + which newly created :class:`.Session` objects will be associated. + :param class_: class to use in order to create new :class:`.Session` + objects. Defaults to :class:`.Session`. + :param autoflush: The autoflush setting to use with newly created + :class:`.Session` objects. + :param autocommit: The autocommit setting to use with newly created + :class:`.Session` objects. + :param expire_on_commit=True: the expire_on_commit setting to use + with newly created :class:`.Session` objects. + :param info: optional dictionary of information that will be available + via :attr:`.Session.info`. 
Note this dictionary is *updated*, not + replaced, when the ``info`` parameter is specified to the specific + :class:`.Session` construction operation. + + .. versionadded:: 0.9.0 + + :param \**kw: all other keyword arguments are passed to the + constructor of newly created :class:`.Session` objects. + + """ + kw['bind'] = bind + kw['autoflush'] = autoflush + kw['autocommit'] = autocommit + kw['expire_on_commit'] = expire_on_commit + if info is not None: + kw['info'] = info + self.kw = kw + # make our own subclass of the given class, so that + # events can be associated with it specifically. + self.class_ = type(class_.__name__, (class_,), {}) + + def __call__(self, **local_kw): + """Produce a new :class:`.Session` object using the configuration + established in this :class:`.sessionmaker`. + + In Python, the ``__call__`` method is invoked on an object when + it is "called" in the same way as a function:: + + Session = sessionmaker() + session = Session() # invokes sessionmaker.__call__() + + """ + for k, v in self.kw.items(): + if k == 'info' and 'info' in local_kw: + d = v.copy() + d.update(local_kw['info']) + local_kw['info'] = d + else: + local_kw.setdefault(k, v) + return self.class_(**local_kw) + + def configure(self, **new_kw): + """(Re)configure the arguments for this sessionmaker. + + e.g.:: + + Session = sessionmaker() + + Session.configure(bind=create_engine('sqlite://')) + """ + self.kw.update(new_kw) + + def __repr__(self): + return "%s(class_=%r,%s)" % ( + self.__class__.__name__, + self.class_.__name__, + ", ".join("%s=%r" % (k, v) for k, v in self.kw.items()) + ) + + +def make_transient(instance): + """Alter the state of the given instance so that it is :term:`transient`. + + .. note:: + + :func:`.make_transient` is a special-case function for + advanced use cases only. + + The given mapped instance is assumed to be in the :term:`persistent` or + :term:`detached` state. The function will remove its association with any + :class:`.Session` as well as its :attr:`.InstanceState.identity`. The + effect is that the object will behave as though it were newly constructed, + except retaining any attribute / collection values that were loaded at the + time of the call. The :attr:`.InstanceState.deleted` flag is also reset + if this object had been deleted as a result of using + :meth:`.Session.delete`. + + .. warning:: + + :func:`.make_transient` does **not** "unexpire" or otherwise eagerly + load ORM-mapped attributes that are not currently loaded at the time + the function is called. This includes attributes which: + + * were expired via :meth:`.Session.expire` + + * were expired as the natural effect of committing a session + transaction, e.g. :meth:`.Session.commit` + + * are normally :term:`lazy loaded` but are not currently loaded + + * are "deferred" via :ref:`deferred` and are not yet loaded + + * were not present in the query which loaded this object, such as that + which is common in joined table inheritance and other scenarios. + + After :func:`.make_transient` is called, unloaded attributes such + as those above will normally resolve to the value ``None`` when + accessed, or an empty collection for a collection-oriented attribute. + As the object is transient and un-associated with any database + identity, it will no longer retrieve these values. + + .. 
seealso:: + + :func:`.make_transient_to_detached` + + """ + state = attributes.instance_state(instance) + s = _state_session(state) + if s: + s._expunge_states([state]) + + # remove expired state + state.expired_attributes.clear() + + # remove deferred callables + if state.callables: + del state.callables + + if state.key: + del state.key + if state._deleted: + del state._deleted + + +def make_transient_to_detached(instance): + """Make the given transient instance :term:`detached`. + + .. note:: + + :func:`.make_transient_to_detached` is a special-case function for + advanced use cases only. + + All attribute history on the given instance + will be reset as though the instance were freshly loaded + from a query. Missing attributes will be marked as expired. + The primary key attributes of the object, which are required, will be made + into the "key" of the instance. + + The object can then be added to a session, or merged + possibly with the load=False flag, at which point it will look + as if it were loaded that way, without emitting SQL. + + This is a special use case function that differs from a normal + call to :meth:`.Session.merge` in that a given persistent state + can be manufactured without any SQL calls. + + .. versionadded:: 0.9.5 + + .. seealso:: + + :func:`.make_transient` + + """ + state = attributes.instance_state(instance) + if state.session_id or state.key: + raise sa_exc.InvalidRequestError( + "Given object must be transient") + state.key = state.mapper._identity_key_from_state(state) + if state._deleted: + del state._deleted + state._commit_all(state.dict) + state._expire_attributes(state.dict, state.unloaded) + + +def object_session(instance): + """Return the :class:`.Session` to which the given instance belongs. + + This is essentially the same as the :attr:`.InstanceState.session` + accessor. See that attribute for details. + + """ + + try: + state = attributes.instance_state(instance) + except exc.NO_STATE: + raise exc.UnmappedInstanceError(instance) + else: + return _state_session(state) + + +_new_sessionid = util.counter() diff --git a/app/lib/sqlalchemy/orm/state.py b/app/lib/sqlalchemy/orm/state.py new file mode 100644 index 0000000..0fba240 --- /dev/null +++ b/app/lib/sqlalchemy/orm/state.py @@ -0,0 +1,847 @@ +# orm/state.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Defines instrumentation of instances. + +This module is usually not directly visible to user applications, but +defines a large part of the ORM's interactivity. + +""" + +import weakref +from .. import util +from .. import inspection +from . import exc as orm_exc, interfaces +from .path_registry import PathRegistry +from .base import PASSIVE_NO_RESULT, SQL_OK, NEVER_SET, ATTR_WAS_SET, \ + NO_VALUE, PASSIVE_NO_INITIALIZE, INIT_OK, PASSIVE_OFF +from . import base + + +@inspection._self_inspects +class InstanceState(interfaces.InspectionAttr): + """tracks state information at the instance level. + + The :class:`.InstanceState` is a key object used by the + SQLAlchemy ORM in order to track the state of an object; + it is created the moment an object is instantiated, typically + as a result of :term:`instrumentation` which SQLAlchemy applies + to the ``__init__()`` method of the class. 
+ + :class:`.InstanceState` is also a semi-public object, + available for runtime inspection as to the state of a + mapped instance, including information such as its current + status within a particular :class:`.Session` and details + about data on individual attributes. The public API + in order to acquire an :class:`.InstanceState` object + is to use the :func:`.inspect` system:: + + >>> from sqlalchemy import inspect + >>> insp = inspect(some_mapped_object) + + .. seealso:: + + :ref:`core_inspection_toplevel` + + """ + + session_id = None + key = None + runid = None + load_options = util.EMPTY_SET + load_path = () + insert_order = None + _strong_obj = None + modified = False + expired = False + _deleted = False + _load_pending = False + is_instance = True + + callables = () + """A namespace where a per-state loader callable can be associated. + + In SQLAlchemy 1.0, this is only used for lazy loaders / deferred + loaders that were set up via query option. + + Previously, callables was used also to indicate expired attributes + by storing a link to the InstanceState itself in this dictionary. + This role is now handled by the expired_attributes set. + + """ + + def __init__(self, obj, manager): + self.class_ = obj.__class__ + self.manager = manager + self.obj = weakref.ref(obj, self._cleanup) + self.committed_state = {} + self.expired_attributes = set() + + expired_attributes = None + """The set of keys which are 'expired' to be loaded by + the manager's deferred scalar loader, assuming no pending + changes. + + see also the ``unmodified`` collection which is intersected + against this set when a refresh operation occurs.""" + + @util.memoized_property + def attrs(self): + """Return a namespace representing each attribute on + the mapped object, including its current value + and history. + + The returned object is an instance of :class:`.AttributeState`. + This object allows inspection of the current data + within an attribute as well as attribute history + since the last flush. + + """ + return util.ImmutableProperties( + dict( + (key, AttributeState(self, key)) + for key in self.manager + ) + ) + + @property + def transient(self): + """Return true if the object is :term:`transient`. + + .. seealso:: + + :ref:`session_object_states` + + """ + return self.key is None and \ + not self._attached + + @property + def pending(self): + """Return true if the object is :term:`pending`. + + + .. seealso:: + + :ref:`session_object_states` + + """ + return self.key is None and \ + self._attached + + @property + def deleted(self): + """Return true if the object is :term:`deleted`. + + An object that is in the deleted state is guaranteed to + not be within the :attr:`.Session.identity_map` of its parent + :class:`.Session`; however if the session's transaction is rolled + back, the object will be restored to the persistent state and + the identity map. + + .. note:: + + The :attr:`.InstanceState.deleted` attribute refers to a specific + state of the object that occurs between the "persistent" and + "detached" states; once the object is :term:`detached`, the + :attr:`.InstanceState.deleted` attribute **no longer returns + True**; in order to detect that a state was deleted, regardless + of whether or not the object is associated with a :class:`.Session`, + use the :attr:`.InstanceState.was_deleted` accessor. + + .. versionadded:: 1.1 + + .. 
seealso:: + + :ref:`session_object_states` + + """ + return self.key is not None and \ + self._attached and self._deleted + + @property + def was_deleted(self): + """Return True if this object is or was previously in the + "deleted" state and has not been reverted to persistent. + + This flag returns True once the object was deleted in flush. + When the object is expunged from the session either explicitly + or via transaction commit and enters the "detached" state, + this flag will continue to report True. + + .. versionadded:: 1.1 - added a local method form of + :func:`.orm.util.was_deleted`. + + .. seealso:: + + :attr:`.InstanceState.deleted` - refers to the "deleted" state + + :func:`.orm.util.was_deleted` - standalone function + + :ref:`session_object_states` + + """ + return self._deleted + + @property + def persistent(self): + """Return true if the object is :term:`persistent`. + + An object that is in the persistent state is guaranteed to + be within the :attr:`.Session.identity_map` of its parent + :class:`.Session`. + + .. versionchanged:: 1.1 The :attr:`.InstanceState.persistent` + accessor no longer returns True for an object that was + "deleted" within a flush; use the :attr:`.InstanceState.deleted` + accessor to detect this state. This allows the "persistent" + state to guarantee membership in the identity map. + + .. seealso:: + + :ref:`session_object_states` + + """ + return self.key is not None and \ + self._attached and not self._deleted + + @property + def detached(self): + """Return true if the object is :term:`detached`. + + .. seealso:: + + :ref:`session_object_states` + + """ + return self.key is not None and not self._attached + + @property + @util.dependencies("sqlalchemy.orm.session") + def _attached(self, sessionlib): + return self.session_id is not None and \ + self.session_id in sessionlib._sessions + + @property + @util.dependencies("sqlalchemy.orm.session") + def session(self, sessionlib): + """Return the owning :class:`.Session` for this instance, + or ``None`` if none available. + + Note that the result here can in some cases be *different* + from that of ``obj in session``; an object that's been deleted + will report as not ``in session``, however if the transaction is + still in progress, this attribute will still refer to that session. + Only when the transaction is completed does the object become + fully detached under normal circumstances. + + """ + return sessionlib._state_session(self) + + @property + def object(self): + """Return the mapped object represented by this + :class:`.InstanceState`.""" + return self.obj() + + @property + def identity(self): + """Return the mapped identity of the mapped object. + This is the primary key identity as persisted by the ORM + which can always be passed directly to + :meth:`.Query.get`. + + Returns ``None`` if the object has no primary key identity. + + .. note:: + An object which is :term:`transient` or :term:`pending` + does **not** have a mapped identity until it is flushed, + even if its attributes include primary key values. + + """ + if self.key is None: + return None + else: + return self.key[1] + + @property + def identity_key(self): + """Return the identity key for the mapped object. + + This is the key used to locate the object within + the :attr:`.Session.identity_map` mapping. It contains + the identity as returned by :attr:`.identity` within it. + + + """ + # TODO: just change .key to .identity_key across + # the board ? 
probably + return self.key + + @util.memoized_property + def parents(self): + return {} + + @util.memoized_property + def _pending_mutations(self): + return {} + + @util.memoized_property + def mapper(self): + """Return the :class:`.Mapper` used for this mapped object.""" + return self.manager.mapper + + @property + def has_identity(self): + """Return ``True`` if this object has an identity key. + + This should always have the same value as the + expression ``state.persistent or state.detached``. + + """ + return bool(self.key) + + @classmethod + def _detach_states(self, states, session, to_transient=False): + persistent_to_detached = \ + session.dispatch.persistent_to_detached or None + deleted_to_detached = \ + session.dispatch.deleted_to_detached or None + pending_to_transient = \ + session.dispatch.pending_to_transient or None + persistent_to_transient = \ + session.dispatch.persistent_to_transient or None + + for state in states: + deleted = state._deleted + pending = state.key is None + persistent = not pending and not deleted + + state.session_id = None + + if to_transient and state.key: + del state.key + if persistent: + if to_transient: + if persistent_to_transient is not None: + obj = state.obj() + if obj is not None: + persistent_to_transient(session, obj) + elif persistent_to_detached is not None: + obj = state.obj() + if obj is not None: + persistent_to_detached(session, obj) + elif deleted and deleted_to_detached is not None: + obj = state.obj() + if obj is not None: + deleted_to_detached(session, obj) + elif pending and pending_to_transient is not None: + obj = state.obj() + if obj is not None: + pending_to_transient(session, obj) + + state._strong_obj = None + + def _detach(self, session=None): + if session: + InstanceState._detach_states([self], session) + else: + self.session_id = self._strong_obj = None + + def _dispose(self): + self._detach() + del self.obj + + def _cleanup(self, ref): + """Weakref callback cleanup. + + This callable cleans out the state when it is being garbage + collected. + + this _cleanup **assumes** that there are no strong refs to us! + Will not work otherwise! + + """ + instance_dict = self._instance_dict() + if instance_dict is not None: + instance_dict._fast_discard(self) + del self._instance_dict + + # we can't possibly be in instance_dict._modified + # b.c. this is weakref cleanup only, that set + # is strong referencing! + # assert self not in instance_dict._modified + + self.session_id = self._strong_obj = None + del self.obj + + def obj(self): + return None + + @property + def dict(self): + """Return the instance dict used by the object. + + Under normal circumstances, this is always synonymous + with the ``__dict__`` attribute of the mapped object, + unless an alternative instrumentation system has been + configured. + + In the case that the actual object has been garbage + collected, this accessor returns a blank dictionary. 
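+
+        E.g., a minimal sketch, assuming ``obj`` is some mapped instance::
+
+            from sqlalchemy import inspect
+
+            state = inspect(obj)
+            state.dict   # plain dict of the currently loaded attributes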
+ + """ + o = self.obj() + if o is not None: + return base.instance_dict(o) + else: + return {} + + def _initialize_instance(*mixed, **kwargs): + self, instance, args = mixed[0], mixed[1], mixed[2:] # noqa + manager = self.manager + + manager.dispatch.init(self, args, kwargs) + + try: + return manager.original_init(*mixed[1:], **kwargs) + except: + with util.safe_reraise(): + manager.dispatch.init_failure(self, args, kwargs) + + def get_history(self, key, passive): + return self.manager[key].impl.get_history(self, self.dict, passive) + + def get_impl(self, key): + return self.manager[key].impl + + def _get_pending_mutation(self, key): + if key not in self._pending_mutations: + self._pending_mutations[key] = PendingCollection() + return self._pending_mutations[key] + + def __getstate__(self): + state_dict = {'instance': self.obj()} + state_dict.update( + (k, self.__dict__[k]) for k in ( + 'committed_state', '_pending_mutations', 'modified', + 'expired', 'callables', 'key', 'parents', 'load_options', + 'class_', 'expired_attributes' + ) if k in self.__dict__ + ) + if self.load_path: + state_dict['load_path'] = self.load_path.serialize() + + state_dict['manager'] = self.manager._serialize(self, state_dict) + + return state_dict + + def __setstate__(self, state_dict): + inst = state_dict['instance'] + if inst is not None: + self.obj = weakref.ref(inst, self._cleanup) + self.class_ = inst.__class__ + else: + # None being possible here generally new as of 0.7.4 + # due to storage of state in "parents". "class_" + # also new. + self.obj = None + self.class_ = state_dict['class_'] + + self.committed_state = state_dict.get('committed_state', {}) + self._pending_mutations = state_dict.get('_pending_mutations', {}) + self.parents = state_dict.get('parents', {}) + self.modified = state_dict.get('modified', False) + self.expired = state_dict.get('expired', False) + if 'callables' in state_dict: + self.callables = state_dict['callables'] + + try: + self.expired_attributes = state_dict['expired_attributes'] + except KeyError: + self.expired_attributes = set() + # 0.9 and earlier compat + for k in list(self.callables): + if self.callables[k] is self: + self.expired_attributes.add(k) + del self.callables[k] + + self.__dict__.update([ + (k, state_dict[k]) for k in ( + 'key', 'load_options', + ) if k in state_dict + ]) + + if 'load_path' in state_dict: + self.load_path = PathRegistry.\ + deserialize(state_dict['load_path']) + + state_dict['manager'](self, inst, state_dict) + + def _reset(self, dict_, key): + """Remove the given attribute and any + callables associated with it.""" + + old = dict_.pop(key, None) + if old is not None and self.manager[key].impl.collection: + self.manager[key].impl._invalidate_collection(old) + self.expired_attributes.discard(key) + if self.callables: + self.callables.pop(key, None) + + def _copy_callables(self, from_): + if 'callables' in from_.__dict__: + self.callables = dict(from_.callables) + + @classmethod + def _instance_level_callable_processor(cls, manager, fn, key): + impl = manager[key].impl + if impl.collection: + def _set_callable(state, dict_, row): + if 'callables' not in state.__dict__: + state.callables = {} + old = dict_.pop(key, None) + if old is not None: + impl._invalidate_collection(old) + state.callables[key] = fn + else: + def _set_callable(state, dict_, row): + if 'callables' not in state.__dict__: + state.callables = {} + state.callables[key] = fn + return _set_callable + + def _expire(self, dict_, modified_set): + self.expired = True + + if 
self.modified: + modified_set.discard(self) + self.committed_state.clear() + self.modified = False + + self._strong_obj = None + + if '_pending_mutations' in self.__dict__: + del self.__dict__['_pending_mutations'] + + if 'parents' in self.__dict__: + del self.__dict__['parents'] + + self.expired_attributes.update( + [impl.key for impl in self.manager._scalar_loader_impls + if impl.expire_missing or impl.key in dict_] + ) + + if self.callables: + for k in self.expired_attributes.intersection(self.callables): + del self.callables[k] + + for k in self.manager._collection_impl_keys.intersection(dict_): + collection = dict_.pop(k) + collection._sa_adapter.invalidated = True + + for key in self.manager._all_key_set.intersection(dict_): + del dict_[key] + + self.manager.dispatch.expire(self, None) + + def _expire_attributes(self, dict_, attribute_names, no_loader=False): + pending = self.__dict__.get('_pending_mutations', None) + + callables = self.callables + + for key in attribute_names: + impl = self.manager[key].impl + if impl.accepts_scalar_loader: + if no_loader and ( + impl.callable_ or + key in callables + ): + continue + + self.expired_attributes.add(key) + if callables and key in callables: + del callables[key] + old = dict_.pop(key, None) + if impl.collection and old is not None: + impl._invalidate_collection(old) + + self.committed_state.pop(key, None) + if pending: + pending.pop(key, None) + + self.manager.dispatch.expire(self, attribute_names) + + def _load_expired(self, state, passive): + """__call__ allows the InstanceState to act as a deferred + callable for loading expired attributes, which is also + serializable (picklable). + + """ + + if not passive & SQL_OK: + return PASSIVE_NO_RESULT + + toload = self.expired_attributes.\ + intersection(self.unmodified) + + self.manager.deferred_scalar_loader(self, toload) + + # if the loader failed, or this + # instance state didn't have an identity, + # the attributes still might be in the callables + # dict. ensure they are removed. + self.expired_attributes.clear() + + return ATTR_WAS_SET + + @property + def unmodified(self): + """Return the set of keys which have no uncommitted changes""" + + return set(self.manager).difference(self.committed_state) + + def unmodified_intersection(self, keys): + """Return self.unmodified.intersection(keys).""" + + return set(keys).intersection(self.manager).\ + difference(self.committed_state) + + @property + def unloaded(self): + """Return the set of keys which do not have a loaded value. + + This includes expired attributes and any other attribute that + was never populated or modified. 
+ + """ + return set(self.manager).\ + difference(self.committed_state).\ + difference(self.dict) + + @property + def _unloaded_non_object(self): + return self.unloaded.intersection( + attr for attr in self.manager + if self.manager[attr].impl.accepts_scalar_loader + ) + + def _instance_dict(self): + return None + + def _modified_event( + self, dict_, attr, previous, collection=False, force=False): + if not attr.send_modified_events: + return + if attr.key not in self.committed_state or force: + if collection: + if previous is NEVER_SET: + if attr.key in dict_: + previous = dict_[attr.key] + + if previous not in (None, NO_VALUE, NEVER_SET): + previous = attr.copy(previous) + + self.committed_state[attr.key] = previous + + # assert self._strong_obj is None or self.modified + + if (self.session_id and self._strong_obj is None) \ + or not self.modified: + self.modified = True + instance_dict = self._instance_dict() + if instance_dict: + instance_dict._modified.add(self) + + # only create _strong_obj link if attached + # to a session + + inst = self.obj() + if self.session_id: + self._strong_obj = inst + + if inst is None: + raise orm_exc.ObjectDereferencedError( + "Can't emit change event for attribute '%s' - " + "parent object of type %s has been garbage " + "collected." + % ( + self.manager[attr.key], + base.state_class_str(self) + )) + + def _commit(self, dict_, keys): + """Commit attributes. + + This is used by a partial-attribute load operation to mark committed + those attributes which were refreshed from the database. + + Attributes marked as "expired" can potentially remain "expired" after + this step if a value was not populated in state.dict. + + """ + for key in keys: + self.committed_state.pop(key, None) + + self.expired = False + + self.expired_attributes.difference_update( + set(keys).intersection(dict_)) + + # the per-keys commit removes object-level callables, + # while that of commit_all does not. it's not clear + # if this behavior has a clear rationale, however tests do + # ensure this is what it does. + if self.callables: + for key in set(self.callables).\ + intersection(keys).\ + intersection(dict_): + del self.callables[key] + + def _commit_all(self, dict_, instance_dict=None): + """commit all attributes unconditionally. + + This is used after a flush() or a full load/refresh + to remove all pending state from the instance. + + - all attributes are marked as "committed" + - the "strong dirty reference" is removed + - the "modified" flag is set to False + - any "expired" markers for scalar attributes loaded are removed. + - lazy load callables for objects / collections *stay* + + Attributes marked as "expired" can potentially remain + "expired" after this step if a value was not populated in state.dict. + + """ + self._commit_all_states([(self, dict_)], instance_dict) + + @classmethod + def _commit_all_states(self, iter, instance_dict=None): + """Mass / highly inlined version of commit_all().""" + + for state, dict_ in iter: + state_dict = state.__dict__ + + state.committed_state.clear() + + if '_pending_mutations' in state_dict: + del state_dict['_pending_mutations'] + + state.expired_attributes.difference_update(dict_) + + if instance_dict and state.modified: + instance_dict._modified.discard(state) + + state.modified = state.expired = False + state._strong_obj = None + + +class AttributeState(object): + """Provide an inspection interface corresponding + to a particular attribute on a particular mapped object. 
+ + The :class:`.AttributeState` object is accessed + via the :attr:`.InstanceState.attrs` collection + of a particular :class:`.InstanceState`:: + + from sqlalchemy import inspect + + insp = inspect(some_mapped_object) + attr_state = insp.attrs.some_attribute + + """ + + def __init__(self, state, key): + self.state = state + self.key = key + + @property + def loaded_value(self): + """The current value of this attribute as loaded from the database. + + If the value has not been loaded, or is otherwise not present + in the object's dictionary, returns NO_VALUE. + + """ + return self.state.dict.get(self.key, NO_VALUE) + + @property + def value(self): + """Return the value of this attribute. + + This operation is equivalent to accessing the object's + attribute directly or via ``getattr()``, and will fire + off any pending loader callables if needed. + + """ + return self.state.manager[self.key].__get__( + self.state.obj(), self.state.class_) + + @property + def history(self): + """Return the current pre-flush change history for + this attribute, via the :class:`.History` interface. + + This method will **not** emit loader callables if the value of the + attribute is unloaded. + + .. seealso:: + + :meth:`.AttributeState.load_history` - retrieve history + using loader callables if the value is not locally present. + + :func:`.attributes.get_history` - underlying function + + """ + return self.state.get_history(self.key, + PASSIVE_NO_INITIALIZE) + + def load_history(self): + """Return the current pre-flush change history for + this attribute, via the :class:`.History` interface. + + This method **will** emit loader callables if the value of the + attribute is unloaded. + + .. seealso:: + + :attr:`.AttributeState.history` + + :func:`.attributes.get_history` - underlying function + + .. versionadded:: 0.9.0 + + """ + return self.state.get_history(self.key, + PASSIVE_OFF ^ INIT_OK) + + +class PendingCollection(object): + """A writable placeholder for an unloaded collection. + + Stores items appended to and removed from a collection that has not yet + been loaded. When the collection is loaded, the changes stored in + PendingCollection are applied to it to produce the final result. + + """ + + def __init__(self): + self.deleted_items = util.IdentitySet() + self.added_items = util.OrderedIdentitySet() + + def append(self, value): + if value in self.deleted_items: + self.deleted_items.remove(value) + else: + self.added_items.add(value) + + def remove(self, value): + if value in self.added_items: + self.added_items.remove(value) + else: + self.deleted_items.add(value) diff --git a/app/lib/sqlalchemy/orm/strategies.py b/app/lib/sqlalchemy/orm/strategies.py new file mode 100644 index 0000000..c70994e --- /dev/null +++ b/app/lib/sqlalchemy/orm/strategies.py @@ -0,0 +1,1707 @@ +# orm/strategies.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""sqlalchemy.orm.interfaces.LoaderStrategy + implementations, and related MapperOptions.""" + +from .. import exc as sa_exc, inspect +from .. import util, log, event +from ..sql import util as sql_util, visitors +from .. import sql +from . import ( + attributes, interfaces, exc as orm_exc, loading, + unitofwork, util as orm_util +) +from .state import InstanceState +from .util import _none_set +from . 
import properties +from .interfaces import ( + LoaderStrategy, StrategizedProperty +) +from .base import _SET_DEFERRED_EXPIRED, _DEFER_FOR_STATE +from .session import _state_session +import itertools + + +def _register_attribute( + prop, mapper, useobject, + compare_function=None, + typecallable=None, + callable_=None, + proxy_property=None, + active_history=False, + impl_class=None, + **kw +): + + attribute_ext = list(util.to_list(prop.extension, default=[])) + + listen_hooks = [] + + uselist = useobject and prop.uselist + + if useobject and prop.single_parent: + listen_hooks.append(single_parent_validator) + + if prop.key in prop.parent.validators: + fn, opts = prop.parent.validators[prop.key] + listen_hooks.append( + lambda desc, prop: orm_util._validator_events( + desc, + prop.key, fn, **opts) + ) + + if useobject: + listen_hooks.append(unitofwork.track_cascade_events) + + # need to assemble backref listeners + # after the singleparentvalidator, mapper validator + if useobject: + backref = prop.back_populates + if backref: + listen_hooks.append( + lambda desc, prop: attributes.backref_listeners( + desc, + backref, + uselist + ) + ) + + # a single MapperProperty is shared down a class inheritance + # hierarchy, so we set up attribute instrumentation and backref event + # for each mapper down the hierarchy. + + # typically, "mapper" is the same as prop.parent, due to the way + # the configure_mappers() process runs, however this is not strongly + # enforced, and in the case of a second configure_mappers() run the + # mapper here might not be prop.parent; also, a subclass mapper may + # be called here before a superclass mapper. That is, can't depend + # on mappers not already being set up so we have to check each one. + + for m in mapper.self_and_descendants: + if prop is m._props.get(prop.key) and \ + not m.class_manager._attr_has_impl(prop.key): + + desc = attributes.register_attribute_impl( + m.class_, + prop.key, + parent_token=prop, + uselist=uselist, + compare_function=compare_function, + useobject=useobject, + extension=attribute_ext, + trackparent=useobject and ( + prop.single_parent or + prop.direction is interfaces.ONETOMANY), + typecallable=typecallable, + callable_=callable_, + active_history=active_history, + impl_class=impl_class, + send_modified_events=not useobject or not prop.viewonly, + doc=prop.doc, + **kw + ) + + for hook in listen_hooks: + hook(desc, prop) + + +@properties.ColumnProperty.strategy_for(instrument=False, deferred=False) +class UninstrumentedColumnLoader(LoaderStrategy): + """Represent a non-instrumented MapperProperty. + + The polymorphic_on argument of mapper() often results in this, + if the argument is against the with_polymorphic selectable. 
+ + """ + __slots__ = 'columns', + + def __init__(self, parent, strategy_key): + super(UninstrumentedColumnLoader, self).__init__(parent, strategy_key) + self.columns = self.parent_property.columns + + def setup_query( + self, context, entity, path, loadopt, adapter, + column_collection=None, **kwargs): + for c in self.columns: + if adapter: + c = adapter.columns[c] + column_collection.append(c) + + def create_row_processor( + self, context, path, loadopt, + mapper, result, adapter, populators): + pass + + +@log.class_logger +@properties.ColumnProperty.strategy_for(instrument=True, deferred=False) +class ColumnLoader(LoaderStrategy): + """Provide loading behavior for a :class:`.ColumnProperty`.""" + + __slots__ = 'columns', 'is_composite' + + def __init__(self, parent, strategy_key): + super(ColumnLoader, self).__init__(parent, strategy_key) + self.columns = self.parent_property.columns + self.is_composite = hasattr(self.parent_property, 'composite_class') + + def setup_query( + self, context, entity, path, loadopt, + adapter, column_collection, memoized_populators, **kwargs): + + for c in self.columns: + if adapter: + c = adapter.columns[c] + column_collection.append(c) + + fetch = self.columns[0] + if adapter: + fetch = adapter.columns[fetch] + memoized_populators[self.parent_property] = fetch + + def init_class_attribute(self, mapper): + self.is_class_level = True + coltype = self.columns[0].type + # TODO: check all columns ? check for foreign key as well? + active_history = self.parent_property.active_history or \ + self.columns[0].primary_key or \ + mapper.version_id_col in set(self.columns) + + _register_attribute( + self.parent_property, mapper, useobject=False, + compare_function=coltype.compare_values, + active_history=active_history + ) + + def create_row_processor( + self, context, path, + loadopt, mapper, result, adapter, populators): + # look through list of columns represented here + # to see which, if any, is present in the row. + for col in self.columns: + if adapter: + col = adapter.columns[col] + getter = result._getter(col, False) + if getter: + populators["quick"].append((self.key, getter)) + break + else: + populators["expire"].append((self.key, True)) + + +@log.class_logger +@properties.ColumnProperty.strategy_for(deferred=True, instrument=True) +class DeferredColumnLoader(LoaderStrategy): + """Provide loading behavior for a deferred :class:`.ColumnProperty`.""" + + __slots__ = 'columns', 'group' + + def __init__(self, parent, strategy_key): + super(DeferredColumnLoader, self).__init__(parent, strategy_key) + if hasattr(self.parent_property, 'composite_class'): + raise NotImplementedError("Deferred loading for composite " + "types not implemented yet") + self.columns = self.parent_property.columns + self.group = self.parent_property.group + + def create_row_processor( + self, context, path, loadopt, + mapper, result, adapter, populators): + + # this path currently does not check the result + # for the column; this is because in most cases we are + # working just with the setup_query() directive which does + # not support this, and the behavior here should be consistent. 
+ if not self.is_class_level: + set_deferred_for_local_state = \ + self.parent_property._deferred_column_loader + populators["new"].append((self.key, set_deferred_for_local_state)) + else: + populators["expire"].append((self.key, False)) + + def init_class_attribute(self, mapper): + self.is_class_level = True + + _register_attribute( + self.parent_property, mapper, useobject=False, + compare_function=self.columns[0].type.compare_values, + callable_=self._load_for_state, + expire_missing=False + ) + + def setup_query( + self, context, entity, path, loadopt, + adapter, column_collection, memoized_populators, + only_load_props=None, **kw): + + if ( + ( + loadopt and + 'undefer_pks' in loadopt.local_opts and + set(self.columns).intersection( + self.parent._should_undefer_in_wildcard) + ) + or + ( + loadopt and + self.group and + loadopt.local_opts.get('undefer_group_%s' % self.group, False) + ) + or + ( + only_load_props and self.key in only_load_props + ) + ): + self.parent_property._get_strategy( + (("deferred", False), ("instrument", True)) + ).setup_query( + context, entity, + path, loadopt, adapter, + column_collection, memoized_populators, **kw) + elif self.is_class_level: + memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED + else: + memoized_populators[self.parent_property] = _DEFER_FOR_STATE + + def _load_for_state(self, state, passive): + if not state.key: + return attributes.ATTR_EMPTY + + if not passive & attributes.SQL_OK: + return attributes.PASSIVE_NO_RESULT + + localparent = state.manager.mapper + + if self.group: + toload = [ + p.key for p in + localparent.iterate_properties + if isinstance(p, StrategizedProperty) and + isinstance(p.strategy, DeferredColumnLoader) and + p.group == self.group + ] + else: + toload = [self.key] + + # narrow the keys down to just those which have no history + group = [k for k in toload if k in state.unmodified] + + session = _state_session(state) + if session is None: + raise orm_exc.DetachedInstanceError( + "Parent instance %s is not bound to a Session; " + "deferred load operation of attribute '%s' cannot proceed" % + (orm_util.state_str(state), self.key) + ) + + query = session.query(localparent) + if loading.load_on_ident( + query, state.key, + only_load_props=group, refresh_state=state) is None: + raise orm_exc.ObjectDeletedError(state) + + return attributes.ATTR_WAS_SET + + +class LoadDeferredColumns(object): + """serializable loader object used by DeferredColumnLoader""" + + def __init__(self, key): + self.key = key + + def __call__(self, state, passive=attributes.PASSIVE_OFF): + key = self.key + + localparent = state.manager.mapper + prop = localparent._props[key] + strategy = prop._strategies[DeferredColumnLoader] + return strategy._load_for_state(state, passive) + + +class AbstractRelationshipLoader(LoaderStrategy): + """LoaderStrategies which deal with related objects.""" + + __slots__ = 'mapper', 'target', 'uselist' + + def __init__(self, parent, strategy_key): + super(AbstractRelationshipLoader, self).__init__(parent, strategy_key) + self.mapper = self.parent_property.mapper + self.target = self.parent_property.target + self.uselist = self.parent_property.uselist + + +@log.class_logger +@properties.RelationshipProperty.strategy_for(lazy="noload") +@properties.RelationshipProperty.strategy_for(lazy=None) +class NoLoader(AbstractRelationshipLoader): + """Provide loading behavior for a :class:`.RelationshipProperty` + with "lazy=None". 
+ + """ + + __slots__ = () + + def init_class_attribute(self, mapper): + self.is_class_level = True + + _register_attribute( + self.parent_property, mapper, + useobject=True, + typecallable=self.parent_property.collection_class, + ) + + def create_row_processor( + self, context, path, loadopt, mapper, + result, adapter, populators): + def invoke_no_load(state, dict_, row): + if self.uselist: + state.manager.get_impl(self.key).initialize(state, dict_) + else: + dict_[self.key] = None + populators["new"].append((self.key, invoke_no_load)) + + +@log.class_logger +@properties.RelationshipProperty.strategy_for(lazy=True) +@properties.RelationshipProperty.strategy_for(lazy="select") +@properties.RelationshipProperty.strategy_for(lazy="raise") +@properties.RelationshipProperty.strategy_for(lazy="raise_on_sql") +class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots): + """Provide loading behavior for a :class:`.RelationshipProperty` + with "lazy=True", that is loads when first accessed. + + """ + + __slots__ = ( + '_lazywhere', '_rev_lazywhere', 'use_get', '_bind_to_col', + '_equated_columns', '_rev_bind_to_col', '_rev_equated_columns', + '_simple_lazy_clause', '_raise_always', '_raise_on_sql') + + def __init__(self, parent, strategy_key): + super(LazyLoader, self).__init__(parent, strategy_key) + self._raise_always = self.strategy_opts["lazy"] == "raise" + self._raise_on_sql = self.strategy_opts["lazy"] == "raise_on_sql" + + join_condition = self.parent_property._join_condition + self._lazywhere, \ + self._bind_to_col, \ + self._equated_columns = join_condition.create_lazy_clause() + + self._rev_lazywhere, \ + self._rev_bind_to_col, \ + self._rev_equated_columns = join_condition.create_lazy_clause( + reverse_direction=True) + + self.logger.info("%s lazy loading clause %s", self, self._lazywhere) + + # determine if our "lazywhere" clause is the same as the mapper's + # get() clause. then we can just use mapper.get() + self.use_get = not self.uselist and \ + self.mapper._get_clause[0].compare( + self._lazywhere, + use_proxies=True, + equivalents=self.mapper._equivalent_columns + ) + + if self.use_get: + for col in list(self._equated_columns): + if col in self.mapper._equivalent_columns: + for c in self.mapper._equivalent_columns[col]: + self._equated_columns[c] = self._equated_columns[col] + + self.logger.info("%s will use query.get() to " + "optimize instance loads", self) + + def init_class_attribute(self, mapper): + self.is_class_level = True + + active_history = ( + self.parent_property.active_history or + self.parent_property.direction is not interfaces.MANYTOONE or + not self.use_get + ) + + # MANYTOONE currently only needs the + # "old" value for delete-orphan + # cascades. the required _SingleParentValidator + # will enable active_history + # in that case. otherwise we don't need the + # "old" value during backref operations. 
+ _register_attribute( + self.parent_property, + mapper, + useobject=True, + callable_=self._load_for_state, + typecallable=self.parent_property.collection_class, + active_history=active_history + ) + + def _memoized_attr__simple_lazy_clause(self): + criterion, bind_to_col = ( + self._lazywhere, + self._bind_to_col + ) + + params = [] + + def visit_bindparam(bindparam): + bindparam.unique = False + if bindparam._identifying_key in bind_to_col: + params.append(( + bindparam.key, bind_to_col[bindparam._identifying_key], + None)) + elif bindparam.callable is None: + params.append((bindparam.key, None, bindparam.value)) + + criterion = visitors.cloned_traverse( + criterion, {}, {'bindparam': visit_bindparam} + ) + + return criterion, params + + def _generate_lazy_clause(self, state, passive): + criterion, param_keys = self._simple_lazy_clause + + if state is None: + return sql_util.adapt_criterion_to_null( + criterion, [key for key, ident, value in param_keys]) + + mapper = self.parent_property.parent + + o = state.obj() # strong ref + dict_ = attributes.instance_dict(o) + + if passive & attributes.INIT_OK: + passive ^= attributes.INIT_OK + + params = {} + for key, ident, value in param_keys: + if ident is not None: + if passive and passive & attributes.LOAD_AGAINST_COMMITTED: + value = mapper._get_committed_state_attr_by_column( + state, dict_, ident, passive) + else: + value = mapper._get_state_attr_by_column( + state, dict_, ident, passive) + + params[key] = value + + return criterion, params + + def _invoke_raise_load(self, state, passive, lazy): + raise sa_exc.InvalidRequestError( + "'%s' is not available due to lazy='%s'" % (self, lazy) + ) + + def _load_for_state(self, state, passive): + + if not state.key and ( + ( + not self.parent_property.load_on_pending + and not state._load_pending + ) + or not state.session_id + ): + return attributes.ATTR_EMPTY + + pending = not state.key + ident_key = None + + if ( + (not passive & attributes.SQL_OK and not self.use_get) + or + (not passive & attributes.NON_PERSISTENT_OK and pending) + ): + return attributes.PASSIVE_NO_RESULT + + if self._raise_always: + self._invoke_raise_load(state, passive, "raise") + + session = _state_session(state) + if not session: + raise orm_exc.DetachedInstanceError( + "Parent instance %s is not bound to a Session; " + "lazy load operation of attribute '%s' cannot proceed" % + (orm_util.state_str(state), self.key) + ) + + # if we have a simple primary key load, check the + # identity map without generating a Query at all + if self.use_get: + ident = self._get_ident_for_use_get( + session, + state, + passive + ) + if attributes.PASSIVE_NO_RESULT in ident: + return attributes.PASSIVE_NO_RESULT + elif attributes.NEVER_SET in ident: + return attributes.NEVER_SET + + if _none_set.issuperset(ident): + return None + + ident_key = self.mapper.identity_key_from_primary_key(ident) + instance = loading.get_from_identity(session, ident_key, passive) + if instance is not None: + return instance + elif not passive & attributes.SQL_OK or \ + not passive & attributes.RELATED_OBJECT_OK: + return attributes.PASSIVE_NO_RESULT + + return self._emit_lazyload(session, state, ident_key, passive) + + def _get_ident_for_use_get(self, session, state, passive): + instance_mapper = state.manager.mapper + + if passive & attributes.LOAD_AGAINST_COMMITTED: + get_attr = instance_mapper._get_committed_state_attr_by_column + else: + get_attr = instance_mapper._get_state_attr_by_column + + dict_ = state.dict + + return [ + get_attr( + state, + dict_, + 
self._equated_columns[pk], + passive=passive) + for pk in self.mapper.primary_key + ] + + @util.dependencies("sqlalchemy.orm.strategy_options") + def _emit_lazyload( + self, strategy_options, session, state, ident_key, passive): + + q = session.query(self.mapper)._adapt_all_clauses() + if self.parent_property.secondary is not None: + q = q.select_from(self.mapper, self.parent_property.secondary) + + q = q._with_invoke_all_eagers(False) + + pending = not state.key + + # don't autoflush on pending + if pending or passive & attributes.NO_AUTOFLUSH: + q = q.autoflush(False) + + if state.load_path: + q = q._with_current_path(state.load_path[self.parent_property]) + + if state.load_options: + q = q._conditional_options(*state.load_options) + + if self.use_get: + if self._raise_on_sql: + self._invoke_raise_load(state, passive, "raise_on_sql") + return loading.load_on_ident(q, ident_key) + + if self.parent_property.order_by: + q = q.order_by(*util.to_list(self.parent_property.order_by)) + + for rev in self.parent_property._reverse_property: + # reverse props that are MANYTOONE are loading *this* + # object from get(), so don't need to eager out to those. + if rev.direction is interfaces.MANYTOONE and \ + rev._use_get and \ + not isinstance(rev.strategy, LazyLoader): + q = q.options( + strategy_options.Load.for_existing_path( + q._current_path[rev.parent] + ).lazyload(rev.key) + ) + + lazy_clause, params = self._generate_lazy_clause( + state, passive=passive) + + if pending: + if util.has_intersection( + orm_util._none_set, params.values()): + return None + elif util.has_intersection(orm_util._never_set, params.values()): + return None + + if self._raise_on_sql: + self._invoke_raise_load(state, passive, "raise_on_sql") + + q = q.filter(lazy_clause).params(params) + + result = q.all() + if self.uselist: + return result + else: + l = len(result) + if l: + if l > 1: + util.warn( + "Multiple rows returned with " + "uselist=False for lazily-loaded attribute '%s' " + % self.parent_property) + + return result[0] + else: + return None + + def create_row_processor( + self, context, path, loadopt, + mapper, result, adapter, populators): + key = self.key + if not self.is_class_level: + # we are not the primary manager for this attribute + # on this class - set up a + # per-instance lazyloader, which will override the + # class-level behavior. + # this currently only happens when using a + # "lazyload" option on a "no load" + # attribute - "eager" attributes always have a + # class-level lazyloader installed. + set_lazy_callable = InstanceState._instance_level_callable_processor( + mapper.class_manager, + LoadLazyAttribute(key, self), key) + + populators["new"].append((self.key, set_lazy_callable)) + elif context.populate_existing or mapper.always_refresh: + def reset_for_lazy_callable(state, dict_, row): + # we are the primary manager for this attribute on + # this class - reset its + # per-instance attribute state, so that the class-level + # lazy loader is + # executed when next referenced on this instance. + # this is needed in + # populate_existing() types of scenarios to reset + # any existing state. 
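+ # [editor's note: illustrative sketch only, not part of the vendored + # source; User is a hypothetical model.] A populate_existing() query + # that reaches this reset branch, re-arming the class-level lazy + # loader on instances already in the identity map: + # for user in session.query(User).populate_existing(): + #     ...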
+ state._reset(dict_, key) + + populators["new"].append((self.key, reset_for_lazy_callable)) + + +class LoadLazyAttribute(object): + """serializable loader object used by LazyLoader""" + + def __init__(self, key, initiating_strategy): + self.key = key + self.strategy_key = initiating_strategy.strategy_key + + def __call__(self, state, passive=attributes.PASSIVE_OFF): + key = self.key + instance_mapper = state.manager.mapper + prop = instance_mapper._props[key] + strategy = prop._strategies[self.strategy_key] + + return strategy._load_for_state(state, passive) + + +@properties.RelationshipProperty.strategy_for(lazy="immediate") +class ImmediateLoader(AbstractRelationshipLoader): + __slots__ = () + + def init_class_attribute(self, mapper): + self.parent_property.\ + _get_strategy((("lazy", "select"),)).\ + init_class_attribute(mapper) + + def setup_query( + self, context, entity, + path, loadopt, adapter, column_collection=None, + parentmapper=None, **kwargs): + pass + + def create_row_processor( + self, context, path, loadopt, + mapper, result, adapter, populators): + def load_immediate(state, dict_, row): + state.get_impl(self.key).get(state, dict_) + + populators["delayed"].append((self.key, load_immediate)) + + +@log.class_logger +@properties.RelationshipProperty.strategy_for(lazy="subquery") +class SubqueryLoader(AbstractRelationshipLoader): + __slots__ = 'join_depth', + + def __init__(self, parent, strategy_key): + super(SubqueryLoader, self).__init__(parent, strategy_key) + self.join_depth = self.parent_property.join_depth + + def init_class_attribute(self, mapper): + self.parent_property.\ + _get_strategy((("lazy", "select"),)).\ + init_class_attribute(mapper) + + def setup_query( + self, context, entity, + path, loadopt, adapter, + column_collection=None, + parentmapper=None, **kwargs): + + if not context.query._enable_eagerloads: + return + elif context.query._yield_per: + context.query._no_yield_per("subquery") + + path = path[self.parent_property] + + # build up a path indicating the path from the leftmost + # entity to the thing we're subquery loading. + with_poly_info = path.get( + context.attributes, + "path_with_polymorphic", None) + if with_poly_info is not None: + effective_entity = with_poly_info.entity + else: + effective_entity = self.mapper + + subq_path = context.attributes.get( + ('subquery_path', None), + orm_util.PathRegistry.root) + + subq_path = subq_path + path + + # if not via query option, check for + # a cycle + if not path.contains(context.attributes, "loader"): + if self.join_depth: + if path.length / 2 > self.join_depth: + return + elif subq_path.contains_mapper(self.mapper): + return + + leftmost_mapper, leftmost_attr, leftmost_relationship = \ + self._get_leftmost(subq_path) + + orig_query = context.attributes.get( + ("orig_query", SubqueryLoader), + context.query) + + # generate a new Query from the original, then + # produce a subquery from it. + left_alias = self._generate_from_original_query( + orig_query, leftmost_mapper, + leftmost_attr, leftmost_relationship, + entity.entity_zero + ) + + # generate another Query that will join the + # left alias to the target relationships. + # basically doing a longhand + # "from_self()". 
(from_self() itself not quite industrial + # strength enough for all contingencies...but very close) + q = orig_query.session.query(effective_entity) + q._attributes = { + ("orig_query", SubqueryLoader): orig_query, + ('subquery_path', None): subq_path + } + + q = q._set_enable_single_crit(False) + to_join, local_attr, parent_alias = \ + self._prep_for_joins(left_alias, subq_path) + q = q.order_by(*local_attr) + q = q.add_columns(*local_attr) + q = self._apply_joins( + q, to_join, left_alias, + parent_alias, effective_entity) + + q = self._setup_options(q, subq_path, orig_query, effective_entity) + q = self._setup_outermost_orderby(q) + + # add new query to attributes to be picked up + # by create_row_processor + path.set(context.attributes, "subquery", q) + + def _get_leftmost(self, subq_path): + subq_path = subq_path.path + subq_mapper = orm_util._class_to_mapper(subq_path[0]) + + # determine attributes of the leftmost mapper + if self.parent.isa(subq_mapper) and \ + self.parent_property is subq_path[1]: + leftmost_mapper, leftmost_prop = \ + self.parent, self.parent_property + else: + leftmost_mapper, leftmost_prop = \ + subq_mapper, \ + subq_path[1] + + leftmost_cols = leftmost_prop.local_columns + + leftmost_attr = [ + getattr( + subq_path[0].entity, + leftmost_mapper._columntoproperty[c].key) + for c in leftmost_cols + ] + + return leftmost_mapper, leftmost_attr, leftmost_prop + + def _generate_from_original_query( + self, + orig_query, leftmost_mapper, + leftmost_attr, leftmost_relationship, orig_entity + ): + # reformat the original query + # to look only for significant columns + q = orig_query._clone().correlate(None) + + # set a real "from" if not present, as this is more + # accurate than just going off of the column expression + if not q._from_obj and orig_entity.is_mapper and \ + orig_entity.mapper.isa(leftmost_mapper): + q._set_select_from([orig_entity], False) + target_cols = q._adapt_col_list(leftmost_attr) + + # select from the identity columns of the outer. This will remove + # other columns from the query that might suggest the right entity + # which is why we try to _set_select_from above. + q._set_entities(target_cols) + + distinct_target_key = leftmost_relationship.distinct_target_key + + if distinct_target_key is True: + q._distinct = True + elif distinct_target_key is None: + # if target_cols refer to a non-primary key or only + # part of a composite primary key, set the q as distinct + for t in set(c.table for c in target_cols): + if not set(target_cols).issuperset(t.primary_key): + q._distinct = True + break + + if q._order_by is False: + q._order_by = leftmost_mapper.order_by + + # don't need ORDER BY if no limit/offset + if q._limit is None and q._offset is None: + q._order_by = None + + # the original query now becomes a subquery + # which we'll join onto. + + embed_q = q.with_labels().subquery() + left_alias = orm_util.AliasedClass( + leftmost_mapper, embed_q, + use_mapper_path=True) + return left_alias + + def _prep_for_joins(self, left_alias, subq_path): + # figure out what's being joined. a.k.a. the fun part + to_join = [] + pairs = list(subq_path.pairs()) + + for i, (mapper, prop) in enumerate(pairs): + if i > 0: + # look at the previous mapper in the chain - + # if it is as or more specific than this prop's + # mapper, use that instead. 
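+ # [editor's note: illustrative sketch only, not part of the vendored + # source; User and Order are hypothetical models.] A chained option + # such as the following produces the multi-pair subq_path walked here: + # session.query(User).options( + #     subqueryload(User.orders).subqueryload(Order.items))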
+ # note we have an assumption here that + # the non-first element is always going to be a mapper, + # not an AliasedClass + + prev_mapper = pairs[i - 1][1].mapper + to_append = prev_mapper if prev_mapper.isa(mapper) else mapper + else: + to_append = mapper + + to_join.append((to_append, prop.key)) + + # determine the immediate parent class we are joining from, + # which needs to be aliased. + + if len(to_join) < 2: + # in the case of a one level eager load, this is the + # leftmost "left_alias". + parent_alias = left_alias + else: + info = inspect(to_join[-1][0]) + if info.is_aliased_class: + parent_alias = info.entity + else: + # alias a plain mapper as we may be + # joining multiple times + parent_alias = orm_util.AliasedClass( + info.entity, + use_mapper_path=True) + + local_cols = self.parent_property.local_columns + + local_attr = [ + getattr(parent_alias, self.parent._columntoproperty[c].key) + for c in local_cols + ] + return to_join, local_attr, parent_alias + + def _apply_joins( + self, q, to_join, left_alias, parent_alias, + effective_entity): + + ltj = len(to_join) + if ltj == 1: + to_join = [ + getattr(left_alias, to_join[0][1]).of_type(effective_entity) + ] + elif ltj == 2: + to_join = [ + getattr(left_alias, to_join[0][1]).of_type(parent_alias), + getattr(parent_alias, to_join[-1][1]).of_type(effective_entity) + ] + elif ltj > 2: + middle = [ + ( + orm_util.AliasedClass(item[0]) + if not inspect(item[0]).is_aliased_class + else item[0].entity, + item[1] + ) for item in to_join[1:-1] + ] + inner = [] + + while middle: + item = middle.pop(0) + attr = getattr(item[0], item[1]) + if middle: + attr = attr.of_type(middle[0][0]) + else: + attr = attr.of_type(parent_alias) + + inner.append(attr) + + to_join = [ + getattr(left_alias, to_join[0][1]).of_type(inner[0].parent) + ] + inner + [ + getattr(parent_alias, to_join[-1][1]).of_type(effective_entity) + ] + + for attr in to_join: + q = q.join(attr, from_joinpoint=True) + return q + + def _setup_options(self, q, subq_path, orig_query, effective_entity): + # propagate loader options etc. to the new query. + # these will fire relative to subq_path. + q = q._with_current_path(subq_path) + q = q._conditional_options(*orig_query._with_options) + if orig_query._populate_existing: + q._populate_existing = orig_query._populate_existing + + return q + + def _setup_outermost_orderby(self, q): + if self.parent_property.order_by: + # if there's an ORDER BY, alias it the same + # way joinedloader does, but we have to pull out + # the "eagerjoin" from the query. + # this really only picks up the "secondary" table + # right now. + eagerjoin = q._from_obj[0] + eager_order_by = \ + eagerjoin._target_adapter.\ + copy_and_process( + util.to_list( + self.parent_property.order_by + ) + ) + q = q.order_by(*eager_order_by) + return q + + class _SubqCollections(object): + """Given a :class:`.Query` used to emit the "subquery load", + provide a load interface that executes the query at the + first moment a value is needed. 
+ + """ + _data = None + + def __init__(self, subq): + self.subq = subq + + def get(self, key, default): + if self._data is None: + self._load() + return self._data.get(key, default) + + def _load(self): + self._data = dict( + (k, [vv[0] for vv in v]) + for k, v in itertools.groupby( + self.subq, + lambda x: x[1:] + ) + ) + + def loader(self, state, dict_, row): + if self._data is None: + self._load() + + def create_row_processor( + self, context, path, loadopt, + mapper, result, adapter, populators): + if not self.parent.class_manager[self.key].impl.supports_population: + raise sa_exc.InvalidRequestError( + "'%s' does not support object " + "population - eager loading cannot be applied." % + self) + + path = path[self.parent_property] + + subq = path.get(context.attributes, 'subquery') + + if subq is None: + return + + assert subq.session is context.session, ( + "Subquery session doesn't refer to that of " + "our context. Are there broken context caching " + "schemes being used?" + ) + + local_cols = self.parent_property.local_columns + + # cache the loaded collections in the context + # so that inheriting mappers don't re-load when they + # call upon create_row_processor again + collections = path.get(context.attributes, "collections") + if collections is None: + collections = self._SubqCollections(subq) + path.set(context.attributes, 'collections', collections) + + if adapter: + local_cols = [adapter.columns[c] for c in local_cols] + + if self.uselist: + self._create_collection_loader( + context, collections, local_cols, populators) + else: + self._create_scalar_loader( + context, collections, local_cols, populators) + + def _create_collection_loader( + self, context, collections, local_cols, populators): + def load_collection_from_subq(state, dict_, row): + collection = collections.get( + tuple([row[col] for col in local_cols]), + () + ) + state.get_impl(self.key).\ + set_committed_value(state, dict_, collection) + + def load_collection_from_subq_existing_row(state, dict_, row): + if self.key not in dict_: + load_collection_from_subq(state, dict_, row) + + populators["new"].append( + (self.key, load_collection_from_subq)) + populators["existing"].append( + (self.key, load_collection_from_subq_existing_row)) + + if context.invoke_all_eagers: + populators["eager"].append((self.key, collections.loader)) + + def _create_scalar_loader( + self, context, collections, local_cols, populators): + def load_scalar_from_subq(state, dict_, row): + collection = collections.get( + tuple([row[col] for col in local_cols]), + (None,) + ) + if len(collection) > 1: + util.warn( + "Multiple rows returned with " + "uselist=False for eagerly-loaded attribute '%s' " + % self) + + scalar = collection[0] + state.get_impl(self.key).\ + set_committed_value(state, dict_, scalar) + + def load_scalar_from_subq_existing_row(state, dict_, row): + if self.key not in dict_: + load_scalar_from_subq(state, dict_, row) + + populators["new"].append( + (self.key, load_scalar_from_subq)) + populators["existing"].append( + (self.key, load_scalar_from_subq_existing_row)) + if context.invoke_all_eagers: + populators["eager"].append((self.key, collections.loader)) + + +@log.class_logger +@properties.RelationshipProperty.strategy_for(lazy="joined") +@properties.RelationshipProperty.strategy_for(lazy=False) +class JoinedLoader(AbstractRelationshipLoader): + """Provide loading behavior for a :class:`.RelationshipProperty` + using joined eager loading. 
+ + """ + + __slots__ = 'join_depth', '_aliased_class_pool' + + def __init__(self, parent, strategy_key): + super(JoinedLoader, self).__init__(parent, strategy_key) + self.join_depth = self.parent_property.join_depth + self._aliased_class_pool = [] + + def init_class_attribute(self, mapper): + self.parent_property.\ + _get_strategy((("lazy", "select"),)).init_class_attribute(mapper) + + def setup_query( + self, context, entity, path, loadopt, adapter, + column_collection=None, parentmapper=None, + chained_from_outerjoin=False, + **kwargs): + """Add a left outer join to the statement that's being constructed.""" + + if not context.query._enable_eagerloads: + return + elif context.query._yield_per and self.uselist: + context.query._no_yield_per("joined collection") + + path = path[self.parent_property] + + with_polymorphic = None + + user_defined_adapter = self._init_user_defined_eager_proc( + loadopt, context) if loadopt else False + + if user_defined_adapter is not False: + clauses, adapter, add_to_collection = \ + self._setup_query_on_user_defined_adapter( + context, entity, path, adapter, + user_defined_adapter + ) + else: + # if not via query option, check for + # a cycle + if not path.contains(context.attributes, "loader"): + if self.join_depth: + if path.length / 2 > self.join_depth: + return + elif path.contains_mapper(self.mapper): + return + + clauses, adapter, add_to_collection, chained_from_outerjoin = \ + self._generate_row_adapter( + context, entity, path, loadopt, adapter, + column_collection, parentmapper, chained_from_outerjoin + ) + + with_poly_info = path.get( + context.attributes, + "path_with_polymorphic", + None + ) + if with_poly_info is not None: + with_polymorphic = with_poly_info.with_polymorphic_mappers + else: + with_polymorphic = None + + path = path[self.mapper] + + loading._setup_entity_query( + context, self.mapper, entity, + path, clauses, add_to_collection, + with_polymorphic=with_polymorphic, + parentmapper=self.mapper, + chained_from_outerjoin=chained_from_outerjoin) + + if with_poly_info is not None and \ + None in set(context.secondary_columns): + raise sa_exc.InvalidRequestError( + "Detected unaliased columns when generating joined " + "load. Make sure to use aliased=True or flat=True " + "when using joined loading with with_polymorphic()." + ) + + def _init_user_defined_eager_proc(self, loadopt, context): + + # check if the opt applies at all + if "eager_from_alias" not in loadopt.local_opts: + # nope + return False + + path = loadopt.path.parent + + # the option applies. check if the "user_defined_eager_row_processor" + # has been built up. + adapter = path.get( + context.attributes, + "user_defined_eager_row_processor", False) + if adapter is not False: + # just return it + return adapter + + # otherwise figure it out. 
+ alias = loadopt.local_opts["eager_from_alias"] + + root_mapper, prop = path[-2:] + + #from .mapper import Mapper + #from .interfaces import MapperProperty + #assert isinstance(root_mapper, Mapper) + #assert isinstance(prop, MapperProperty) + + if alias is not None: + if isinstance(alias, str): + alias = prop.target.alias(alias) + adapter = sql_util.ColumnAdapter( + alias, + equivalents=prop.mapper._equivalent_columns) + else: + if path.contains(context.attributes, "path_with_polymorphic"): + with_poly_info = path.get( + context.attributes, + "path_with_polymorphic") + adapter = orm_util.ORMAdapter( + with_poly_info.entity, + equivalents=prop.mapper._equivalent_columns) + else: + adapter = context.query._polymorphic_adapters.get( + prop.mapper, None) + path.set( + context.attributes, + "user_defined_eager_row_processor", + adapter) + + return adapter + + def _setup_query_on_user_defined_adapter( + self, context, entity, + path, adapter, user_defined_adapter): + + # apply some more wrapping to the "user defined adapter" + # if we are setting up the query for SQL render. + adapter = entity._get_entity_clauses(context.query, context) + + if adapter and user_defined_adapter: + user_defined_adapter = user_defined_adapter.wrap(adapter) + path.set( + context.attributes, "user_defined_eager_row_processor", + user_defined_adapter) + elif adapter: + user_defined_adapter = adapter + path.set( + context.attributes, "user_defined_eager_row_processor", + user_defined_adapter) + + add_to_collection = context.primary_columns + return user_defined_adapter, adapter, add_to_collection + + def _gen_pooled_aliased_class(self, context): + # keep a local pool of AliasedClass objects that get re-used. + # we need one unique AliasedClass per query per appearance of our + # entity in the query. + + key = ('joinedloader_ac', self) + if key not in context.attributes: + context.attributes[key] = idx = 0 + else: + context.attributes[key] = idx = context.attributes[key] + 1 + + if idx >= len(self._aliased_class_pool): + to_adapt = orm_util.AliasedClass( + self.mapper, + flat=True, + use_mapper_path=True) + # load up the .columns collection on the Alias() before + # the object becomes shared among threads. this prevents + # races for column identities. 
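+ # [editor's note: illustrative sketch only, not part of the vendored + # source; Node is a hypothetical self-referential model.] The pool is + # exercised when the same relationship must be joined more than once + # in a single statement, e.g.: + # session.query(Node).options( + #     joinedload(Node.children).joinedload(Node.children))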
+ inspect(to_adapt).selectable.c + + self._aliased_class_pool.append(to_adapt) + + return self._aliased_class_pool[idx] + + def _generate_row_adapter( + self, + context, entity, path, loadopt, adapter, + column_collection, parentmapper, chained_from_outerjoin): + with_poly_info = path.get( + context.attributes, + "path_with_polymorphic", + None + ) + if with_poly_info: + to_adapt = with_poly_info.entity + else: + to_adapt = self._gen_pooled_aliased_class(context) + + clauses = inspect(to_adapt)._memo( + ("joinedloader_ormadapter", self), + orm_util.ORMAdapter, + to_adapt, + equivalents=self.mapper._equivalent_columns, + adapt_required=True, allow_label_resolve=False, + anonymize_labels=True + ) + + assert clauses.aliased_class is not None + + if self.parent_property.uselist: + context.multi_row_eager_loaders = True + + innerjoin = ( + loadopt.local_opts.get( + 'innerjoin', self.parent_property.innerjoin) + if loadopt is not None + else self.parent_property.innerjoin + ) + + if not innerjoin: + # if this is an outer join, all non-nested eager joins from + # this path must also be outer joins + chained_from_outerjoin = True + + context.create_eager_joins.append( + ( + self._create_eager_join, context, + entity, path, adapter, + parentmapper, clauses, innerjoin, chained_from_outerjoin + ) + ) + + add_to_collection = context.secondary_columns + path.set(context.attributes, "eager_row_processor", clauses) + + return clauses, adapter, add_to_collection, chained_from_outerjoin + + def _create_eager_join( + self, context, entity, + path, adapter, parentmapper, + clauses, innerjoin, chained_from_outerjoin): + + if parentmapper is None: + localparent = entity.mapper + else: + localparent = parentmapper + + # whether or not the Query will wrap the selectable in a subquery, + # and then attach eager load joins to that (i.e., in the case of + # LIMIT/OFFSET etc.) + should_nest_selectable = context.multi_row_eager_loaders and \ + context.query._should_nest_selectable + + entity_key = None + + if entity not in context.eager_joins and \ + not should_nest_selectable and \ + context.from_clause: + index, clause = sql_util.find_join_source( + context.from_clause, entity.selectable) + if clause is not None: + # join to an existing FROM clause on the query. + # key it to its list index in the eager_joins dict. + # Query._compile_context will adapt as needed and + # append to the FROM clause of the select(). + entity_key, default_towrap = index, clause + + if entity_key is None: + entity_key, default_towrap = entity, entity.selectable + + towrap = context.eager_joins.setdefault(entity_key, default_towrap) + + if adapter: + if getattr(adapter, 'aliased_class', None): + # joining from an adapted entity. The adapted entity + # might be a "with_polymorphic", so resolve that to our + # specific mapper's entity before looking for our attribute + # name on it. 
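+ # [editor's note: illustrative sketch only, not part of the vendored + # source; Employee and Manager are hypothetical models.] An adapted + # parent arises e.g. when joined-loading from a polymorphic entity: + # wp = with_polymorphic(Employee, [Manager], flat=True) + # session.query(wp).options(joinedload(wp.Manager.reports))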
+ efm = inspect(adapter.aliased_class).\ + _entity_for_mapper( + localparent + if localparent.isa(self.parent) else self.parent) + + # look for our attribute on the adapted entity, else fall back + # to our straight property + onclause = getattr( + efm.entity, self.key, + self.parent_property) + else: + onclause = getattr( + orm_util.AliasedClass( + self.parent, + adapter.selectable, + use_mapper_path=True + ), + self.key, self.parent_property + ) + + else: + onclause = self.parent_property + + assert clauses.aliased_class is not None + + attach_on_outside = ( + not chained_from_outerjoin or + not innerjoin or innerjoin == 'unnested') + + if attach_on_outside: + # this is the "classic" eager join case. + eagerjoin = orm_util._ORMJoin( + towrap, + clauses.aliased_class, + onclause, + isouter=not innerjoin or ( + chained_from_outerjoin and isinstance(towrap, sql.Join) + ), _left_memo=self.parent, _right_memo=self.mapper + ) + else: + # all other cases are innerjoin=='nested' approach + eagerjoin = self._splice_nested_inner_join( + path, towrap, clauses, onclause) + + context.eager_joins[entity_key] = eagerjoin + + # send a hint to the Query as to where it may "splice" this join + eagerjoin.stop_on = entity.selectable + + if not parentmapper: + # for parentclause that is the non-eager end of the join, + # ensure all the parent cols in the primaryjoin are actually + # in the + # columns clause (i.e. are not deferred), so that aliasing applied + # by the Query propagates those columns outward. + # This has the effect + # of "undefering" those columns. + for col in sql_util._find_columns( + self.parent_property.primaryjoin): + if localparent.mapped_table.c.contains_column(col): + if adapter: + col = adapter.columns[col] + context.primary_columns.append(col) + + if self.parent_property.order_by: + context.eager_order_by += eagerjoin._target_adapter.\ + copy_and_process( + util.to_list( + self.parent_property.order_by + ) + ) + + def _splice_nested_inner_join( + self, path, join_obj, clauses, onclause, splicing=False): + + if splicing is False: + # first call is always handed a join object + # from the outside + assert isinstance(join_obj, orm_util._ORMJoin) + elif isinstance(join_obj, sql.selectable.FromGrouping): + return self._splice_nested_inner_join( + path, join_obj.element, clauses, onclause, splicing + ) + elif not isinstance(join_obj, orm_util._ORMJoin): + if path[-2] is splicing: + return orm_util._ORMJoin( + join_obj, clauses.aliased_class, + onclause, isouter=False, + _left_memo=splicing, + _right_memo=path[-1].mapper + ) + else: + # only here if splicing == True + return None + + target_join = self._splice_nested_inner_join( + path, join_obj.right, clauses, + onclause, join_obj._right_memo) + if target_join is None: + right_splice = False + target_join = self._splice_nested_inner_join( + path, join_obj.left, clauses, + onclause, join_obj._left_memo) + if target_join is None: + # should only return None when recursively called, + # e.g. splicing==True + assert splicing is not False, \ + "assertion failed attempting to produce joined eager loads" + return None + else: + right_splice = True + + if right_splice: + # for a right splice, attempt to flatten out + # a JOIN b JOIN c JOIN .. 
to avoid needless + # parenthesis nesting + if not join_obj.isouter and not target_join.isouter: + eagerjoin = join_obj._splice_into_center(target_join) + else: + eagerjoin = orm_util._ORMJoin( + join_obj.left, target_join, + join_obj.onclause, isouter=join_obj.isouter, + _left_memo=join_obj._left_memo) + else: + eagerjoin = orm_util._ORMJoin( + target_join, join_obj.right, + join_obj.onclause, isouter=join_obj.isouter, + _right_memo=join_obj._right_memo) + + eagerjoin._target_adapter = target_join._target_adapter + return eagerjoin + + def _create_eager_adapter(self, context, result, adapter, path, loadopt): + user_defined_adapter = self._init_user_defined_eager_proc( + loadopt, context) if loadopt else False + + if user_defined_adapter is not False: + decorator = user_defined_adapter + # user defined eagerloads are part of the "primary" + # portion of the load. + # the adapters applied to the Query should be honored. + if context.adapter and decorator: + decorator = decorator.wrap(context.adapter) + elif context.adapter: + decorator = context.adapter + else: + decorator = path.get(context.attributes, "eager_row_processor") + if decorator is None: + return False + + if self.mapper._result_has_identity_key(result, decorator): + return decorator + else: + # no identity key - don't return a row + # processor, will cause a degrade to lazy + return False + + def create_row_processor( + self, context, path, loadopt, mapper, + result, adapter, populators): + if not self.parent.class_manager[self.key].impl.supports_population: + raise sa_exc.InvalidRequestError( + "'%s' does not support object " + "population - eager loading cannot be applied." % + self + ) + + our_path = path[self.parent_property] + + eager_adapter = self._create_eager_adapter( + context, + result, + adapter, our_path, loadopt) + + if eager_adapter is not False: + key = self.key + + _instance = loading._instance_processor( + self.mapper, + context, + result, + our_path[self.mapper], + eager_adapter) + + if not self.uselist: + self._create_scalar_loader(context, key, _instance, populators) + else: + self._create_collection_loader( + context, key, _instance, populators) + else: + self.parent_property._get_strategy((("lazy", "select"),)).\ + create_row_processor( + context, path, loadopt, + mapper, result, adapter, populators) + + def _create_collection_loader(self, context, key, _instance, populators): + def load_collection_from_joined_new_row(state, dict_, row): + collection = attributes.init_state_collection( + state, dict_, key) + result_list = util.UniqueAppender(collection, + 'append_without_event') + context.attributes[(state, key)] = result_list + inst = _instance(row) + if inst is not None: + result_list.append(inst) + + def load_collection_from_joined_existing_row(state, dict_, row): + if (state, key) in context.attributes: + result_list = context.attributes[(state, key)] + else: + # appender_key can be absent from context.attributes + # with isnew=False when self-referential eager loading + # is used; the same instance may be present in two + # distinct sets of result columns + collection = attributes.init_state_collection( + state, dict_, key) + result_list = util.UniqueAppender( + collection, + 'append_without_event') + context.attributes[(state, key)] = result_list + inst = _instance(row) + if inst is not None: + result_list.append(inst) + + def load_collection_from_joined_exec(state, dict_, row): + _instance(row) + + populators["new"].append((self.key, load_collection_from_joined_new_row)) + 
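+ # [editor's note, not part of the vendored source:] the populator + # buckets differ by row disposition: "new" callables run when an + # instance is first constructed for a row, "existing" when the + # instance was already present, and "eager" only when + # context.invoke_all_eagers is set.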
populators["existing"].append( + (self.key, load_collection_from_joined_existing_row)) + if context.invoke_all_eagers: + populators["eager"].append( + (self.key, load_collection_from_joined_exec)) + + def _create_scalar_loader(self, context, key, _instance, populators): + def load_scalar_from_joined_new_row(state, dict_, row): + # set a scalar object instance directly on the parent + # object, bypassing InstrumentedAttribute event handlers. + dict_[key] = _instance(row) + + def load_scalar_from_joined_existing_row(state, dict_, row): + # call _instance on the row, even though the object has + # been created, so that we further descend into properties + existing = _instance(row) + + # conflicting value already loaded, this shouldn't happen + if key in dict_: + if existing is not dict_[key]: + util.warn( + "Multiple rows returned with " + "uselist=False for eagerly-loaded attribute '%s' " + % self) + else: + # this case is when one row has multiple loads of the + # same entity (e.g. via aliasing), one has an attribute + # that the other doesn't. + dict_[key] = existing + + def load_scalar_from_joined_exec(state, dict_, row): + _instance(row) + + populators["new"].append((self.key, load_scalar_from_joined_new_row)) + populators["existing"].append( + (self.key, load_scalar_from_joined_existing_row)) + if context.invoke_all_eagers: + populators["eager"].append((self.key, load_scalar_from_joined_exec)) + + +def single_parent_validator(desc, prop): + def _do_check(state, value, oldvalue, initiator): + if value is not None and initiator.key == prop.key: + hasparent = initiator.hasparent(attributes.instance_state(value)) + if hasparent and oldvalue is not value: + raise sa_exc.InvalidRequestError( + "Instance %s is already associated with an instance " + "of %s via its %s attribute, and is only allowed a " + "single parent." % + (orm_util.instance_str(value), state.class_, prop) + ) + return value + + def append(state, value, initiator): + return _do_check(state, value, None, initiator) + + def set_(state, value, oldvalue, initiator): + return _do_check(state, value, oldvalue, initiator) + + event.listen( + desc, 'append', append, raw=True, retval=True, + active_history=True) + event.listen( + desc, 'set', set_, raw=True, retval=True, + active_history=True) diff --git a/app/lib/sqlalchemy/orm/strategy_options.py b/app/lib/sqlalchemy/orm/strategy_options.py new file mode 100644 index 0000000..bae2b73 --- /dev/null +++ b/app/lib/sqlalchemy/orm/strategy_options.py @@ -0,0 +1,1106 @@ +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" + +""" + +from .interfaces import MapperOption, PropComparator +from .. import util +from ..sql.base import _generative, Generative +from .. import exc as sa_exc, inspect +from .base import _is_aliased_class, _class_to_mapper +from . import util as orm_util +from .path_registry import PathRegistry, TokenRegistry, \ + _WILDCARD_TOKEN, _DEFAULT_TOKEN + + +class Load(Generative, MapperOption): + """Represents loader options which modify the state of a + :class:`.Query` in order to affect how various mapped attributes are + loaded. + + .. versionadded:: 0.9.0 The :meth:`.Load` system is a new foundation for + the existing system of loader options, including options such as + :func:`.orm.joinedload`, :func:`.orm.defer`, and others. 
In + particular, it introduces a new method-chained system that replaces the + need for dot-separated paths as well as "_all()" options such as + :func:`.orm.joinedload_all`. + + A :class:`.Load` object can be used directly or indirectly. To use one + directly, instantiate given the parent class. This style of usage is + useful when dealing with a :class:`.Query` that has multiple entities, + or when producing a loader option that can be applied generically to + any style of query:: + + myopt = Load(MyClass).joinedload("widgets") + + The above ``myopt`` can now be used with :meth:`.Query.options`:: + + session.query(MyClass).options(myopt) + + The :class:`.Load` construct is invoked indirectly whenever one makes use + of the various loader options that are present in ``sqlalchemy.orm``, + including options such as :func:`.orm.joinedload`, :func:`.orm.defer`, + :func:`.orm.subqueryload`, and all the rest. These constructs produce an + "anonymous" form of the :class:`.Load` object which tracks attributes and + options, but is not linked to a parent class until it is associated with a + parent :class:`.Query`:: + + # produce "unbound" Load object + myopt = joinedload("widgets") + + # when applied using options(), the option is "bound" to the + # class observed in the given query, e.g. MyClass + session.query(MyClass).options(myopt) + + Whether the direct or indirect style is used, the :class:`.Load` object + returned now represents a specific "path" along the entities of a + :class:`.Query`. This path can be traversed using a standard + method-chaining approach. Supposing a class hierarchy such as ``User``, + ``User.addresses -> Address``, ``User.orders -> Order`` and + ``Order.items -> Item``, we can specify a variety of loader options along + each element in the "path":: + + session.query(User).options( + joinedload("addresses"), + subqueryload("orders").joinedload("items") + ) + + Where above, the ``addresses`` collection will be joined-loaded, the + ``orders`` collection will be subquery-loaded, and within that subquery + load the ``items`` collection will be joined-loaded. 
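+    The same chain can also be spelled from an explicit :class:`.Load`
+    root; a rough equivalent of the above (assuming the same ``User`` /
+    ``Order`` / ``Item`` mapping)::
+
+        session.query(User).options(
+            Load(User).joinedload("addresses"),
+            Load(User).subqueryload("orders").joinedload("items")
+        )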
+ + + """ + + def __init__(self, entity): + insp = inspect(entity) + self.path = insp._path_registry + # note that this .context is shared among all descendant + # Load objects + self.context = {} + self.local_opts = {} + + @classmethod + def for_existing_path(cls, path): + load = cls.__new__(cls) + load.path = path + load.context = {} + load.local_opts = {} + return load + + def _generate(self): + cloned = super(Load, self)._generate() + cloned.local_opts = {} + return cloned + + is_opts_only = False + strategy = None + propagate_to_loaders = False + + def process_query(self, query): + self._process(query, True) + + def process_query_conditionally(self, query): + self._process(query, False) + + def _process(self, query, raiseerr): + current_path = query._current_path + if current_path: + for (token, start_path), loader in self.context.items(): + chopped_start_path = self._chop_path(start_path, current_path) + if chopped_start_path is not None: + query._attributes[(token, chopped_start_path)] = loader + else: + query._attributes.update(self.context) + + def _generate_path(self, path, attr, wildcard_key, raiseerr=True): + if raiseerr and not path.has_entity: + if isinstance(path, TokenRegistry): + raise sa_exc.ArgumentError( + "Wildcard token cannot be followed by another entity") + else: + raise sa_exc.ArgumentError( + "Attribute '%s' of entity '%s' does not " + "refer to a mapped entity" % + (path.prop.key, path.parent.entity) + ) + + if isinstance(attr, util.string_types): + default_token = attr.endswith(_DEFAULT_TOKEN) + if attr.endswith(_WILDCARD_TOKEN) or default_token: + if default_token: + self.propagate_to_loaders = False + if wildcard_key: + attr = "%s:%s" % (wildcard_key, attr) + return path.token(attr) + + try: + # use getattr on the class to work around + # synonyms, hybrids, etc. + attr = getattr(path.entity.class_, attr) + except AttributeError: + if raiseerr: + raise sa_exc.ArgumentError( + "Can't find property named '%s' on the " + "mapped entity %s in this Query. 
" % ( + attr, path.entity) + ) + else: + return None + else: + attr = attr.property + + path = path[attr] + else: + prop = attr.property + + if not prop.parent.common_parent(path.mapper): + if raiseerr: + raise sa_exc.ArgumentError( + "Attribute '%s' does not " + "link from element '%s'" % (attr, path.entity)) + else: + return None + + if getattr(attr, '_of_type', None): + ac = attr._of_type + ext_info = inspect(ac) + + path_element = ext_info.mapper + existing = path.entity_path[prop].get( + self.context, "path_with_polymorphic") + if not ext_info.is_aliased_class: + ac = orm_util.with_polymorphic( + ext_info.mapper.base_mapper, + ext_info.mapper, aliased=True, + _use_mapper_path=True, + _existing_alias=existing) + path.entity_path[prop].set( + self.context, "path_with_polymorphic", inspect(ac)) + path = path[prop][path_element] + else: + path = path[prop] + + if path.has_entity: + path = path.entity_path + return path + + def __str__(self): + return "Load(strategy=%r)" % (self.strategy, ) + + def _coerce_strat(self, strategy): + if strategy is not None: + strategy = tuple(sorted(strategy.items())) + return strategy + + @_generative + def set_relationship_strategy( + self, attr, strategy, propagate_to_loaders=True): + strategy = self._coerce_strat(strategy) + + self.propagate_to_loaders = propagate_to_loaders + # if the path is a wildcard, this will set propagate_to_loaders=False + self.path = self._generate_path(self.path, attr, "relationship") + self.strategy = strategy + if strategy is not None: + self._set_path_strategy() + + @_generative + def set_column_strategy(self, attrs, strategy, opts=None, opts_only=False): + strategy = self._coerce_strat(strategy) + + for attr in attrs: + path = self._generate_path(self.path, attr, "column") + cloned = self._generate() + cloned.strategy = strategy + cloned.path = path + cloned.propagate_to_loaders = True + if opts: + cloned.local_opts.update(opts) + if opts_only: + cloned.is_opts_only = True + cloned._set_path_strategy() + + def _set_for_path(self, context, path, replace=True, merge_opts=False): + if merge_opts or not replace: + existing = path.get(self.context, "loader") + + if existing: + if merge_opts: + existing.local_opts.update(self.local_opts) + else: + path.set(context, "loader", self) + else: + existing = path.get(self.context, "loader") + path.set(context, "loader", self) + if existing and existing.is_opts_only: + self.local_opts.update(existing.local_opts) + + def _set_path_strategy(self): + if self.path.has_entity: + effective_path = self.path.parent + else: + effective_path = self.path + + self._set_for_path( + self.context, effective_path, replace=True, + merge_opts=self.is_opts_only) + + def __getstate__(self): + d = self.__dict__.copy() + d["path"] = self.path.serialize() + return d + + def __setstate__(self, state): + self.__dict__.update(state) + self.path = PathRegistry.deserialize(self.path) + + def _chop_path(self, to_chop, path): + i = -1 + + for i, (c_token, p_token) in enumerate(zip(to_chop, path.path)): + if isinstance(c_token, util.string_types): + # TODO: this is approximated from the _UnboundLoad + # version and probably has issues, not fully covered. + + if i == 0 and c_token.endswith(':' + _DEFAULT_TOKEN): + return to_chop + elif c_token != 'relationship:%s' % (_WILDCARD_TOKEN,) and \ + c_token != p_token.key: + return None + + if c_token is p_token: + continue + else: + return None + return to_chop[i + 1:] + + +class _UnboundLoad(Load): + """Represent a loader option that isn't tied to a root entity. 
+ + The loader option will produce an entity-linked :class:`.Load` + object when it is passed :meth:`.Query.options`. + + This provides compatibility with the traditional system + of freestanding options, e.g. ``joinedload('x.y.z')``. + + """ + + def __init__(self): + self.path = () + self._to_bind = set() + self.local_opts = {} + + _is_chain_link = False + + def _set_path_strategy(self): + self._to_bind.add(self) + + def _generate_path(self, path, attr, wildcard_key): + if wildcard_key and isinstance(attr, util.string_types) and \ + attr in (_WILDCARD_TOKEN, _DEFAULT_TOKEN): + if attr == _DEFAULT_TOKEN: + self.propagate_to_loaders = False + attr = "%s:%s" % (wildcard_key, attr) + + return path + (attr, ) + + def __getstate__(self): + d = self.__dict__.copy() + d['path'] = ret = [] + for token in util.to_list(self.path): + if isinstance(token, PropComparator): + ret.append((token._parentmapper.class_, token.key)) + else: + ret.append(token) + return d + + def __setstate__(self, state): + ret = [] + for key in state['path']: + if isinstance(key, tuple): + cls, propkey = key + ret.append(getattr(cls, propkey)) + else: + ret.append(key) + state['path'] = tuple(ret) + self.__dict__ = state + + def _process(self, query, raiseerr): + for val in self._to_bind: + val._bind_loader(query, query._attributes, raiseerr) + + @classmethod + def _from_keys(cls, meth, keys, chained, kw): + opt = _UnboundLoad() + + def _split_key(key): + if isinstance(key, util.string_types): + # coerce fooload('*') into "default loader strategy" + if key == _WILDCARD_TOKEN: + return (_DEFAULT_TOKEN, ) + # coerce fooload(".*") into "wildcard on default entity" + elif key.startswith("." + _WILDCARD_TOKEN): + key = key[1:] + return key.split(".") + else: + return (key,) + all_tokens = [token for key in keys for token in _split_key(key)] + + for token in all_tokens[0:-1]: + if chained: + opt = meth(opt, token, **kw) + else: + opt = opt.defaultload(token) + opt._is_chain_link = True + + opt = meth(opt, all_tokens[-1], **kw) + opt._is_chain_link = False + + return opt + + def _chop_path(self, to_chop, path): + i = -1 + for i, (c_token, (p_mapper, p_prop)) in enumerate( + zip(to_chop, path.pairs())): + if isinstance(c_token, util.string_types): + if i == 0 and c_token.endswith(':' + _DEFAULT_TOKEN): + return to_chop + elif c_token != 'relationship:%s' % ( + _WILDCARD_TOKEN,) and c_token != p_prop.key: + return None + elif isinstance(c_token, PropComparator): + if c_token.property is not p_prop: + return None + else: + i += 1 + + return to_chop[i:] + + def _bind_loader(self, query, context, raiseerr): + start_path = self.path + # _current_path implies we're in a + # secondary load with an existing path + + current_path = query._current_path + if current_path: + start_path = self._chop_path(start_path, current_path) + + if not start_path: + return None + + token = start_path[0] + + if isinstance(token, util.string_types): + entity = self._find_entity_basestring(query, token, raiseerr) + elif isinstance(token, PropComparator): + prop = token.property + entity = self._find_entity_prop_comparator( + query, + prop.key, + token._parententity, + raiseerr) + + else: + raise sa_exc.ArgumentError( + "mapper option expects " + "string key or list of attributes") + + if not entity: + return + + path_element = entity.entity_zero + + # transfer our entity-less state into a Load() object + # with a real entity path. 
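+        # ("entity" is the matched _MapperEntity from the query; its
+        # entity_zero, a mapper or aliased class, becomes the root of
+        # the bound option.)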
+ loader = Load(path_element) + loader.context = context + loader.strategy = self.strategy + loader.is_opts_only = self.is_opts_only + + path = loader.path + for token in start_path: + loader.path = path = loader._generate_path( + loader.path, token, None, raiseerr) + if path is None: + return + + loader.local_opts.update(self.local_opts) + + if loader.path.has_entity: + effective_path = loader.path.parent + else: + effective_path = loader.path + + # prioritize "first class" options over those + # that were "links in the chain", e.g. "x" and "y" in + # someload("x.y.z") versus someload("x") / someload("x.y") + + if effective_path.is_token: + for path in effective_path.generate_for_superclasses(): + loader._set_for_path( + context, path, + replace=not self._is_chain_link, + merge_opts=self.is_opts_only) + else: + loader._set_for_path( + context, effective_path, + replace=not self._is_chain_link, + merge_opts=self.is_opts_only) + + def _find_entity_prop_comparator(self, query, token, mapper, raiseerr): + if _is_aliased_class(mapper): + searchfor = mapper + else: + searchfor = _class_to_mapper(mapper) + for ent in query._mapper_entities: + if ent.corresponds_to(searchfor): + return ent + else: + if raiseerr: + if not list(query._mapper_entities): + raise sa_exc.ArgumentError( + "Query has only expression-based entities - " + "can't find property named '%s'." + % (token, ) + ) + else: + raise sa_exc.ArgumentError( + "Can't find property '%s' on any entity " + "specified in this Query. Note the full path " + "from root (%s) to target entity must be specified." + % (token, ",".join(str(x) for + x in query._mapper_entities)) + ) + else: + return None + + def _find_entity_basestring(self, query, token, raiseerr): + if token.endswith(':' + _WILDCARD_TOKEN): + if len(list(query._mapper_entities)) != 1: + if raiseerr: + raise sa_exc.ArgumentError( + "Wildcard loader can only be used with exactly " + "one entity. Use Load(ent) to specify " + "specific entities.") + elif token.endswith(_DEFAULT_TOKEN): + raiseerr = False + + for ent in query._mapper_entities: + # return only the first _MapperEntity when searching + # based on string prop name. Ideally object + # attributes are used to specify more exactly. + return ent + else: + if raiseerr: + raise sa_exc.ArgumentError( + "Query has only expression-based entities - " + "can't find property named '%s'." + % (token, ) + ) + else: + return None + + +class loader_option(object): + def __init__(self): + pass + + def __call__(self, fn): + self.name = name = fn.__name__ + self.fn = fn + if hasattr(Load, name): + raise TypeError("Load class already has a %s method." % (name)) + setattr(Load, name, fn) + + return self + + def _add_unbound_fn(self, fn): + self._unbound_fn = fn + fn_doc = self.fn.__doc__ + self.fn.__doc__ = """Produce a new :class:`.Load` object with the +:func:`.orm.%(name)s` option applied. + +See :func:`.orm.%(name)s` for usage examples. + +""" % {"name": self.name} + + fn.__doc__ = fn_doc + return self + + def _add_unbound_all_fn(self, fn): + self._unbound_all_fn = fn + fn.__doc__ = """Produce a standalone "all" option for :func:`.orm.%(name)s`. + +.. 
deprecated:: 0.9.0 + + The "_all()" style is replaced by method chaining, e.g.:: + + session.query(MyClass).options( + %(name)s("someattribute").%(name)s("anotherattribute") + ) + +""" % {"name": self.name} + return self + + +@loader_option() +def contains_eager(loadopt, attr, alias=None): + r"""Indicate that the given attribute should be eagerly loaded from + columns stated manually in the query. + + This function is part of the :class:`.Load` interface and supports + both method-chained and standalone operation. + + The option is used in conjunction with an explicit join that loads + the desired rows, i.e.:: + + sess.query(Order).\ + join(Order.user).\ + options(contains_eager(Order.user)) + + The above query would join from the ``Order`` entity to its related + ``User`` entity, and the returned ``Order`` objects would have the + ``Order.user`` attribute pre-populated. + + :func:`contains_eager` also accepts an `alias` argument, which is the + string name of an alias, an :func:`~sqlalchemy.sql.expression.alias` + construct, or an :func:`~sqlalchemy.orm.aliased` construct. Use this when + the eagerly-loaded rows are to come from an aliased table:: + + user_alias = aliased(User) + sess.query(Order).\ + join((user_alias, Order.user)).\ + options(contains_eager(Order.user, alias=user_alias)) + + .. seealso:: + + :ref:`contains_eager` + + """ + if alias is not None: + if not isinstance(alias, str): + info = inspect(alias) + alias = info.selectable + + cloned = loadopt.set_relationship_strategy( + attr, + {"lazy": "joined"}, + propagate_to_loaders=False + ) + cloned.local_opts['eager_from_alias'] = alias + return cloned + + +@contains_eager._add_unbound_fn +def contains_eager(*keys, **kw): + return _UnboundLoad()._from_keys( + _UnboundLoad.contains_eager, keys, True, kw) + + +@loader_option() +def load_only(loadopt, *attrs): + """Indicate that for a particular entity, only the given list + of column-based attribute names should be loaded; all others will be + deferred. + + This function is part of the :class:`.Load` interface and supports + both method-chained and standalone operation. + + Example - given a class ``User``, load only the ``name`` and ``fullname`` + attributes:: + + session.query(User).options(load_only("name", "fullname")) + + Example - given a relationship ``User.addresses -> Address``, specify + subquery loading for the ``User.addresses`` collection, but on each + ``Address`` object load only the ``email_address`` attribute:: + + session.query(User).options( + subqueryload("addresses").load_only("email_address") + ) + + For a :class:`.Query` that has multiple entities, the lead entity can be + specifically referred to using the :class:`.Load` constructor:: + + session.query(User, Address).join(User.addresses).options( + Load(User).load_only("name", "fullname"), + Load(Address).load_only("email_addres") + ) + + + .. versionadded:: 0.9.0 + + """ + cloned = loadopt.set_column_strategy( + attrs, + {"deferred": False, "instrument": True} + ) + cloned.set_column_strategy("*", + {"deferred": True, "instrument": True}, + {"undefer_pks": True}) + return cloned + + +@load_only._add_unbound_fn +def load_only(*attrs): + return _UnboundLoad().load_only(*attrs) + + +@loader_option() +def joinedload(loadopt, attr, innerjoin=None): + """Indicate that the given attribute should be loaded using joined + eager loading. + + This function is part of the :class:`.Load` interface and supports + both method-chained and standalone operation. 
+ + examples:: + + # joined-load the "orders" collection on "User" + query(User).options(joinedload(User.orders)) + + # joined-load Order.items and then Item.keywords + query(Order).options(joinedload(Order.items).joinedload(Item.keywords)) + + # lazily load Order.items, but when Items are loaded, + # joined-load the keywords collection + query(Order).options(lazyload(Order.items).joinedload(Item.keywords)) + + :param innerjoin: if ``True``, indicates that the joined eager load should + use an inner join instead of the default of left outer join:: + + query(Order).options(joinedload(Order.user, innerjoin=True)) + + In order to chain multiple eager joins together where some may be + OUTER and others INNER, right-nested joins are used to link them:: + + query(A).options( + joinedload(A.bs, innerjoin=False). + joinedload(B.cs, innerjoin=True) + ) + + The above query, linking A.bs via "outer" join and B.cs via "inner" join + would render the joins as "a LEFT OUTER JOIN (b JOIN c)". When using + SQLite, this form of JOIN is translated to use full subqueries as this + syntax is otherwise not directly supported. + + The ``innerjoin`` flag can also be stated with the term ``"unnested"``. + This will prevent joins from being right-nested, and will instead + link an "innerjoin" eagerload to an "outerjoin" eagerload by bypassing + the "inner" join. Using this form as follows:: + + query(A).options( + joinedload(A.bs, innerjoin=False). + joinedload(B.cs, innerjoin="unnested") + ) + + Joins will be rendered as "a LEFT OUTER JOIN b LEFT OUTER JOIN c", so that + all of "a" is matched rather than being incorrectly limited by a "b" that + does not contain a "c". + + .. note:: The "unnested" flag does **not** affect the JOIN rendered + from a many-to-many association table, e.g. a table configured + as :paramref:`.relationship.secondary`, to the target table; for + correctness of results, these joins are always INNER and are + therefore right-nested if linked to an OUTER join. + + .. versionadded:: 0.9.4 Added support for "nesting" of eager "inner" + joins. See :ref:`feature_2976`. + + .. versionchanged:: 1.0.0 ``innerjoin=True`` now implies + ``innerjoin="nested"``, whereas in 0.9 it implied + ``innerjoin="unnested"``. In order to achieve the pre-1.0 "unnested" + inner join behavior, use the value ``innerjoin="unnested"``. + See :ref:`migration_3008`. + + .. note:: + + The joins produced by :func:`.orm.joinedload` are **anonymously + aliased**. The criteria by which the join proceeds cannot be + modified, nor can the :class:`.Query` refer to these joins in any way, + including ordering. + + To produce a specific SQL JOIN which is explicitly available, use + :meth:`.Query.join`. To combine explicit JOINs with eager loading + of collections, use :func:`.orm.contains_eager`; see + :ref:`contains_eager`. + + .. seealso:: + + :ref:`loading_toplevel` + + :ref:`contains_eager` + + :func:`.orm.subqueryload` + + :func:`.orm.lazyload` + + :paramref:`.relationship.lazy` + + :paramref:`.relationship.innerjoin` - :func:`.relationship`-level + version of the :paramref:`.joinedload.innerjoin` option. 
+ + """ + loader = loadopt.set_relationship_strategy(attr, {"lazy": "joined"}) + if innerjoin is not None: + loader.local_opts['innerjoin'] = innerjoin + return loader + + +@joinedload._add_unbound_fn +def joinedload(*keys, **kw): + return _UnboundLoad._from_keys( + _UnboundLoad.joinedload, keys, False, kw) + + +@joinedload._add_unbound_all_fn +def joinedload_all(*keys, **kw): + return _UnboundLoad._from_keys( + _UnboundLoad.joinedload, keys, True, kw) + + +@loader_option() +def subqueryload(loadopt, attr): + """Indicate that the given attribute should be loaded using + subquery eager loading. + + This function is part of the :class:`.Load` interface and supports + both method-chained and standalone operation. + + examples:: + + # subquery-load the "orders" collection on "User" + query(User).options(subqueryload(User.orders)) + + # subquery-load Order.items and then Item.keywords + query(Order).options(subqueryload(Order.items).subqueryload(Item.keywords)) + + # lazily load Order.items, but when Items are loaded, + # subquery-load the keywords collection + query(Order).options(lazyload(Order.items).subqueryload(Item.keywords)) + + + .. seealso:: + + :ref:`loading_toplevel` + + :func:`.orm.joinedload` + + :func:`.orm.lazyload` + + :paramref:`.relationship.lazy` + + """ + return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"}) + + +@subqueryload._add_unbound_fn +def subqueryload(*keys): + return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, False, {}) + + +@subqueryload._add_unbound_all_fn +def subqueryload_all(*keys): + return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, True, {}) + + +@loader_option() +def lazyload(loadopt, attr): + """Indicate that the given attribute should be loaded using "lazy" + loading. + + This function is part of the :class:`.Load` interface and supports + both method-chained and standalone operation. + + .. seealso:: + + :paramref:`.relationship.lazy` + + """ + return loadopt.set_relationship_strategy(attr, {"lazy": "select"}) + + +@lazyload._add_unbound_fn +def lazyload(*keys): + return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, False, {}) + + +@lazyload._add_unbound_all_fn +def lazyload_all(*keys): + return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, True, {}) + + +@loader_option() +def immediateload(loadopt, attr): + """Indicate that the given attribute should be loaded using + an immediate load with a per-attribute SELECT statement. + + This function is part of the :class:`.Load` interface and supports + both method-chained and standalone operation. + + .. seealso:: + + :ref:`loading_toplevel` + + :func:`.orm.joinedload` + + :func:`.orm.lazyload` + + :paramref:`.relationship.lazy` + + """ + loader = loadopt.set_relationship_strategy(attr, {"lazy": "immediate"}) + return loader + + +@immediateload._add_unbound_fn +def immediateload(*keys): + return _UnboundLoad._from_keys( + _UnboundLoad.immediateload, keys, False, {}) + + +@loader_option() +def noload(loadopt, attr): + """Indicate that the given relationship attribute should remain unloaded. + + This function is part of the :class:`.Load` interface and supports + both method-chained and standalone operation. + + :func:`.orm.noload` applies to :func:`.relationship` attributes; for + column-based attributes, see :func:`.orm.defer`. 
+ + """ + + return loadopt.set_relationship_strategy(attr, {"lazy": "noload"}) + + +@noload._add_unbound_fn +def noload(*keys): + return _UnboundLoad._from_keys(_UnboundLoad.noload, keys, False, {}) + + +@loader_option() +def raiseload(loadopt, attr, sql_only=False): + """Indicate that the given relationship attribute should disallow lazy loads. + + A relationship attribute configured with :func:`.orm.raiseload` will + raise an :exc:`~sqlalchemy.exc.InvalidRequestError` upon access. The + typical way this is useful is when an application is attempting to ensure + that all relationship attributes that are accessed in a particular context + would have been already loaded via eager loading. Instead of having + to read through SQL logs to ensure lazy loads aren't occurring, this + strategy will cause them to raise immediately. + + :param sql_only: if True, raise only if the lazy load would emit SQL, + but not if it is only checking the identity map, or determining that + the related value should just be None due to missing keys. When False, + the strategy will raise for all varieties of lazyload. + + This function is part of the :class:`.Load` interface and supports + both method-chained and standalone operation. + + :func:`.orm.raiseload` applies to :func:`.relationship` attributes only. + + .. versionadded:: 1.1 + + """ + + return loadopt.set_relationship_strategy( + attr, {"lazy": "raise_on_sql" if sql_only else "raise"}) + + +@raiseload._add_unbound_fn +def raiseload(*keys, **kw): + return _UnboundLoad._from_keys(_UnboundLoad.raiseload, keys, False, kw) + + +@loader_option() +def defaultload(loadopt, attr): + """Indicate an attribute should load using its default loader style. + + This method is used to link to other loader options, such as + to set the :func:`.orm.defer` option on a class that is linked to + a relationship of the parent class being loaded, :func:`.orm.defaultload` + can be used to navigate this path without changing the loading style + of the relationship:: + + session.query(MyClass).options(defaultload("someattr").defer("some_column")) + + .. seealso:: + + :func:`.orm.defer` + + :func:`.orm.undefer` + + """ + return loadopt.set_relationship_strategy( + attr, + None + ) + + +@defaultload._add_unbound_fn +def defaultload(*keys): + return _UnboundLoad._from_keys(_UnboundLoad.defaultload, keys, False, {}) + + +@loader_option() +def defer(loadopt, key): + r"""Indicate that the given column-oriented attribute should be deferred, e.g. + not loaded until accessed. + + This function is part of the :class:`.Load` interface and supports + both method-chained and standalone operation. + + e.g.:: + + from sqlalchemy.orm import defer + + session.query(MyClass).options( + defer("attribute_one"), + defer("attribute_two")) + + session.query(MyClass).options( + defer(MyClass.attribute_one), + defer(MyClass.attribute_two)) + + To specify a deferred load of an attribute on a related class, + the path can be specified one token at a time, specifying the loading + style for each link along the chain. To leave the loading style + for a link unchanged, use :func:`.orm.defaultload`:: + + session.query(MyClass).options(defaultload("someattr").defer("some_column")) + + A :class:`.Load` object that is present on a certain path can have + :meth:`.Load.defer` called multiple times, each will operate on the same + parent entity:: + + + session.query(MyClass).options( + defaultload("someattr"). + defer("some_column"). + defer("some_other_column"). 
+ defer("another_column") + ) + + :param key: Attribute to be deferred. + + :param \*addl_attrs: Deprecated; this option supports the old 0.8 style + of specifying a path as a series of attributes, which is now superseded + by the method-chained style. + + .. seealso:: + + :ref:`deferred` + + :func:`.orm.undefer` + + """ + return loadopt.set_column_strategy( + (key, ), + {"deferred": True, "instrument": True} + ) + + +@defer._add_unbound_fn +def defer(key, *addl_attrs): + return _UnboundLoad._from_keys( + _UnboundLoad.defer, (key, ) + addl_attrs, False, {}) + + +@loader_option() +def undefer(loadopt, key): + r"""Indicate that the given column-oriented attribute should be undeferred, + e.g. specified within the SELECT statement of the entity as a whole. + + The column being undeferred is typically set up on the mapping as a + :func:`.deferred` attribute. + + This function is part of the :class:`.Load` interface and supports + both method-chained and standalone operation. + + Examples:: + + # undefer two columns + session.query(MyClass).options(undefer("col1"), undefer("col2")) + + # undefer all columns specific to a single class using Load + * + session.query(MyClass, MyOtherClass).options( + Load(MyClass).undefer("*")) + + :param key: Attribute to be undeferred. + + :param \*addl_attrs: Deprecated; this option supports the old 0.8 style + of specifying a path as a series of attributes, which is now superseded + by the method-chained style. + + .. seealso:: + + :ref:`deferred` + + :func:`.orm.defer` + + :func:`.orm.undefer_group` + + """ + return loadopt.set_column_strategy( + (key, ), + {"deferred": False, "instrument": True} + ) + + +@undefer._add_unbound_fn +def undefer(key, *addl_attrs): + return _UnboundLoad._from_keys( + _UnboundLoad.undefer, (key, ) + addl_attrs, False, {}) + + +@loader_option() +def undefer_group(loadopt, name): + """Indicate that columns within the given deferred group name should be + undeferred. + + The columns being undeferred are set up on the mapping as + :func:`.deferred` attributes and include a "group" name. + + E.g:: + + session.query(MyClass).options(undefer_group("large_attrs")) + + To undefer a group of attributes on a related entity, the path can be + spelled out using relationship loader options, such as + :func:`.orm.defaultload`:: + + session.query(MyClass).options( + defaultload("someattr").undefer_group("large_attrs")) + + .. versionchanged:: 0.9.0 :func:`.orm.undefer_group` is now specific to a + particiular entity load path. + + .. seealso:: + + :ref:`deferred` + + :func:`.orm.defer` + + :func:`.orm.undefer` + + """ + return loadopt.set_column_strategy( + "*", + None, + {"undefer_group_%s" % name: True}, + opts_only=True + ) + + +@undefer_group._add_unbound_fn +def undefer_group(name): + return _UnboundLoad().undefer_group(name) diff --git a/app/lib/sqlalchemy/orm/sync.py b/app/lib/sqlalchemy/orm/sync.py new file mode 100644 index 0000000..880428b --- /dev/null +++ b/app/lib/sqlalchemy/orm/sync.py @@ -0,0 +1,140 @@ +# orm/sync.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""private module containing functions used for copying data +between instances based on join conditions. + +""" + +from . 
import exc, util as orm_util, attributes + + +def populate(source, source_mapper, dest, dest_mapper, + synchronize_pairs, uowcommit, flag_cascaded_pks): + source_dict = source.dict + dest_dict = dest.dict + + for l, r in synchronize_pairs: + try: + # inline of source_mapper._get_state_attr_by_column + prop = source_mapper._columntoproperty[l] + value = source.manager[prop.key].impl.get(source, source_dict, + attributes.PASSIVE_OFF) + except exc.UnmappedColumnError: + _raise_col_to_prop(False, source_mapper, l, dest_mapper, r) + + try: + # inline of dest_mapper._set_state_attr_by_column + prop = dest_mapper._columntoproperty[r] + dest.manager[prop.key].impl.set(dest, dest_dict, value, None) + except exc.UnmappedColumnError: + _raise_col_to_prop(True, source_mapper, l, dest_mapper, r) + + # technically the "r.primary_key" check isn't + # needed here, but we check for this condition to limit + # how often this logic is invoked for memory/performance + # reasons, since we only need this info for a primary key + # destination. + if flag_cascaded_pks and l.primary_key and \ + r.primary_key and \ + r.references(l): + uowcommit.attributes[("pk_cascaded", dest, r)] = True + + +def bulk_populate_inherit_keys( + source_dict, source_mapper, synchronize_pairs): + # a simplified version of populate() used by bulk insert mode + for l, r in synchronize_pairs: + try: + prop = source_mapper._columntoproperty[l] + value = source_dict[prop.key] + except exc.UnmappedColumnError: + _raise_col_to_prop(False, source_mapper, l, source_mapper, r) + + try: + prop = source_mapper._columntoproperty[r] + source_dict[prop.key] = value + except exc.UnmappedColumnError: + _raise_col_to_prop(True, source_mapper, l, source_mapper, r) + + +def clear(dest, dest_mapper, synchronize_pairs): + for l, r in synchronize_pairs: + if r.primary_key and \ + dest_mapper._get_state_attr_by_column( + dest, dest.dict, r) not in orm_util._none_set: + + raise AssertionError( + "Dependency rule tried to blank-out primary key " + "column '%s' on instance '%s'" % + (r, orm_util.state_str(dest)) + ) + try: + dest_mapper._set_state_attr_by_column(dest, dest.dict, r, None) + except exc.UnmappedColumnError: + _raise_col_to_prop(True, None, l, dest_mapper, r) + + +def update(source, source_mapper, dest, old_prefix, synchronize_pairs): + for l, r in synchronize_pairs: + try: + oldvalue = source_mapper._get_committed_attr_by_column( + source.obj(), l) + value = source_mapper._get_state_attr_by_column( + source, source.dict, l, passive=attributes.PASSIVE_OFF) + except exc.UnmappedColumnError: + _raise_col_to_prop(False, source_mapper, l, None, r) + dest[r.key] = value + dest[old_prefix + r.key] = oldvalue + + +def populate_dict(source, source_mapper, dict_, synchronize_pairs): + for l, r in synchronize_pairs: + try: + value = source_mapper._get_state_attr_by_column( + source, source.dict, l, passive=attributes.PASSIVE_OFF) + except exc.UnmappedColumnError: + _raise_col_to_prop(False, source_mapper, l, None, r) + + dict_[r.key] = value + + +def source_modified(uowcommit, source, source_mapper, synchronize_pairs): + """return true if the source object has changes from an old to a + new value on the given synchronize pairs + + """ + for l, r in synchronize_pairs: + try: + prop = source_mapper._columntoproperty[l] + except exc.UnmappedColumnError: + _raise_col_to_prop(False, source_mapper, l, None, r) + history = uowcommit.get_attribute_history( + source, prop.key, attributes.PASSIVE_NO_INITIALIZE) + if bool(history.deleted): + return True + else: + return 
False + + +def _raise_col_to_prop(isdest, source_mapper, source_column, + dest_mapper, dest_column): + if isdest: + raise exc.UnmappedColumnError( + "Can't execute sync rule for " + "destination column '%s'; mapper '%s' does not map " + "this column. Try using an explicit `foreign_keys` " + "collection which does not include this column (or use " + "a viewonly=True relation)." % (dest_column, dest_mapper)) + else: + raise exc.UnmappedColumnError( + "Can't execute sync rule for " + "source column '%s'; mapper '%s' does not map this " + "column. Try using an explicit `foreign_keys` " + "collection which does not include destination column " + "'%s' (or use a viewonly=True relation)." % + (source_column, source_mapper, dest_column)) diff --git a/app/lib/sqlalchemy/orm/unitofwork.py b/app/lib/sqlalchemy/orm/unitofwork.py new file mode 100644 index 0000000..3a39a30 --- /dev/null +++ b/app/lib/sqlalchemy/orm/unitofwork.py @@ -0,0 +1,672 @@ +# orm/unitofwork.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""The internals for the unit of work system. + +The session's flush() process passes objects to a contextual object +here, which assembles flush tasks based on mappers and their properties, +organizes them in order of dependency, and executes. + +""" + +from .. import util, event +from ..util import topological +from . import attributes, persistence, util as orm_util +from . import exc as orm_exc +import itertools + + +def track_cascade_events(descriptor, prop): + """Establish event listeners on object attributes which handle + cascade-on-set/append. + + """ + key = prop.key + + def append(state, item, initiator): + # process "save_update" cascade rules for when + # an instance is appended to the list of another instance + + if item is None: + return + + sess = state.session + if sess: + if sess._warn_on_events: + sess._flush_warning("collection append") + + prop = state.manager.mapper._props[key] + item_state = attributes.instance_state(item) + if prop._cascade.save_update and \ + (prop.cascade_backrefs or key == initiator.key) and \ + not sess._contains_state(item_state): + sess._save_or_update_state(item_state) + return item + + def remove(state, item, initiator): + if item is None: + return + + sess = state.session + if sess: + + prop = state.manager.mapper._props[key] + + if sess._warn_on_events: + sess._flush_warning( + "collection remove" + if prop.uselist + else "related attribute delete") + + # expunge pending orphans + item_state = attributes.instance_state(item) + if prop._cascade.delete_orphan and \ + item_state in sess._new and \ + prop.mapper._is_orphan(item_state): + sess.expunge(item) + + def set_(state, newvalue, oldvalue, initiator): + # process "save_update" cascade rules for when an instance + # is attached to another instance + if oldvalue is newvalue: + return newvalue + + sess = state.session + if sess: + + if sess._warn_on_events: + sess._flush_warning("related attribute set") + + prop = state.manager.mapper._props[key] + if newvalue is not None: + newvalue_state = attributes.instance_state(newvalue) + if prop._cascade.save_update and \ + (prop.cascade_backrefs or key == initiator.key) and \ + not sess._contains_state(newvalue_state): + sess._save_or_update_state(newvalue_state) + + if oldvalue is not None and \ + oldvalue is not attributes.NEVER_SET and \ + oldvalue is not attributes.PASSIVE_NO_RESULT and \ + 
prop._cascade.delete_orphan: + # possible to reach here with attributes.NEVER_SET ? + oldvalue_state = attributes.instance_state(oldvalue) + + if oldvalue_state in sess._new and \ + prop.mapper._is_orphan(oldvalue_state): + sess.expunge(oldvalue) + return newvalue + + event.listen(descriptor, 'append', append, raw=True, retval=True) + event.listen(descriptor, 'remove', remove, raw=True, retval=True) + event.listen(descriptor, 'set', set_, raw=True, retval=True) + + +class UOWTransaction(object): + def __init__(self, session): + self.session = session + + # dictionary used by external actors to + # store arbitrary state information. + self.attributes = {} + + # dictionary of mappers to sets of + # DependencyProcessors, which are also + # set to be part of the sorted flush actions, + # which have that mapper as a parent. + self.deps = util.defaultdict(set) + + # dictionary of mappers to sets of InstanceState + # items pending for flush which have that mapper + # as a parent. + self.mappers = util.defaultdict(set) + + # a dictionary of Preprocess objects, which gather + # additional states impacted by the flush + # and determine if a flush action is needed + self.presort_actions = {} + + # dictionary of PostSortRec objects, each + # one issues work during the flush within + # a certain ordering. + self.postsort_actions = {} + + # a set of 2-tuples, each containing two + # PostSortRec objects where the second + # is dependent on the first being executed + # first + self.dependencies = set() + + # dictionary of InstanceState-> (isdelete, listonly) + # tuples, indicating if this state is to be deleted + # or insert/updated, or just refreshed + self.states = {} + + # tracks InstanceStates which will be receiving + # a "post update" call. Keys are mappers, + # values are a set of states and a set of the + # columns which should be included in the update. + self.post_update_states = util.defaultdict(lambda: (set(), set())) + + @property + def has_work(self): + return bool(self.states) + + def was_already_deleted(self, state): + """return true if the given state is expired and was deleted + previously. 
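+        (Answering this can itself emit a SELECT, since the expired
+        state has to be loaded to find out.)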
+ """ + if state.expired: + try: + state._load_expired(state, attributes.PASSIVE_OFF) + except orm_exc.ObjectDeletedError: + self.session._remove_newly_deleted([state]) + return True + return False + + def is_deleted(self, state): + """return true if the given state is marked as deleted + within this uowtransaction.""" + + return state in self.states and self.states[state][0] + + def memo(self, key, callable_): + if key in self.attributes: + return self.attributes[key] + else: + self.attributes[key] = ret = callable_() + return ret + + def remove_state_actions(self, state): + """remove pending actions for a state from the uowtransaction.""" + + isdelete = self.states[state][0] + + self.states[state] = (isdelete, True) + + def get_attribute_history(self, state, key, + passive=attributes.PASSIVE_NO_INITIALIZE): + """facade to attributes.get_state_history(), including + caching of results.""" + + hashkey = ("history", state, key) + + # cache the objects, not the states; the strong reference here + # prevents newly loaded objects from being dereferenced during the + # flush process + + if hashkey in self.attributes: + history, state_history, cached_passive = self.attributes[hashkey] + # if the cached lookup was "passive" and now + # we want non-passive, do a non-passive lookup and re-cache + + if not cached_passive & attributes.SQL_OK \ + and passive & attributes.SQL_OK: + impl = state.manager[key].impl + history = impl.get_history(state, state.dict, + attributes.PASSIVE_OFF | + attributes.LOAD_AGAINST_COMMITTED) + if history and impl.uses_objects: + state_history = history.as_state() + else: + state_history = history + self.attributes[hashkey] = (history, state_history, passive) + else: + impl = state.manager[key].impl + # TODO: store the history as (state, object) tuples + # so we don't have to keep converting here + history = impl.get_history(state, state.dict, passive | + attributes.LOAD_AGAINST_COMMITTED) + if history and impl.uses_objects: + state_history = history.as_state() + else: + state_history = history + self.attributes[hashkey] = (history, state_history, + passive) + + return state_history + + def has_dep(self, processor): + return (processor, True) in self.presort_actions + + def register_preprocessor(self, processor, fromparent): + key = (processor, fromparent) + if key not in self.presort_actions: + self.presort_actions[key] = Preprocess(processor, fromparent) + + def register_object(self, state, isdelete=False, + listonly=False, cancel_delete=False, + operation=None, prop=None): + if not self.session._contains_state(state): + # this condition is normal when objects are registered + # as part of a relationship cascade operation. it should + # not occur for the top-level register from Session.flush(). 
+ if not state.deleted and operation is not None: + util.warn("Object of type %s not in session, %s operation " + "along '%s' will not proceed" % + (orm_util.state_class_str(state), operation, prop)) + return False + + if state not in self.states: + mapper = state.manager.mapper + + if mapper not in self.mappers: + self._per_mapper_flush_actions(mapper) + + self.mappers[mapper].add(state) + self.states[state] = (isdelete, listonly) + else: + if not listonly and (isdelete or cancel_delete): + self.states[state] = (isdelete, False) + return True + + def issue_post_update(self, state, post_update_cols): + mapper = state.manager.mapper.base_mapper + states, cols = self.post_update_states[mapper] + states.add(state) + cols.update(post_update_cols) + + def _per_mapper_flush_actions(self, mapper): + saves = SaveUpdateAll(self, mapper.base_mapper) + deletes = DeleteAll(self, mapper.base_mapper) + self.dependencies.add((saves, deletes)) + + for dep in mapper._dependency_processors: + dep.per_property_preprocessors(self) + + for prop in mapper.relationships: + if prop.viewonly: + continue + dep = prop._dependency_processor + dep.per_property_preprocessors(self) + + @util.memoized_property + def _mapper_for_dep(self): + """return a dynamic mapping of (Mapper, DependencyProcessor) to + True or False, indicating if the DependencyProcessor operates + on objects of that Mapper. + + The result is stored in the dictionary persistently once + calculated. + + """ + return util.PopulateDict( + lambda tup: tup[0]._props.get(tup[1].key) is tup[1].prop + ) + + def filter_states_for_dep(self, dep, states): + """Filter the given list of InstanceStates to those relevant to the + given DependencyProcessor. + + """ + mapper_for_dep = self._mapper_for_dep + return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]] + + def states_for_mapper_hierarchy(self, mapper, isdelete, listonly): + checktup = (isdelete, listonly) + for mapper in mapper.base_mapper.self_and_descendants: + for state in self.mappers[mapper]: + if self.states[state] == checktup: + yield state + + def _generate_actions(self): + """Generate the full, unsorted collection of PostSortRecs as + well as dependency pairs for this UOWTransaction. + + """ + # execute presort_actions, until all states + # have been processed. a presort_action might + # add new states to the uow. + while True: + ret = False + for action in list(self.presort_actions.values()): + if action.execute(self): + ret = True + if not ret: + break + + # see if the graph of mapper dependencies has cycles. + self.cycles = cycles = topological.find_cycles( + self.dependencies, + list(self.postsort_actions.values())) + + if cycles: + # if yes, break the per-mapper actions into + # per-state actions + convert = dict( + (rec, set(rec.per_state_flush_actions(self))) + for rec in cycles + ) + + # rewrite the existing dependencies to point to + # the per-state actions for those per-mapper actions + # that were broken up. 
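+            # edges wholly inside the cycle set (or disabled) are dropped;
+            # an edge touching a cycle on one side is fanned out to the
+            # per-state records that replaced that action.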
+ for edge in list(self.dependencies): + if None in edge or \ + edge[0].disabled or edge[1].disabled or \ + cycles.issuperset(edge): + self.dependencies.remove(edge) + elif edge[0] in cycles: + self.dependencies.remove(edge) + for dep in convert[edge[0]]: + self.dependencies.add((dep, edge[1])) + elif edge[1] in cycles: + self.dependencies.remove(edge) + for dep in convert[edge[1]]: + self.dependencies.add((edge[0], dep)) + + return set([a for a in self.postsort_actions.values() + if not a.disabled + ] + ).difference(cycles) + + def execute(self): + postsort_actions = self._generate_actions() + + # sort = topological.sort(self.dependencies, postsort_actions) + # print "--------------" + # print "\ndependencies:", self.dependencies + # print "\ncycles:", self.cycles + # print "\nsort:", list(sort) + # print "\nCOUNT OF POSTSORT ACTIONS", len(postsort_actions) + + # execute + if self.cycles: + for set_ in topological.sort_as_subsets( + self.dependencies, + postsort_actions): + while set_: + n = set_.pop() + n.execute_aggregate(self, set_) + else: + for rec in topological.sort( + self.dependencies, + postsort_actions): + rec.execute(self) + + def finalize_flush_changes(self): + """mark processed objects as clean / deleted after a successful + flush(). + + this method is called within the flush() method after the + execute() method has succeeded and the transaction has been committed. + + """ + if not self.states: + return + + states = set(self.states) + isdel = set( + s for (s, (isdelete, listonly)) in self.states.items() + if isdelete + ) + other = states.difference(isdel) + if isdel: + self.session._remove_newly_deleted(isdel) + if other: + self.session._register_newly_persistent(other) + + +class IterateMappersMixin(object): + def _mappers(self, uow): + if self.fromparent: + return iter( + m for m in + self.dependency_processor.parent.self_and_descendants + if uow._mapper_for_dep[(m, self.dependency_processor)] + ) + else: + return self.dependency_processor.mapper.self_and_descendants + + +class Preprocess(IterateMappersMixin): + def __init__(self, dependency_processor, fromparent): + self.dependency_processor = dependency_processor + self.fromparent = fromparent + self.processed = set() + self.setup_flush_actions = False + + def execute(self, uow): + delete_states = set() + save_states = set() + + for mapper in self._mappers(uow): + for state in uow.mappers[mapper].difference(self.processed): + (isdelete, listonly) = uow.states[state] + if not listonly: + if isdelete: + delete_states.add(state) + else: + save_states.add(state) + + if delete_states: + self.dependency_processor.presort_deletes(uow, delete_states) + self.processed.update(delete_states) + if save_states: + self.dependency_processor.presort_saves(uow, save_states) + self.processed.update(save_states) + + if (delete_states or save_states): + if not self.setup_flush_actions and ( + self.dependency_processor. + prop_has_changes(uow, delete_states, True) or + self.dependency_processor. 
+ prop_has_changes(uow, save_states, False) + ): + self.dependency_processor.per_property_flush_actions(uow) + self.setup_flush_actions = True + return True + else: + return False + + +class PostSortRec(object): + disabled = False + + def __new__(cls, uow, *args): + key = (cls, ) + args + if key in uow.postsort_actions: + return uow.postsort_actions[key] + else: + uow.postsort_actions[key] = \ + ret = \ + object.__new__(cls) + return ret + + def execute_aggregate(self, uow, recs): + self.execute(uow) + + def __repr__(self): + return "%s(%s)" % ( + self.__class__.__name__, + ",".join(str(x) for x in self.__dict__.values()) + ) + + +class ProcessAll(IterateMappersMixin, PostSortRec): + def __init__(self, uow, dependency_processor, delete, fromparent): + self.dependency_processor = dependency_processor + self.delete = delete + self.fromparent = fromparent + uow.deps[dependency_processor.parent.base_mapper].\ + add(dependency_processor) + + def execute(self, uow): + states = self._elements(uow) + if self.delete: + self.dependency_processor.process_deletes(uow, states) + else: + self.dependency_processor.process_saves(uow, states) + + def per_state_flush_actions(self, uow): + # this is handled by SaveUpdateAll and DeleteAll, + # since a ProcessAll should unconditionally be pulled + # into per-state if either the parent/child mappers + # are part of a cycle + return iter([]) + + def __repr__(self): + return "%s(%s, delete=%s)" % ( + self.__class__.__name__, + self.dependency_processor, + self.delete + ) + + def _elements(self, uow): + for mapper in self._mappers(uow): + for state in uow.mappers[mapper]: + (isdelete, listonly) = uow.states[state] + if isdelete == self.delete and not listonly: + yield state + + +class IssuePostUpdate(PostSortRec): + def __init__(self, uow, mapper, isdelete): + self.mapper = mapper + self.isdelete = isdelete + + def execute(self, uow): + states, cols = uow.post_update_states[self.mapper] + states = [s for s in states if uow.states[s][0] == self.isdelete] + + persistence.post_update(self.mapper, states, uow, cols) + + +class SaveUpdateAll(PostSortRec): + def __init__(self, uow, mapper): + self.mapper = mapper + assert mapper is mapper.base_mapper + + def execute(self, uow): + persistence.save_obj(self.mapper, + uow.states_for_mapper_hierarchy( + self.mapper, False, False), + uow + ) + + def per_state_flush_actions(self, uow): + states = list(uow.states_for_mapper_hierarchy( + self.mapper, False, False)) + base_mapper = self.mapper.base_mapper + delete_all = DeleteAll(uow, base_mapper) + for state in states: + # keep saves before deletes - + # this ensures 'row switch' operations work + action = SaveUpdateState(uow, state, base_mapper) + uow.dependencies.add((action, delete_all)) + yield action + + for dep in uow.deps[self.mapper]: + states_for_prop = uow.filter_states_for_dep(dep, states) + dep.per_state_flush_actions(uow, states_for_prop, False) + + +class DeleteAll(PostSortRec): + def __init__(self, uow, mapper): + self.mapper = mapper + assert mapper is mapper.base_mapper + + def execute(self, uow): + persistence.delete_obj(self.mapper, + uow.states_for_mapper_hierarchy( + self.mapper, True, False), + uow + ) + + def per_state_flush_actions(self, uow): + states = list(uow.states_for_mapper_hierarchy( + self.mapper, True, False)) + base_mapper = self.mapper.base_mapper + save_all = SaveUpdateAll(uow, base_mapper) + for state in states: + # keep saves before deletes - + # this ensures 'row switch' operations work + action = DeleteState(uow, state, base_mapper) + 
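+            # ordering tuple: save_all executes before this per-state
+            # delete in the topological sort.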
uow.dependencies.add((save_all, action)) + yield action + + for dep in uow.deps[self.mapper]: + states_for_prop = uow.filter_states_for_dep(dep, states) + dep.per_state_flush_actions(uow, states_for_prop, True) + + +class ProcessState(PostSortRec): + def __init__(self, uow, dependency_processor, delete, state): + self.dependency_processor = dependency_processor + self.delete = delete + self.state = state + + def execute_aggregate(self, uow, recs): + cls_ = self.__class__ + dependency_processor = self.dependency_processor + delete = self.delete + our_recs = [r for r in recs + if r.__class__ is cls_ and + r.dependency_processor is dependency_processor and + r.delete is delete] + recs.difference_update(our_recs) + states = [self.state] + [r.state for r in our_recs] + if delete: + dependency_processor.process_deletes(uow, states) + else: + dependency_processor.process_saves(uow, states) + + def __repr__(self): + return "%s(%s, %s, delete=%s)" % ( + self.__class__.__name__, + self.dependency_processor, + orm_util.state_str(self.state), + self.delete + ) + + +class SaveUpdateState(PostSortRec): + def __init__(self, uow, state, mapper): + self.state = state + self.mapper = mapper + + def execute_aggregate(self, uow, recs): + cls_ = self.__class__ + mapper = self.mapper + our_recs = [r for r in recs + if r.__class__ is cls_ and + r.mapper is mapper] + recs.difference_update(our_recs) + persistence.save_obj(mapper, + [self.state] + + [r.state for r in our_recs], + uow) + + def __repr__(self): + return "%s(%s)" % ( + self.__class__.__name__, + orm_util.state_str(self.state) + ) + + +class DeleteState(PostSortRec): + def __init__(self, uow, state, mapper): + self.state = state + self.mapper = mapper + + def execute_aggregate(self, uow, recs): + cls_ = self.__class__ + mapper = self.mapper + our_recs = [r for r in recs + if r.__class__ is cls_ and + r.mapper is mapper] + recs.difference_update(our_recs) + states = [self.state] + [r.state for r in our_recs] + persistence.delete_obj(mapper, + [s for s in states if uow.states[s][0]], + uow) + + def __repr__(self): + return "%s(%s)" % ( + self.__class__.__name__, + orm_util.state_str(self.state) + ) diff --git a/app/lib/sqlalchemy/orm/util.py b/app/lib/sqlalchemy/orm/util.py new file mode 100644 index 0000000..fc0dba5 --- /dev/null +++ b/app/lib/sqlalchemy/orm/util.py @@ -0,0 +1,1058 @@ +# orm/util.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + + +from .. import sql, util, event, exc as sa_exc, inspection +from ..sql import expression, util as sql_util, operators +from .interfaces import PropComparator, MapperProperty +from . 
import attributes +import re + +from .base import instance_str, state_str, state_class_str, attribute_str, \ + state_attribute_str, object_mapper, object_state, _none_set, _never_set +from .base import class_mapper, _class_to_mapper +from .base import InspectionAttr +from .path_registry import PathRegistry + +all_cascades = frozenset(("delete", "delete-orphan", "all", "merge", + "expunge", "save-update", "refresh-expire", + "none")) + + +class CascadeOptions(frozenset): + """Keeps track of the options sent to relationship().cascade""" + + _add_w_all_cascades = all_cascades.difference([ + 'all', 'none', 'delete-orphan']) + _allowed_cascades = all_cascades + + __slots__ = ( + 'save_update', 'delete', 'refresh_expire', 'merge', + 'expunge', 'delete_orphan') + + def __new__(cls, value_list): + if isinstance(value_list, util.string_types) or value_list is None: + return cls.from_string(value_list) + values = set(value_list) + if values.difference(cls._allowed_cascades): + raise sa_exc.ArgumentError( + "Invalid cascade option(s): %s" % + ", ".join([repr(x) for x in + sorted(values.difference(cls._allowed_cascades))])) + + if "all" in values: + values.update(cls._add_w_all_cascades) + if "none" in values: + values.clear() + values.discard('all') + + self = frozenset.__new__(CascadeOptions, values) + self.save_update = 'save-update' in values + self.delete = 'delete' in values + self.refresh_expire = 'refresh-expire' in values + self.merge = 'merge' in values + self.expunge = 'expunge' in values + self.delete_orphan = "delete-orphan" in values + + if self.delete_orphan and not self.delete: + util.warn("The 'delete-orphan' cascade " + "option requires 'delete'.") + return self + + def __repr__(self): + return "CascadeOptions(%r)" % ( + ",".join([x for x in sorted(self)]) + ) + + @classmethod + def from_string(cls, arg): + values = [ + c for c + in re.split(r'\s*,\s*', arg or "") + if c + ] + return cls(values) + + +def _validator_events( + desc, key, validator, include_removes, include_backrefs): + """Runs a validation method on an attribute value to be set or + appended. + """ + + if not include_backrefs: + def detect_is_backref(state, initiator): + impl = state.manager[key].impl + return initiator.impl is not impl + + if include_removes: + def append(state, value, initiator): + if include_backrefs or not detect_is_backref(state, initiator): + return validator(state.obj(), key, value, False) + else: + return value + + def set_(state, value, oldvalue, initiator): + if include_backrefs or not detect_is_backref(state, initiator): + return validator(state.obj(), key, value, False) + else: + return value + + def remove(state, value, initiator): + if include_backrefs or not detect_is_backref(state, initiator): + validator(state.obj(), key, value, True) + + else: + def append(state, value, initiator): + if include_backrefs or not detect_is_backref(state, initiator): + return validator(state.obj(), key, value) + else: + return value + + def set_(state, value, oldvalue, initiator): + if include_backrefs or not detect_is_backref(state, initiator): + return validator(state.obj(), key, value) + else: + return value + + event.listen(desc, 'append', append, raw=True, retval=True) + event.listen(desc, 'set', set_, raw=True, retval=True) + if include_removes: + event.listen(desc, "remove", remove, raw=True, retval=True) + + +def polymorphic_union(table_map, typecolname, + aliasname='p_union', cast_nulls=True): + """Create a ``UNION`` statement used by a polymorphic mapper. 
+ + See :ref:`concrete_inheritance` for an example of how + this is used. + + :param table_map: mapping of polymorphic identities to + :class:`.Table` objects. + :param typecolname: string name of a "discriminator" column, which will be + derived from the query, producing the polymorphic identity for + each row. If ``None``, no polymorphic discriminator is generated. + :param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()` + construct generated. + :param cast_nulls: if True, non-existent columns, which are represented + as labeled NULLs, will be passed into CAST. This is a legacy behavior + that is problematic on some backends such as Oracle - in which case it + can be set to False. + + """ + + colnames = util.OrderedSet() + colnamemaps = {} + types = {} + for key in table_map: + table = table_map[key] + + # mysql doesn't like selecting from a select; + # make it an alias of the select + if isinstance(table, sql.Select): + table = table.alias() + table_map[key] = table + + m = {} + for c in table.c: + colnames.add(c.key) + m[c.key] = c + types[c.key] = c.type + colnamemaps[table] = m + + def col(name, table): + try: + return colnamemaps[table][name] + except KeyError: + if cast_nulls: + return sql.cast(sql.null(), types[name]).label(name) + else: + return sql.type_coerce(sql.null(), types[name]).label(name) + + result = [] + for type, table in table_map.items(): + if typecolname is not None: + result.append( + sql.select([col(name, table) for name in colnames] + + [sql.literal_column( + sql_util._quote_ddl_expr(type)). + label(typecolname)], + from_obj=[table])) + else: + result.append(sql.select([col(name, table) for name in colnames], + from_obj=[table])) + return sql.union_all(*result).alias(aliasname) + + +def identity_key(*args, **kwargs): + """Generate "identity key" tuples, as are used as keys in the + :attr:`.Session.identity_map` dictionary. + + This function has several call styles: + + * ``identity_key(class, ident)`` + + This form receives a mapped class and a primary key scalar or + tuple as an argument. + + E.g.:: + + >>> identity_key(MyClass, (1, 2)) + (, (1, 2)) + + :param class: mapped class (must be a positional argument) + :param ident: primary key, may be a scalar or tuple argument. + + + * ``identity_key(instance=instance)`` + + This form will produce the identity key for a given instance. The + instance need not be persistent, only that its primary key attributes + are populated (else the key will contain ``None`` for those missing + values). + + E.g.:: + + >>> instance = MyClass(1, 2) + >>> identity_key(instance=instance) + (, (1, 2)) + + In this form, the given instance is ultimately run though + :meth:`.Mapper.identity_key_from_instance`, which will have the + effect of performing a database check for the corresponding row + if the object is expired. + + :param instance: object instance (must be given as a keyword arg) + + * ``identity_key(class, row=row)`` + + This form is similar to the class/tuple form, except is passed a + database result row as a :class:`.RowProxy` object. 
+ + E.g.:: + + >>> row = engine.execute("select * from table where a=1 and b=2").\ +first() + >>> identity_key(MyClass, row=row) + (, (1, 2)) + + :param class: mapped class (must be a positional argument) + :param row: :class:`.RowProxy` row returned by a :class:`.ResultProxy` + (must be given as a keyword arg) + + """ + if args: + if len(args) == 1: + class_ = args[0] + try: + row = kwargs.pop("row") + except KeyError: + ident = kwargs.pop("ident") + elif len(args) == 2: + class_, ident = args + elif len(args) == 3: + class_, ident = args + else: + raise sa_exc.ArgumentError( + "expected up to three positional arguments, " + "got %s" % len(args)) + if kwargs: + raise sa_exc.ArgumentError("unknown keyword arguments: %s" + % ", ".join(kwargs)) + mapper = class_mapper(class_) + if "ident" in locals(): + return mapper.identity_key_from_primary_key(util.to_list(ident)) + return mapper.identity_key_from_row(row) + instance = kwargs.pop("instance") + if kwargs: + raise sa_exc.ArgumentError("unknown keyword arguments: %s" + % ", ".join(kwargs.keys)) + mapper = object_mapper(instance) + return mapper.identity_key_from_instance(instance) + + +class ORMAdapter(sql_util.ColumnAdapter): + """ColumnAdapter subclass which excludes adaptation of entities from + non-matching mappers. + + """ + + def __init__(self, entity, equivalents=None, adapt_required=False, + chain_to=None, allow_label_resolve=True, + anonymize_labels=False): + info = inspection.inspect(entity) + + self.mapper = info.mapper + selectable = info.selectable + is_aliased_class = info.is_aliased_class + if is_aliased_class: + self.aliased_class = entity + else: + self.aliased_class = None + + sql_util.ColumnAdapter.__init__( + self, selectable, equivalents, chain_to, + adapt_required=adapt_required, + allow_label_resolve=allow_label_resolve, + anonymize_labels=anonymize_labels, + include_fn=self._include_fn + ) + + def _include_fn(self, elem): + entity = elem._annotations.get('parentmapper', None) + return not entity or entity.isa(self.mapper) + + +class AliasedClass(object): + r"""Represents an "aliased" form of a mapped class for usage with Query. + + The ORM equivalent of a :func:`sqlalchemy.sql.expression.alias` + construct, this object mimics the mapped class using a + __getattr__ scheme and maintains a reference to a + real :class:`~sqlalchemy.sql.expression.Alias` object. + + Usage is via the :func:`.orm.aliased` function, or alternatively + via the :func:`.orm.with_polymorphic` function. + + Usage example:: + + # find all pairs of users with the same name + user_alias = aliased(User) + session.query(User, user_alias).\ + join((user_alias, User.id > user_alias.id)).\ + filter(User.name==user_alias.name) + + The resulting object is an instance of :class:`.AliasedClass`. + This object implements an attribute scheme which produces the + same attribute and method interface as the original mapped + class, allowing :class:`.AliasedClass` to be compatible + with any attribute technique which works on the original class, + including hybrid attributes (see :ref:`hybrids_toplevel`). + + The :class:`.AliasedClass` can be inspected for its underlying + :class:`.Mapper`, aliased selectable, and other information + using :func:`.inspect`:: + + from sqlalchemy import inspect + my_alias = aliased(MyClass) + insp = inspect(my_alias) + + The resulting inspection object is an instance of :class:`.AliasedInsp`. + + See :func:`.aliased` and :func:`.with_polymorphic` for construction + argument descriptions. 
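+
+    As a sketch of the attribute proxying described above, assuming a
+    hypothetical mapped class ``User`` backed by a ``users`` table::
+
+        user_alias = aliased(User)
+
+        # comparisons resolve against the alias rather than the base
+        # table, rendering approximately "users_1.name = :name_1"
+        print(user_alias.name == 'ed')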
+ + """ + + def __init__(self, cls, alias=None, + name=None, + flat=False, + adapt_on_names=False, + # TODO: None for default here? + with_polymorphic_mappers=(), + with_polymorphic_discriminator=None, + base_alias=None, + use_mapper_path=False): + mapper = _class_to_mapper(cls) + if alias is None: + alias = mapper._with_polymorphic_selectable.alias( + name=name, flat=flat) + + self._aliased_insp = AliasedInsp( + self, + mapper, + alias, + name, + with_polymorphic_mappers + if with_polymorphic_mappers + else mapper.with_polymorphic_mappers, + with_polymorphic_discriminator + if with_polymorphic_discriminator is not None + else mapper.polymorphic_on, + base_alias, + use_mapper_path, + adapt_on_names + ) + + self.__name__ = 'AliasedClass_%s' % mapper.class_.__name__ + + def __getattr__(self, key): + try: + _aliased_insp = self.__dict__['_aliased_insp'] + except KeyError: + raise AttributeError() + else: + for base in _aliased_insp._target.__mro__: + try: + attr = object.__getattribute__(base, key) + except AttributeError: + continue + else: + break + else: + raise AttributeError(key) + + if isinstance(attr, PropComparator): + ret = attr.adapt_to_entity(_aliased_insp) + setattr(self, key, ret) + return ret + elif hasattr(attr, 'func_code'): + is_method = getattr(_aliased_insp._target, key, None) + if is_method and is_method.__self__ is not None: + return util.types.MethodType(attr.__func__, self, self) + else: + return None + elif hasattr(attr, '__get__'): + ret = attr.__get__(None, self) + if isinstance(ret, PropComparator): + return ret.adapt_to_entity(_aliased_insp) + else: + return ret + else: + return attr + + def __repr__(self): + return '' % ( + id(self), self._aliased_insp._target.__name__) + + +class AliasedInsp(InspectionAttr): + """Provide an inspection interface for an + :class:`.AliasedClass` object. + + The :class:`.AliasedInsp` object is returned + given an :class:`.AliasedClass` using the + :func:`.inspect` function:: + + from sqlalchemy import inspect + from sqlalchemy.orm import aliased + + my_alias = aliased(MyMappedClass) + insp = inspect(my_alias) + + Attributes on :class:`.AliasedInsp` + include: + + * ``entity`` - the :class:`.AliasedClass` represented. + * ``mapper`` - the :class:`.Mapper` mapping the underlying class. + * ``selectable`` - the :class:`.Alias` construct which ultimately + represents an aliased :class:`.Table` or :class:`.Select` + construct. + * ``name`` - the name of the alias. Also is used as the attribute + name when returned in a result tuple from :class:`.Query`. + * ``with_polymorphic_mappers`` - collection of :class:`.Mapper` objects + indicating all those mappers expressed in the select construct + for the :class:`.AliasedClass`. + * ``polymorphic_on`` - an alternate column or SQL expression which + will be used as the "discriminator" for a polymorphic load. + + .. 
seealso:: + + :ref:`inspection_toplevel` + + """ + + def __init__(self, entity, mapper, selectable, name, + with_polymorphic_mappers, polymorphic_on, + _base_alias, _use_mapper_path, adapt_on_names): + self.entity = entity + self.mapper = mapper + self.selectable = selectable + self.name = name + self.with_polymorphic_mappers = with_polymorphic_mappers + self.polymorphic_on = polymorphic_on + self._base_alias = _base_alias or self + self._use_mapper_path = _use_mapper_path + + self._adapter = sql_util.ColumnAdapter( + selectable, equivalents=mapper._equivalent_columns, + adapt_on_names=adapt_on_names, anonymize_labels=True) + + self._adapt_on_names = adapt_on_names + self._target = mapper.class_ + + for poly in self.with_polymorphic_mappers: + if poly is not mapper: + setattr(self.entity, poly.class_.__name__, + AliasedClass(poly.class_, selectable, base_alias=self, + adapt_on_names=adapt_on_names, + use_mapper_path=_use_mapper_path)) + + is_aliased_class = True + "always returns True" + + @property + def class_(self): + """Return the mapped class ultimately represented by this + :class:`.AliasedInsp`.""" + return self.mapper.class_ + + @util.memoized_property + def _path_registry(self): + if self._use_mapper_path: + return self.mapper._path_registry + else: + return PathRegistry.per_mapper(self) + + def __getstate__(self): + return { + 'entity': self.entity, + 'mapper': self.mapper, + 'alias': self.selectable, + 'name': self.name, + 'adapt_on_names': self._adapt_on_names, + 'with_polymorphic_mappers': + self.with_polymorphic_mappers, + 'with_polymorphic_discriminator': + self.polymorphic_on, + 'base_alias': self._base_alias, + 'use_mapper_path': self._use_mapper_path + } + + def __setstate__(self, state): + self.__init__( + state['entity'], + state['mapper'], + state['alias'], + state['name'], + state['with_polymorphic_mappers'], + state['with_polymorphic_discriminator'], + state['base_alias'], + state['use_mapper_path'], + state['adapt_on_names'] + ) + + def _adapt_element(self, elem): + return self._adapter.traverse(elem).\ + _annotate({ + 'parententity': self, + 'parentmapper': self.mapper} + ) + + def _entity_for_mapper(self, mapper): + self_poly = self.with_polymorphic_mappers + if mapper in self_poly: + if mapper is self.mapper: + return self + else: + return getattr( + self.entity, mapper.class_.__name__)._aliased_insp + elif mapper.isa(self.mapper): + return self + else: + assert False, "mapper %s doesn't correspond to %s" % ( + mapper, self) + + @util.memoized_property + def _memoized_values(self): + return {} + + def _memo(self, key, callable_, *args, **kw): + if key in self._memoized_values: + return self._memoized_values[key] + else: + self._memoized_values[key] = value = callable_(*args, **kw) + return value + + def __repr__(self): + if self.with_polymorphic_mappers: + with_poly = "(%s)" % ", ".join( + mp.class_.__name__ for mp in self.with_polymorphic_mappers) + else: + with_poly = "" + return '' % ( + id(self), self.class_.__name__, with_poly) + + +inspection._inspects(AliasedClass)(lambda target: target._aliased_insp) +inspection._inspects(AliasedInsp)(lambda target: target) + + +def aliased(element, alias=None, name=None, flat=False, adapt_on_names=False): + """Produce an alias of the given element, usually an :class:`.AliasedClass` + instance. + + E.g.:: + + my_alias = aliased(MyClass) + + session.query(MyClass, my_alias).filter(MyClass.id > my_alias.id) + + The :func:`.aliased` function is used to create an ad-hoc mapping + of a mapped class to a new selectable. 
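+
+    As a sketch of such an ad-hoc mapping, assuming hypothetical names
+    ``User``, ``users_table`` and ``session``::
+
+        from sqlalchemy import select
+
+        subq = select([users_table]).\
+            where(users_table.c.id > 5).alias()
+        recent_users = aliased(User, subq)
+
+        session.query(recent_users).all()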
By default, a selectable + is generated from the normally mapped selectable (typically a + :class:`.Table`) using the :meth:`.FromClause.alias` method. + However, :func:`.aliased` can also be used to link the class to + a new :func:`.select` statement. Also, the :func:`.with_polymorphic` + function is a variant of :func:`.aliased` that is intended to specify + a so-called "polymorphic selectable", that corresponds to the union + of several joined-inheritance subclasses at once. + + For convenience, the :func:`.aliased` function also accepts plain + :class:`.FromClause` constructs, such as a :class:`.Table` or + :func:`.select` construct. In those cases, the :meth:`.FromClause.alias` + method is called on the object and the new :class:`.Alias` object + returned. The returned :class:`.Alias` is not ORM-mapped in this case. + + :param element: element to be aliased. Is normally a mapped class, + but for convenience can also be a :class:`.FromClause` element. + + :param alias: Optional selectable unit to map the element to. This should + normally be a :class:`.Alias` object corresponding to the :class:`.Table` + to which the class is mapped, or to a :func:`.select` construct that + is compatible with the mapping. By default, a simple anonymous + alias of the mapped table is generated. + + :param name: optional string name to use for the alias, if not specified + by the ``alias`` parameter. The name, among other things, forms the + attribute name that will be accessible via tuples returned by a + :class:`.Query` object. + + :param flat: Boolean, will be passed through to the + :meth:`.FromClause.alias` call so that aliases of :class:`.Join` objects + don't include an enclosing SELECT. This can lead to more efficient + queries in many circumstances. A JOIN against a nested JOIN will be + rewritten as a JOIN against an aliased SELECT subquery on backends that + don't support this syntax. + + .. versionadded:: 0.9.0 + + .. seealso:: :meth:`.Join.alias` + + :param adapt_on_names: if True, more liberal "matching" will be used when + mapping the mapped columns of the ORM entity to those of the + given selectable - a name-based match will be performed if the + given selectable doesn't otherwise have a column that corresponds + to one on the entity. The use case for this is when associating + an entity with some derived selectable such as one that uses + aggregate functions:: + + class UnitPrice(Base): + __tablename__ = 'unit_price' + ... + unit_id = Column(Integer) + price = Column(Numeric) + + aggregated_unit_price = Session.query( + func.sum(UnitPrice.price).label('price') + ).group_by(UnitPrice.unit_id).subquery() + + aggregated_unit_price = aliased(UnitPrice, + alias=aggregated_unit_price, adapt_on_names=True) + + Above, functions on ``aggregated_unit_price`` which refer to + ``.price`` will return the + ``func.sum(UnitPrice.price).label('price')`` column, as it is + matched on the name "price". Ordinarily, the "price" function + wouldn't have any "column correspondence" to the actual + ``UnitPrice.price`` column as it is not a proxy of the original. + + .. 
versionadded:: 0.7.3 + + + """ + if isinstance(element, expression.FromClause): + if adapt_on_names: + raise sa_exc.ArgumentError( + "adapt_on_names only applies to ORM elements" + ) + return element.alias(name, flat=flat) + else: + return AliasedClass(element, alias=alias, flat=flat, + name=name, adapt_on_names=adapt_on_names) + + +def with_polymorphic(base, classes, selectable=False, + flat=False, + polymorphic_on=None, aliased=False, + innerjoin=False, _use_mapper_path=False, + _existing_alias=None): + """Produce an :class:`.AliasedClass` construct which specifies + columns for descendant mappers of the given base. + + .. versionadded:: 0.8 + :func:`.orm.with_polymorphic` is in addition to the existing + :class:`.Query` method :meth:`.Query.with_polymorphic`, + which has the same purpose but is not as flexible in its usage. + + Using this method will ensure that each descendant mapper's + tables are included in the FROM clause, and will allow filter() + criterion to be used against those tables. The resulting + instances will also have those columns already loaded so that + no "post fetch" of those columns will be required. + + See the examples at :ref:`with_polymorphic`. + + :param base: Base class to be aliased. + + :param classes: a single class or mapper, or list of + class/mappers, which inherit from the base class. + Alternatively, it may also be the string ``'*'``, in which case + all descending mapped classes will be added to the FROM clause. + + :param aliased: when True, the selectable will be wrapped in an + alias, that is ``(SELECT * FROM ) AS anon_1``. + This can be important when using the with_polymorphic() + to create the target of a JOIN on a backend that does not + support parenthesized joins, such as SQLite and older + versions of MySQL. + + :param flat: Boolean, will be passed through to the + :meth:`.FromClause.alias` call so that aliases of :class:`.Join` + objects don't include an enclosing SELECT. This can lead to more + efficient queries in many circumstances. A JOIN against a nested JOIN + will be rewritten as a JOIN against an aliased SELECT subquery on + backends that don't support this syntax. + + Setting ``flat`` to ``True`` implies the ``aliased`` flag is + also ``True``. + + .. versionadded:: 0.9.0 + + .. seealso:: :meth:`.Join.alias` + + :param selectable: a table or select() statement that will + be used in place of the generated FROM clause. This argument is + required if any of the desired classes use concrete table + inheritance, since SQLAlchemy currently cannot generate UNIONs + among tables automatically. If used, the ``selectable`` argument + must represent the full set of tables and columns mapped by every + mapped class. Otherwise, the unaccounted mapped columns will + result in their table being appended directly to the FROM clause + which will usually lead to incorrect results. + + :param polymorphic_on: a column to be used as the "discriminator" + column for the given selectable. If not given, the polymorphic_on + attribute of the base classes' mapper will be used, if any. This + is useful for mappings that don't have polymorphic loading + behavior by default. + + :param innerjoin: if True, an INNER JOIN will be used. 
This should + only be specified if querying for one specific subtype only + """ + primary_mapper = _class_to_mapper(base) + if _existing_alias: + assert _existing_alias.mapper is primary_mapper + classes = util.to_set(classes) + new_classes = set([ + mp.class_ for mp in + _existing_alias.with_polymorphic_mappers]) + if classes == new_classes: + return _existing_alias + else: + classes = classes.union(new_classes) + mappers, selectable = primary_mapper.\ + _with_polymorphic_args(classes, selectable, + innerjoin=innerjoin) + if aliased or flat: + selectable = selectable.alias(flat=flat) + return AliasedClass(base, + selectable, + with_polymorphic_mappers=mappers, + with_polymorphic_discriminator=polymorphic_on, + use_mapper_path=_use_mapper_path) + + +def _orm_annotate(element, exclude=None): + """Deep copy the given ClauseElement, annotating each element with the + "_orm_adapt" flag. + + Elements within the exclude collection will be cloned but not annotated. + + """ + return sql_util._deep_annotate(element, {'_orm_adapt': True}, exclude) + + +def _orm_deannotate(element): + """Remove annotations that link a column to a particular mapping. + + Note this doesn't affect "remote" and "foreign" annotations + passed by the :func:`.orm.foreign` and :func:`.orm.remote` + annotators. + + """ + + return sql_util._deep_deannotate(element, + values=("_orm_adapt", "parententity") + ) + + +def _orm_full_deannotate(element): + return sql_util._deep_deannotate(element) + + +class _ORMJoin(expression.Join): + """Extend Join to support ORM constructs as input.""" + + __visit_name__ = expression.Join.__visit_name__ + + def __init__( + self, + left, right, onclause=None, isouter=False, + full=False, _left_memo=None, _right_memo=None): + + left_info = inspection.inspect(left) + left_orm_info = getattr(left, '_joined_from_info', left_info) + + right_info = inspection.inspect(right) + adapt_to = right_info.selectable + + self._joined_from_info = right_info + + self._left_memo = _left_memo + self._right_memo = _right_memo + + if isinstance(onclause, util.string_types): + onclause = getattr(left_orm_info.entity, onclause) + + if isinstance(onclause, attributes.QueryableAttribute): + on_selectable = onclause.comparator._source_selectable() + prop = onclause.property + elif isinstance(onclause, MapperProperty): + prop = onclause + on_selectable = prop.parent.selectable + else: + prop = None + + if prop: + if sql_util.clause_is_present( + on_selectable, left_info.selectable): + adapt_from = on_selectable + else: + adapt_from = left_info.selectable + + pj, sj, source, dest, \ + secondary, target_adapter = prop._create_joins( + source_selectable=adapt_from, + dest_selectable=adapt_to, + source_polymorphic=True, + dest_polymorphic=True, + of_type=right_info.mapper) + + if sj is not None: + if isouter: + # note this is an inner join from secondary->right + right = sql.join(secondary, right, sj) + onclause = pj + else: + left = sql.join(left, secondary, pj, isouter) + onclause = sj + else: + onclause = pj + self._target_adapter = target_adapter + + expression.Join.__init__(self, left, right, onclause, isouter, full) + + if not prop and getattr(right_info, 'mapper', None) \ + and right_info.mapper.single: + # if single inheritance target and we are using a manual + # or implicit ON clause, augment it the same way we'd augment the + # WHERE. 
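+            # e.g. for a hypothetical single-table subclass
+            # Manager(Employee), the ON clause is augmented with
+            # "AND employees.type IN ('manager')".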
+ single_crit = right_info.mapper._single_table_criterion + if single_crit is not None: + if right_info.is_aliased_class: + single_crit = right_info._adapter.traverse(single_crit) + self.onclause = self.onclause & single_crit + + def _splice_into_center(self, other): + """Splice a join into the center. + + Given join(a, b) and join(b, c), return join(a, b).join(c) + + """ + leftmost = other + while isinstance(leftmost, sql.Join): + leftmost = leftmost.left + + assert self.right is leftmost + + left = _ORMJoin( + self.left, other.left, + self.onclause, isouter=self.isouter, + _left_memo=self._left_memo, + _right_memo=other._left_memo + ) + + return _ORMJoin( + left, + other.right, + other.onclause, isouter=other.isouter, + _right_memo=other._right_memo + ) + + def join( + self, right, onclause=None, + isouter=False, full=False, join_to_left=None): + return _ORMJoin(self, right, onclause, full, isouter) + + def outerjoin( + self, right, onclause=None, + full=False, join_to_left=None): + return _ORMJoin(self, right, onclause, True, full=full) + + +def join( + left, right, onclause=None, isouter=False, + full=False, join_to_left=None): + r"""Produce an inner join between left and right clauses. + + :func:`.orm.join` is an extension to the core join interface + provided by :func:`.sql.expression.join()`, where the + left and right selectables may be not only core selectable + objects such as :class:`.Table`, but also mapped classes or + :class:`.AliasedClass` instances. The "on" clause can + be a SQL expression, or an attribute or string name + referencing a configured :func:`.relationship`. + + :func:`.orm.join` is not commonly needed in modern usage, + as its functionality is encapsulated within that of the + :meth:`.Query.join` method, which features a + significant amount of automation beyond :func:`.orm.join` + by itself. Explicit usage of :func:`.orm.join` + with :class:`.Query` involves usage of the + :meth:`.Query.select_from` method, as in:: + + from sqlalchemy.orm import join + session.query(User).\ + select_from(join(User, Address, User.addresses)).\ + filter(Address.email_address=='foo@bar.com') + + In modern SQLAlchemy the above join can be written more + succinctly as:: + + session.query(User).\ + join(User.addresses).\ + filter(Address.email_address=='foo@bar.com') + + See :meth:`.Query.join` for information on modern usage + of ORM level joins. + + .. versionchanged:: 0.8.1 - the ``join_to_left`` parameter + is no longer used, and is deprecated. + + """ + return _ORMJoin(left, right, onclause, isouter, full) + + +def outerjoin(left, right, onclause=None, full=False, join_to_left=None): + """Produce a left outer join between left and right clauses. + + This is the "outer join" version of the :func:`.orm.join` function, + featuring the same behavior except that an OUTER JOIN is generated. + See that function's documentation for other usage details. + + """ + return _ORMJoin(left, right, onclause, True, full) + + +def with_parent(instance, prop): + """Create filtering criterion that relates this query's primary entity + to the given related instance, using established :func:`.relationship()` + configuration. + + The SQL rendered is the same as that rendered when a lazy loader + would fire off from the given parent on that attribute, meaning + that the appropriate state is taken from the parent object in + Python without the need to render joins to the parent table + in the rendered statement. + + .. 
versionchanged:: 0.6.4 + This method accepts parent instances in all + persistence states, including transient, persistent, and detached. + Only the requisite primary key/foreign key attributes need to + be populated. Previous versions didn't work with transient + instances. + + :param instance: + An instance which has some :func:`.relationship`. + + :param property: + String property name, or class-bound attribute, which indicates + what relationship from the instance should be used to reconcile the + parent/child relationship. + + """ + if isinstance(prop, util.string_types): + mapper = object_mapper(instance) + prop = getattr(mapper.class_, prop).property + elif isinstance(prop, attributes.QueryableAttribute): + prop = prop.property + + return prop._with_parent(instance) + + +def has_identity(object): + """Return True if the given object has a database + identity. + + This typically corresponds to the object being + in either the persistent or detached state. + + .. seealso:: + + :func:`.was_deleted` + + """ + state = attributes.instance_state(object) + return state.has_identity + + +def was_deleted(object): + """Return True if the given object was deleted + within a session flush. + + This is regardless of whether or not the object is + persistent or detached. + + .. versionadded:: 0.8.0 + + .. seealso:: + + :attr:`.InstanceState.was_deleted` + + """ + + state = attributes.instance_state(object) + return state.was_deleted + + +def randomize_unitofwork(): + """Use random-ordering sets within the unit of work in order + to detect unit of work sorting issues. + + This is a utility function that can be used to help reproduce + inconsistent unit of work sorting issues. For example, + if two kinds of objects A and B are being inserted, and + B has a foreign key reference to A - the A must be inserted first. + However, if there is no relationship between A and B, the unit of work + won't know to perform this sorting, and an operation may or may not + fail, depending on how the ordering works out. Since Python sets + and dictionaries have non-deterministic ordering, such an issue may + occur on some runs and not on others, and in practice it tends to + have a great dependence on the state of the interpreter. This leads + to so-called "heisenbugs" where changing entirely irrelevant aspects + of the test program still cause the failure behavior to change. + + By calling ``randomize_unitofwork()`` when a script first runs, the + ordering of a key series of sets within the unit of work implementation + are randomized, so that the script can be minimized down to the + fundamental mapping and operation that's failing, while still reproducing + the issue on at least some runs. + + This utility is also available when running the test suite via the + ``--reversetop`` flag. + + .. versionadded:: 0.8.1 created a standalone version of the + ``--reversetop`` feature. 
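+
+    A reproduction script might begin with (sketch)::
+
+        from sqlalchemy.orm.util import randomize_unitofwork
+
+        # call before any mappers or sessions are exercised
+        randomize_unitofwork()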
+ + """ + from sqlalchemy.orm import unitofwork, session, mapper, dependency + from sqlalchemy.util import topological + from sqlalchemy.testing.util import RandomSet + topological.set = unitofwork.set = session.set = mapper.set = \ + dependency.set = RandomSet diff --git a/app/lib/sqlalchemy/pool.py b/app/lib/sqlalchemy/pool.py new file mode 100644 index 0000000..b58fdaa --- /dev/null +++ b/app/lib/sqlalchemy/pool.py @@ -0,0 +1,1445 @@ +# sqlalchemy/pool.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + + +"""Connection pooling for DB-API connections. + +Provides a number of connection pool implementations for a variety of +usage scenarios and thread behavior requirements imposed by the +application, DB-API or database itself. + +Also provides a DB-API 2.0 connection proxying mechanism allowing +regular DB-API connect() methods to be transparently managed by a +SQLAlchemy connection pool. +""" + +import time +import traceback +import weakref + +from . import exc, log, event, interfaces, util +from .util import queue as sqla_queue +from .util import threading, memoized_property, \ + chop_traceback + +from collections import deque +proxies = {} + + +def manage(module, **params): + r"""Return a proxy for a DB-API module that automatically + pools connections. + + Given a DB-API 2.0 module and pool management parameters, returns + a proxy for the module that will automatically pool connections, + creating new connection pools for each distinct set of connection + arguments sent to the decorated module's connect() function. + + :param module: a DB-API 2.0 database module + + :param poolclass: the class used by the pool module to provide + pooling. Defaults to :class:`.QueuePool`. + + :param \**params: will be passed through to *poolclass* + + """ + try: + return proxies[module] + except KeyError: + return proxies.setdefault(module, _DBProxy(module, **params)) + + +def clear_managers(): + """Remove all current DB-API 2.0 managers. + + All pools and connections are disposed. + """ + + for manager in proxies.values(): + manager.close() + proxies.clear() + +reset_rollback = util.symbol('reset_rollback') +reset_commit = util.symbol('reset_commit') +reset_none = util.symbol('reset_none') + + +class _ConnDialect(object): + + """partial implementation of :class:`.Dialect` + which provides DBAPI connection methods. + + When a :class:`.Pool` is combined with an :class:`.Engine`, + the :class:`.Engine` replaces this with its own + :class:`.Dialect`. + + """ + + def do_rollback(self, dbapi_connection): + dbapi_connection.rollback() + + def do_commit(self, dbapi_connection): + dbapi_connection.commit() + + def do_close(self, dbapi_connection): + dbapi_connection.close() + + +class Pool(log.Identified): + + """Abstract base class for connection pools.""" + + _dialect = _ConnDialect() + + def __init__(self, + creator, recycle=-1, echo=None, + use_threadlocal=False, + logging_name=None, + reset_on_return=True, + listeners=None, + events=None, + dialect=None, + _dispatch=None): + """ + Construct a Pool. + + :param creator: a callable function that returns a DB-API + connection object. The function will be called with + parameters. + + :param recycle: If set to non -1, number of seconds between + connection recycling, which means upon checkout, if this + timeout is surpassed the connection will be closed and + replaced with a newly opened connection. 
Defaults to -1. + + :param logging_name: String identifier which will be used within + the "name" field of logging records generated within the + "sqlalchemy.pool" logger. Defaults to a hexstring of the object's + id. + + :param echo: If True, connections being pulled and retrieved + from the pool will be logged to the standard output, as well + as pool sizing information. Echoing can also be achieved by + enabling logging for the "sqlalchemy.pool" + namespace. Defaults to False. + + :param use_threadlocal: If set to True, repeated calls to + :meth:`connect` within the same application thread will be + guaranteed to return the same connection object, if one has + already been retrieved from the pool and has not been + returned yet. Offers a slight performance advantage at the + cost of individual transactions by default. The + :meth:`.Pool.unique_connection` method is provided to return + a consistently unique connection to bypass this behavior + when the flag is set. + + .. warning:: The :paramref:`.Pool.use_threadlocal` flag + **does not affect the behavior** of :meth:`.Engine.connect`. + :meth:`.Engine.connect` makes use of the + :meth:`.Pool.unique_connection` method which **does not use thread + local context**. To produce a :class:`.Connection` which refers + to the :meth:`.Pool.connect` method, use + :meth:`.Engine.contextual_connect`. + + Note that other SQLAlchemy connectivity systems such as + :meth:`.Engine.execute` as well as the orm + :class:`.Session` make use of + :meth:`.Engine.contextual_connect` internally, so these functions + are compatible with the :paramref:`.Pool.use_threadlocal` setting. + + .. seealso:: + + :ref:`threadlocal_strategy` - contains detail on the + "threadlocal" engine strategy, which provides a more comprehensive + approach to "threadlocal" connectivity for the specific + use case of using :class:`.Engine` and :class:`.Connection` objects + directly. + + :param reset_on_return: Determine steps to take on + connections as they are returned to the pool. + reset_on_return can have any of these values: + + * ``"rollback"`` - call rollback() on the connection, + to release locks and transaction resources. + This is the default value. The vast majority + of use cases should leave this value set. + * ``True`` - same as 'rollback', this is here for + backwards compatibility. + * ``"commit"`` - call commit() on the connection, + to release locks and transaction resources. + A commit here may be desirable for databases that + cache query plans if a commit is emitted, + such as Microsoft SQL Server. However, this + value is more dangerous than 'rollback' because + any data changes present on the transaction + are committed unconditionally. + * ``None`` - don't do anything on the connection. + This setting should only be made on a database + that has no transaction support at all, + namely MySQL MyISAM. By not doing anything, + performance can be improved. This + setting should **never be selected** for a + database that supports transactions, + as it will lead to deadlocks and stale + state. + * ``"none"`` - same as ``None`` + + .. versionadded:: 0.9.10 + + * ``False`` - same as None, this is here for + backwards compatibility. + + .. versionchanged:: 0.7.6 + :paramref:`.Pool.reset_on_return` accepts ``"rollback"`` + and ``"commit"`` arguments. + + :param events: a list of 2-tuples, each of the form + ``(callable, target)`` which will be passed to :func:`.event.listen` + upon construction. 
Provided here so that event listeners + can be assigned via :func:`.create_engine` before dialect-level + listeners are applied. + + :param listeners: Deprecated. A list of + :class:`~sqlalchemy.interfaces.PoolListener`-like objects or + dictionaries of callables that receive events when DB-API + connections are created, checked out and checked in to the + pool. This has been superseded by + :func:`~sqlalchemy.event.listen`. + + :param dialect: a :class:`.Dialect` that will handle the job + of calling rollback(), close(), or commit() on DBAPI connections. + If omitted, a built-in "stub" dialect is used. Applications that + make use of :func:`~.create_engine` should not use this parameter + as it is handled by the engine creation strategy. + + .. versionadded:: 1.1 - ``dialect`` is now a public parameter + to the :class:`.Pool`. + + """ + if logging_name: + self.logging_name = self._orig_logging_name = logging_name + else: + self._orig_logging_name = None + + log.instance_logger(self, echoflag=echo) + self._threadconns = threading.local() + self._creator = creator + self._recycle = recycle + self._invalidate_time = 0 + self._use_threadlocal = use_threadlocal + if reset_on_return in ('rollback', True, reset_rollback): + self._reset_on_return = reset_rollback + elif reset_on_return in ('none', None, False, reset_none): + self._reset_on_return = reset_none + elif reset_on_return in ('commit', reset_commit): + self._reset_on_return = reset_commit + else: + raise exc.ArgumentError( + "Invalid value for 'reset_on_return': %r" + % reset_on_return) + + self.echo = echo + + if _dispatch: + self.dispatch._update(_dispatch, only_propagate=False) + if dialect: + self._dialect = dialect + if events: + for fn, target in events: + event.listen(self, target, fn) + if listeners: + util.warn_deprecated( + "The 'listeners' argument to Pool (and " + "create_engine()) is deprecated. Use event.listen().") + for l in listeners: + self.add_listener(l) + + @property + def _creator(self): + return self.__dict__['_creator'] + + @_creator.setter + def _creator(self, creator): + self.__dict__['_creator'] = creator + self._invoke_creator = self._should_wrap_creator(creator) + + def _should_wrap_creator(self, creator): + """Detect if creator accepts a single argument, or is sent + as a legacy style no-arg function. + + """ + + try: + argspec = util.get_callable_argspec(self._creator, no_self=True) + except TypeError: + return lambda crec: creator() + + defaulted = argspec[3] is not None and len(argspec[3]) or 0 + positionals = len(argspec[0]) - defaulted + + # look for the exact arg signature that DefaultStrategy + # sends us + if (argspec[0], argspec[3]) == (['connection_record'], (None,)): + return creator + # or just a single positional + elif positionals == 1: + return creator + # all other cases, just wrap and assume legacy "creator" callable + # thing + else: + return lambda crec: creator() + + def _close_connection(self, connection): + self.logger.debug("Closing connection %r", connection) + + try: + self._dialect.do_close(connection) + except Exception: + self.logger.error("Exception closing connection %r", + connection, exc_info=True) + + @util.deprecated( + 2.7, "Pool.add_listener is deprecated. Use event.listen()") + def add_listener(self, listener): + """Add a :class:`.PoolListener`-like object to this pool. + + ``listener`` may be an object that implements some or all of + PoolListener, or a dictionary of callables containing implementations + of some or all of the named methods in PoolListener. 
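+
+        The modern equivalent uses :func:`.event.listen` directly;
+        a sketch, with ``pool`` an existing :class:`.Pool`::
+
+            from sqlalchemy import event
+
+            def on_checkout(dbapi_conn, connection_record,
+                            connection_proxy):
+                pass  # runs on each connection checkout
+
+            event.listen(pool, 'checkout', on_checkout)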
+ + """ + interfaces.PoolListener._adapt_listener(self, listener) + + def unique_connection(self): + """Produce a DBAPI connection that is not referenced by any + thread-local context. + + This method is equivalent to :meth:`.Pool.connect` when the + :paramref:`.Pool.use_threadlocal` flag is not set to True. + When :paramref:`.Pool.use_threadlocal` is True, the + :meth:`.Pool.unique_connection` method provides a means of bypassing + the threadlocal context. + + """ + return _ConnectionFairy._checkout(self) + + def _create_connection(self): + """Called by subclasses to create a new ConnectionRecord.""" + + return _ConnectionRecord(self) + + def _invalidate(self, connection, exception=None): + """Mark all connections established within the generation + of the given connection as invalidated. + + If this pool's last invalidate time is before when the given + connection was created, update the timestamp til now. Otherwise, + no action is performed. + + Connections with a start time prior to this pool's invalidation + time will be recycled upon next checkout. + """ + + rec = getattr(connection, "_connection_record", None) + if not rec or self._invalidate_time < rec.starttime: + self._invalidate_time = time.time() + if getattr(connection, 'is_valid', False): + connection.invalidate(exception) + + def recreate(self): + """Return a new :class:`.Pool`, of the same class as this one + and configured with identical creation arguments. + + This method is used in conjunction with :meth:`dispose` + to close out an entire :class:`.Pool` and create a new one in + its place. + + """ + + raise NotImplementedError() + + def dispose(self): + """Dispose of this pool. + + This method leaves the possibility of checked-out connections + remaining open, as it only affects connections that are + idle in the pool. + + See also the :meth:`Pool.recreate` method. + + """ + + raise NotImplementedError() + + def connect(self): + """Return a DBAPI connection from the pool. + + The connection is instrumented such that when its + ``close()`` method is called, the connection will be returned to + the pool. + + """ + if not self._use_threadlocal: + return _ConnectionFairy._checkout(self) + + try: + rec = self._threadconns.current() + except AttributeError: + pass + else: + if rec is not None: + return rec._checkout_existing() + + return _ConnectionFairy._checkout(self, self._threadconns) + + def _return_conn(self, record): + """Given a _ConnectionRecord, return it to the :class:`.Pool`. + + This method is called when an instrumented DBAPI connection + has its ``close()`` method called. + + """ + if self._use_threadlocal: + try: + del self._threadconns.current + except AttributeError: + pass + self._do_return_conn(record) + + def _do_get(self): + """Implementation for :meth:`get`, supplied by subclasses.""" + + raise NotImplementedError() + + def _do_return_conn(self, conn): + """Implementation for :meth:`return_conn`, supplied by subclasses.""" + + raise NotImplementedError() + + def status(self): + raise NotImplementedError() + + +class _ConnectionRecord(object): + + """Internal object which maintains an individual DBAPI connection + referenced by a :class:`.Pool`. + + The :class:`._ConnectionRecord` object always exists for any particular + DBAPI connection whether or not that DBAPI connection has been + "checked out". This is in contrast to the :class:`._ConnectionFairy` + which is only a public facade to the DBAPI connection while it is checked + out. 
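+
+    For example, a :meth:`.PoolEvents.checkout` hook receives the record
+    and can stash per-connection data on it (sketch; ``engine`` assumed
+    to be an existing :class:`.Engine`)::
+
+        from sqlalchemy import event
+
+        @event.listens_for(engine, "checkout")
+        def on_checkout(dbapi_conn, record, proxy):
+            record.info['checkouts'] = record.info.get('checkouts', 0) + 1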
+ + A :class:`._ConnectionRecord` may exist for a span longer than that + of a single DBAPI connection. For example, if the + :meth:`._ConnectionRecord.invalidate` + method is called, the DBAPI connection associated with this + :class:`._ConnectionRecord` + will be discarded, but the :class:`._ConnectionRecord` may be used again, + in which case a new DBAPI connection is produced when the :class:`.Pool` + next uses this record. + + The :class:`._ConnectionRecord` is delivered along with connection + pool events, including :meth:`.PoolEvents.connect` and + :meth:`.PoolEvents.checkout`, however :class:`._ConnectionRecord` still + remains an internal object whose API and internals may change. + + .. seealso:: + + :class:`._ConnectionFairy` + + """ + + def __init__(self, pool, connect=True): + self.__pool = pool + if connect: + self.__connect(first_connect_check=True) + self.finalize_callback = deque() + + fairy_ref = None + + starttime = None + + connection = None + """A reference to the actual DBAPI connection being tracked. + + May be ``None`` if this :class:`._ConnectionRecord` has been marked + as invalidated; a new DBAPI connection may replace it if the owning + pool calls upon this :class:`._ConnectionRecord` to reconnect. + + """ + + _soft_invalidate_time = 0 + + @util.memoized_property + def info(self): + """The ``.info`` dictionary associated with the DBAPI connection. + + This dictionary is shared among the :attr:`._ConnectionFairy.info` + and :attr:`.Connection.info` accessors. + + .. note:: + + The lifespan of this dictionary is linked to the + DBAPI connection itself, meaning that it is **discarded** each time + the DBAPI connection is closed and/or invalidated. The + :attr:`._ConnectionRecord.record_info` dictionary remains + persistent throughout the lifespan of the + :class:`._ConnectionRecord` container. + + """ + return {} + + @util.memoized_property + def record_info(self): + """An "info' dictionary associated with the connection record + itself. + + Unlike the :attr:`._ConnectionRecord.info` dictionary, which is linked + to the lifespan of the DBAPI connection, this dictionary is linked + to the lifespan of the :class:`._ConnectionRecord` container itself + and will remain persisent throughout the life of the + :class:`._ConnectionRecord`. + + .. versionadded:: 1.1 + + """ + return {} + + @classmethod + def checkout(cls, pool): + rec = pool._do_get() + try: + dbapi_connection = rec.get_connection() + except: + with util.safe_reraise(): + rec.checkin() + echo = pool._should_log_debug() + fairy = _ConnectionFairy(dbapi_connection, rec, echo) + rec.fairy_ref = weakref.ref( + fairy, + lambda ref: _finalize_fairy and + _finalize_fairy( + dbapi_connection, + rec, pool, ref, echo) + ) + _refs.add(rec) + if echo: + pool.logger.debug("Connection %r checked out from pool", + dbapi_connection) + return fairy + + def checkin(self): + self.fairy_ref = None + connection = self.connection + pool = self.__pool + while self.finalize_callback: + finalizer = self.finalize_callback.pop() + finalizer(connection) + if pool.dispatch.checkin: + pool.dispatch.checkin(connection, self) + pool._return_conn(self) + + @property + def in_use(self): + return self.fairy_ref is not None + + @property + def last_connect_time(self): + return self.starttime + + def close(self): + if self.connection is not None: + self.__close() + + def invalidate(self, e=None, soft=False): + """Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`. 
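+
+        E.g., a "soft" invalidation that leaves the current connection in
+        place and recycles it on next checkout (sketch)::
+
+            record.invalidate(e=err, soft=True)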
+ + This method is called for all connection invalidations, including + when the :meth:`._ConnectionFairy.invalidate` or + :meth:`.Connection.invalidate` methods are called, as well as when any + so-called "automatic invalidation" condition occurs. + + :param e: an exception object indicating a reason for the invalidation. + + :param soft: if True, the connection isn't closed; instead, this + connection will be recycled on next checkout. + + .. versionadded:: 1.0.3 + + .. seealso:: + + :ref:`pool_connection_invalidation` + + """ + # already invalidated + if self.connection is None: + return + if soft: + self.__pool.dispatch.soft_invalidate(self.connection, self, e) + else: + self.__pool.dispatch.invalidate(self.connection, self, e) + if e is not None: + self.__pool.logger.info( + "%sInvalidate connection %r (reason: %s:%s)", + "Soft " if soft else "", + self.connection, e.__class__.__name__, e) + else: + self.__pool.logger.info( + "%sInvalidate connection %r", + "Soft " if soft else "", + self.connection) + if soft: + self._soft_invalidate_time = time.time() + else: + self.__close() + self.connection = None + + def get_connection(self): + recycle = False + if self.connection is None: + self.info.clear() + self.__connect() + elif self.__pool._recycle > -1 and \ + time.time() - self.starttime > self.__pool._recycle: + self.__pool.logger.info( + "Connection %r exceeded timeout; recycling", + self.connection) + recycle = True + elif self.__pool._invalidate_time > self.starttime: + self.__pool.logger.info( + "Connection %r invalidated due to pool invalidation; " + + "recycling", + self.connection + ) + recycle = True + elif self._soft_invalidate_time > self.starttime: + self.__pool.logger.info( + "Connection %r invalidated due to local soft invalidation; " + + "recycling", + self.connection + ) + recycle = True + + if recycle: + self.__close() + self.info.clear() + + self.__connect() + return self.connection + + def __close(self): + self.finalize_callback.clear() + if self.__pool.dispatch.close: + self.__pool.dispatch.close(self.connection, self) + self.__pool._close_connection(self.connection) + self.connection = None + + def __connect(self, first_connect_check=False): + pool = self.__pool + + # ensure any existing connection is removed, so that if + # creator fails, this attribute stays None + self.connection = None + try: + self.starttime = time.time() + connection = pool._invoke_creator(self) + pool.logger.debug("Created new connection %r", connection) + self.connection = connection + except Exception as e: + pool.logger.debug("Error on connect(): %s", e) + raise + else: + if first_connect_check: + pool.dispatch.first_connect.\ + for_modify(pool.dispatch).\ + exec_once(self.connection, self) + if pool.dispatch.connect: + pool.dispatch.connect(self.connection, self) + + +def _finalize_fairy(connection, connection_record, + pool, ref, echo, fairy=None): + """Cleanup for a :class:`._ConnectionFairy` whether or not it's already + been garbage collected. 
+ + """ + _refs.discard(connection_record) + + if ref is not None and \ + connection_record.fairy_ref is not ref: + return + + if connection is not None: + if connection_record and echo: + pool.logger.debug("Connection %r being returned to pool", + connection) + + try: + fairy = fairy or _ConnectionFairy( + connection, connection_record, echo) + assert fairy.connection is connection + fairy._reset(pool) + + # Immediately close detached instances + if not connection_record: + if pool.dispatch.close_detached: + pool.dispatch.close_detached(connection) + pool._close_connection(connection) + except BaseException as e: + pool.logger.error( + "Exception during reset or similar", exc_info=True) + if connection_record: + connection_record.invalidate(e=e) + if not isinstance(e, Exception): + raise + + if connection_record: + connection_record.checkin() + + +_refs = set() + + +class _ConnectionFairy(object): + + """Proxies a DBAPI connection and provides return-on-dereference + support. + + This is an internal object used by the :class:`.Pool` implementation + to provide context management to a DBAPI connection delivered by + that :class:`.Pool`. + + The name "fairy" is inspired by the fact that the + :class:`._ConnectionFairy` object's lifespan is transitory, as it lasts + only for the length of a specific DBAPI connection being checked out from + the pool, and additionally that as a transparent proxy, it is mostly + invisible. + + .. seealso:: + + :class:`._ConnectionRecord` + + """ + + def __init__(self, dbapi_connection, connection_record, echo): + self.connection = dbapi_connection + self._connection_record = connection_record + self._echo = echo + + connection = None + """A reference to the actual DBAPI connection being tracked.""" + + _connection_record = None + """A reference to the :class:`._ConnectionRecord` object associated + with the DBAPI connection. + + This is currently an internal accessor which is subject to change. + + """ + + _reset_agent = None + """Refer to an object with a ``.commit()`` and ``.rollback()`` method; + if non-None, the "reset-on-return" feature will call upon this object + rather than directly against the dialect-level do_rollback() and + do_commit() methods. + + In practice, a :class:`.Connection` assigns a :class:`.Transaction` object + to this variable when one is in scope so that the :class:`.Transaction` + takes the job of committing or rolling back on return if + :meth:`.Connection.close` is called while the :class:`.Transaction` + still exists. + + This is essentially an "event handler" of sorts but is simplified as an + instance variable both for performance/simplicity as well as that there + can only be one "reset agent" at a time. 
+ """ + + @classmethod + def _checkout(cls, pool, threadconns=None, fairy=None): + if not fairy: + fairy = _ConnectionRecord.checkout(pool) + + fairy._pool = pool + fairy._counter = 0 + + if threadconns is not None: + threadconns.current = weakref.ref(fairy) + + if fairy.connection is None: + raise exc.InvalidRequestError("This connection is closed") + fairy._counter += 1 + + if not pool.dispatch.checkout or fairy._counter != 1: + return fairy + + # Pool listeners can trigger a reconnection on checkout + attempts = 2 + while attempts > 0: + try: + pool.dispatch.checkout(fairy.connection, + fairy._connection_record, + fairy) + return fairy + except exc.DisconnectionError as e: + pool.logger.info( + "Disconnection detected on checkout: %s", e) + fairy._connection_record.invalidate(e) + try: + fairy.connection = \ + fairy._connection_record.get_connection() + except: + with util.safe_reraise(): + fairy._connection_record.checkin() + + attempts -= 1 + + pool.logger.info("Reconnection attempts exhausted on checkout") + fairy.invalidate() + raise exc.InvalidRequestError("This connection is closed") + + def _checkout_existing(self): + return _ConnectionFairy._checkout(self._pool, fairy=self) + + def _checkin(self): + _finalize_fairy(self.connection, self._connection_record, + self._pool, None, self._echo, fairy=self) + self.connection = None + self._connection_record = None + + _close = _checkin + + def _reset(self, pool): + if pool.dispatch.reset: + pool.dispatch.reset(self, self._connection_record) + if pool._reset_on_return is reset_rollback: + if self._echo: + pool.logger.debug("Connection %s rollback-on-return%s", + self.connection, + ", via agent" + if self._reset_agent else "") + if self._reset_agent: + self._reset_agent.rollback() + else: + pool._dialect.do_rollback(self) + elif pool._reset_on_return is reset_commit: + if self._echo: + pool.logger.debug("Connection %s commit-on-return%s", + self.connection, + ", via agent" + if self._reset_agent else "") + if self._reset_agent: + self._reset_agent.commit() + else: + pool._dialect.do_commit(self) + + @property + def _logger(self): + return self._pool.logger + + @property + def is_valid(self): + """Return True if this :class:`._ConnectionFairy` still refers + to an active DBAPI connection.""" + + return self.connection is not None + + @util.memoized_property + def info(self): + """Info dictionary associated with the underlying DBAPI connection + referred to by this :class:`.ConnectionFairy`, allowing user-defined + data to be associated with the connection. + + The data here will follow along with the DBAPI connection including + after it is returned to the connection pool and used again + in subsequent instances of :class:`._ConnectionFairy`. It is shared + with the :attr:`._ConnectionRecord.info` and :attr:`.Connection.info` + accessors. + + The dictionary associated with a particular DBAPI connection is + discarded when the connection itself is discarded. + + """ + return self._connection_record.info + + @property + def record_info(self): + """Info dictionary associated with the :class:`._ConnectionRecord + container referred to by this :class:`.ConnectionFairy`. + + Unlike the :attr:`._ConnectionFairy.info` dictionary, the lifespan + of this dictionary is persistent across connections that are + disconnected and/or invalidated within the lifespan of a + :class:`._ConnectionRecord`. + + .. 
versionadded:: 1.1 + + """ + if self._connection_record: + return self._connection_record.record_info + else: + return None + + def invalidate(self, e=None, soft=False): + """Mark this connection as invalidated. + + This method can be called directly, and is also called as a result + of the :meth:`.Connection.invalidate` method. When invoked, + the DBAPI connection is immediately closed and discarded from + further use by the pool. The invalidation mechanism proceeds + via the :meth:`._ConnectionRecord.invalidate` internal method. + + :param e: an exception object indicating a reason for the invalidation. + + :param soft: if True, the connection isn't closed; instead, this + connection will be recycled on next checkout. + + .. versionadded:: 1.0.3 + + .. seealso:: + + :ref:`pool_connection_invalidation` + + """ + + if self.connection is None: + util.warn("Can't invalidate an already-closed connection.") + return + if self._connection_record: + self._connection_record.invalidate(e=e, soft=soft) + if not soft: + self.connection = None + self._checkin() + + def cursor(self, *args, **kwargs): + """Return a new DBAPI cursor for the underlying connection. + + This method is a proxy for the ``connection.cursor()`` DBAPI + method. + + """ + return self.connection.cursor(*args, **kwargs) + + def __getattr__(self, key): + return getattr(self.connection, key) + + def detach(self): + """Separate this connection from its Pool. + + This means that the connection will no longer be returned to the + pool when closed, and will instead be literally closed. The + containing ConnectionRecord is separated from the DB-API connection, + and will create a new connection when next used. + + Note that any overall connection limiting constraints imposed by a + Pool implementation may be violated after a detach, as the detached + connection is removed from the pool's knowledge and control. + """ + + if self._connection_record is not None: + rec = self._connection_record + _refs.remove(rec) + rec.fairy_ref = None + rec.connection = None + # TODO: should this be _return_conn? + self._pool._do_return_conn(self._connection_record) + self.info = self.info.copy() + self._connection_record = None + + if self._pool.dispatch.detach: + self._pool.dispatch.detach(self.connection, rec) + + def close(self): + self._counter -= 1 + if self._counter == 0: + self._checkin() + + +class SingletonThreadPool(Pool): + + """A Pool that maintains one connection per thread. + + Maintains one connection per each thread, never moving a connection to a + thread other than the one which it was created in. + + .. warning:: the :class:`.SingletonThreadPool` will call ``.close()`` + on arbitrary connections that exist beyond the size setting of + ``pool_size``, e.g. if more unique **thread identities** + than what ``pool_size`` states are used. This cleanup is + non-deterministic and not sensitive to whether or not the connections + linked to those thread identities are currently in use. + + :class:`.SingletonThreadPool` may be improved in a future release, + however in its current status it is generally used only for test + scenarios using a SQLite ``:memory:`` database and is not recommended + for production use. + + + Options are the same as those of :class:`.Pool`, as well as: + + :param pool_size: The number of threads in which to maintain connections + at once. Defaults to five. + + :class:`.SingletonThreadPool` is used by the SQLite dialect + automatically when a memory-based database is used. + See :ref:`sqlite_toplevel`. 
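+
+    E.g., the typical test-scenario configuration (sketch)::
+
+        from sqlalchemy import create_engine
+        from sqlalchemy.pool import SingletonThreadPool
+
+        engine = create_engine("sqlite://",
+                               poolclass=SingletonThreadPool,
+                               pool_size=5)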
+ + """ + + def __init__(self, creator, pool_size=5, **kw): + kw['use_threadlocal'] = True + Pool.__init__(self, creator, **kw) + self._conn = threading.local() + self._all_conns = set() + self.size = pool_size + + def recreate(self): + self.logger.info("Pool recreating") + return self.__class__(self._creator, + pool_size=self.size, + recycle=self._recycle, + echo=self.echo, + logging_name=self._orig_logging_name, + use_threadlocal=self._use_threadlocal, + reset_on_return=self._reset_on_return, + _dispatch=self.dispatch, + dialect=self._dialect) + + def dispose(self): + """Dispose of this pool.""" + + for conn in self._all_conns: + try: + conn.close() + except Exception: + # pysqlite won't even let you close a conn from a thread + # that didn't create it + pass + + self._all_conns.clear() + + def _cleanup(self): + while len(self._all_conns) >= self.size: + c = self._all_conns.pop() + c.close() + + def status(self): + return "SingletonThreadPool id:%d size: %d" % \ + (id(self), len(self._all_conns)) + + def _do_return_conn(self, conn): + pass + + def _do_get(self): + try: + c = self._conn.current() + if c: + return c + except AttributeError: + pass + c = self._create_connection() + self._conn.current = weakref.ref(c) + if len(self._all_conns) >= self.size: + self._cleanup() + self._all_conns.add(c) + return c + + +class QueuePool(Pool): + + """A :class:`.Pool` that imposes a limit on the number of open connections. + + :class:`.QueuePool` is the default pooling implementation used for + all :class:`.Engine` objects, unless the SQLite dialect is in use. + + """ + + def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30, + **kw): + r""" + Construct a QueuePool. + + :param creator: a callable function that returns a DB-API + connection object, same as that of :paramref:`.Pool.creator`. + + :param pool_size: The size of the pool to be maintained, + defaults to 5. This is the largest number of connections that + will be kept persistently in the pool. Note that the pool + begins with no connections; once this number of connections + is requested, that number of connections will remain. + ``pool_size`` can be set to 0 to indicate no size limit; to + disable pooling, use a :class:`~sqlalchemy.pool.NullPool` + instead. + + :param max_overflow: The maximum overflow size of the + pool. When the number of checked-out connections reaches the + size set in pool_size, additional connections will be + returned up to this limit. When those additional connections + are returned to the pool, they are disconnected and + discarded. It follows then that the total number of + simultaneous connections the pool will allow is pool_size + + `max_overflow`, and the total number of "sleeping" + connections the pool will allow is pool_size. `max_overflow` + can be set to -1 to indicate no overflow limit; no limit + will be placed on the total number of concurrent + connections. Defaults to 10. + + :param timeout: The number of seconds to wait before giving up + on returning a connection. Defaults to 30. + + :param \**kw: Other keyword arguments including + :paramref:`.Pool.recycle`, :paramref:`.Pool.echo`, + :paramref:`.Pool.reset_on_return` and others are passed to the + :class:`.Pool` constructor. 
+ + """ + Pool.__init__(self, creator, **kw) + self._pool = sqla_queue.Queue(pool_size) + self._overflow = 0 - pool_size + self._max_overflow = max_overflow + self._timeout = timeout + self._overflow_lock = threading.Lock() + + def _do_return_conn(self, conn): + try: + self._pool.put(conn, False) + except sqla_queue.Full: + try: + conn.close() + finally: + self._dec_overflow() + + def _do_get(self): + use_overflow = self._max_overflow > -1 + + try: + wait = use_overflow and self._overflow >= self._max_overflow + return self._pool.get(wait, self._timeout) + except sqla_queue.Empty: + if use_overflow and self._overflow >= self._max_overflow: + if not wait: + return self._do_get() + else: + raise exc.TimeoutError( + "QueuePool limit of size %d overflow %d reached, " + "connection timed out, timeout %d" % + (self.size(), self.overflow(), self._timeout)) + + if self._inc_overflow(): + try: + return self._create_connection() + except: + with util.safe_reraise(): + self._dec_overflow() + else: + return self._do_get() + + def _inc_overflow(self): + if self._max_overflow == -1: + self._overflow += 1 + return True + with self._overflow_lock: + if self._overflow < self._max_overflow: + self._overflow += 1 + return True + else: + return False + + def _dec_overflow(self): + if self._max_overflow == -1: + self._overflow -= 1 + return True + with self._overflow_lock: + self._overflow -= 1 + return True + + def recreate(self): + self.logger.info("Pool recreating") + return self.__class__(self._creator, pool_size=self._pool.maxsize, + max_overflow=self._max_overflow, + timeout=self._timeout, + recycle=self._recycle, echo=self.echo, + logging_name=self._orig_logging_name, + use_threadlocal=self._use_threadlocal, + reset_on_return=self._reset_on_return, + _dispatch=self.dispatch, + dialect=self._dialect) + + def dispose(self): + while True: + try: + conn = self._pool.get(False) + conn.close() + except sqla_queue.Empty: + break + + self._overflow = 0 - self.size() + self.logger.info("Pool disposed. %s", self.status()) + + def status(self): + return "Pool size: %d Connections in pool: %d "\ + "Current Overflow: %d Current Checked out "\ + "connections: %d" % (self.size(), + self.checkedin(), + self.overflow(), + self.checkedout()) + + def size(self): + return self._pool.maxsize + + def checkedin(self): + return self._pool.qsize() + + def overflow(self): + return self._overflow + + def checkedout(self): + return self._pool.maxsize - self._pool.qsize() + self._overflow + + +class NullPool(Pool): + + """A Pool which does not pool connections. + + Instead it literally opens and closes the underlying DB-API connection + per each connection open/close. + + Reconnect-related functions such as ``recycle`` and connection + invalidation are not supported by this Pool implementation, since + no connections are held persistently. + + .. versionchanged:: 0.7 + :class:`.NullPool` is used by the SQlite dialect automatically + when a file-based database is used. See :ref:`sqlite_toplevel`. 
+ + """ + + def status(self): + return "NullPool" + + def _do_return_conn(self, conn): + conn.close() + + def _do_get(self): + return self._create_connection() + + def recreate(self): + self.logger.info("Pool recreating") + + return self.__class__(self._creator, + recycle=self._recycle, + echo=self.echo, + logging_name=self._orig_logging_name, + use_threadlocal=self._use_threadlocal, + reset_on_return=self._reset_on_return, + _dispatch=self.dispatch, + dialect=self._dialect) + + def dispose(self): + pass + + +class StaticPool(Pool): + + """A Pool of exactly one connection, used for all requests. + + Reconnect-related functions such as ``recycle`` and connection + invalidation (which is also used to support auto-reconnect) are not + currently supported by this Pool implementation but may be implemented + in a future release. + + """ + + @memoized_property + def _conn(self): + return self._creator() + + @memoized_property + def connection(self): + return _ConnectionRecord(self) + + def status(self): + return "StaticPool" + + def dispose(self): + if '_conn' in self.__dict__: + self._conn.close() + self._conn = None + + def recreate(self): + self.logger.info("Pool recreating") + return self.__class__(creator=self._creator, + recycle=self._recycle, + use_threadlocal=self._use_threadlocal, + reset_on_return=self._reset_on_return, + echo=self.echo, + logging_name=self._orig_logging_name, + _dispatch=self.dispatch, + dialect=self._dialect) + + def _create_connection(self): + return self._conn + + def _do_return_conn(self, conn): + pass + + def _do_get(self): + return self.connection + + +class AssertionPool(Pool): + + """A :class:`.Pool` that allows at most one checked out connection at + any given time. + + This will raise an exception if more than one connection is checked out + at a time. Useful for debugging code that is using more connections + than desired. + + .. versionchanged:: 0.7 + :class:`.AssertionPool` also logs a traceback of where + the original connection was checked out, and reports + this in the assertion error raised. + + """ + + def __init__(self, *args, **kw): + self._conn = None + self._checked_out = False + self._store_traceback = kw.pop('store_traceback', True) + self._checkout_traceback = None + Pool.__init__(self, *args, **kw) + + def status(self): + return "AssertionPool" + + def _do_return_conn(self, conn): + if not self._checked_out: + raise AssertionError("connection is not checked out") + self._checked_out = False + assert conn is self._conn + + def dispose(self): + self._checked_out = False + if self._conn: + self._conn.close() + + def recreate(self): + self.logger.info("Pool recreating") + return self.__class__(self._creator, echo=self.echo, + logging_name=self._orig_logging_name, + _dispatch=self.dispatch, + dialect=self._dialect) + + def _do_get(self): + if self._checked_out: + if self._checkout_traceback: + suffix = ' at:\n%s' % ''.join( + chop_traceback(self._checkout_traceback)) + else: + suffix = '' + raise AssertionError("connection is already checked out" + suffix) + + if not self._conn: + self._conn = self._create_connection() + + self._checked_out = True + if self._store_traceback: + self._checkout_traceback = traceback.format_stack() + return self._conn + + +class _DBProxy(object): + + """Layers connection pooling behavior on top of a standard DB-API module. + + Proxies a DB-API 2.0 connect() call to a connection pool keyed to the + specific connect parameters. Other functions and attributes are delegated + to the underlying DB-API module. 
+ """ + + def __init__(self, module, poolclass=QueuePool, **kw): + """Initializes a new proxy. + + module + a DB-API 2.0 module + + poolclass + a Pool class, defaulting to QueuePool + + Other parameters are sent to the Pool object's constructor. + + """ + + self.module = module + self.kw = kw + self.poolclass = poolclass + self.pools = {} + self._create_pool_mutex = threading.Lock() + + def close(self): + for key in list(self.pools): + del self.pools[key] + + def __del__(self): + self.close() + + def __getattr__(self, key): + return getattr(self.module, key) + + def get_pool(self, *args, **kw): + key = self._serialize(*args, **kw) + try: + return self.pools[key] + except KeyError: + self._create_pool_mutex.acquire() + try: + if key not in self.pools: + kw.pop('sa_pool_key', None) + pool = self.poolclass( + lambda: self.module.connect(*args, **kw), **self.kw) + self.pools[key] = pool + return pool + else: + return self.pools[key] + finally: + self._create_pool_mutex.release() + + def connect(self, *args, **kw): + """Activate a connection to the database. + + Connect to the database using this DBProxy's module and the given + connect arguments. If the arguments match an existing pool, the + connection will be returned from the pool's current thread-local + connection instance, or if there is no thread-local connection + instance it will be checked out from the set of pooled connections. + + If the pool has no available connections and allows new connections + to be created, a new database connection will be made. + + """ + + return self.get_pool(*args, **kw).connect() + + def dispose(self, *args, **kw): + """Dispose the pool referenced by the given connect arguments.""" + + key = self._serialize(*args, **kw) + try: + del self.pools[key] + except KeyError: + pass + + def _serialize(self, *args, **kw): + if "sa_pool_key" in kw: + return kw['sa_pool_key'] + + return tuple( + list(args) + + [(k, kw[k]) for k in sorted(kw)] + ) diff --git a/app/lib/sqlalchemy/processors.py b/app/lib/sqlalchemy/processors.py new file mode 100644 index 0000000..17f7ecc --- /dev/null +++ b/app/lib/sqlalchemy/processors.py @@ -0,0 +1,155 @@ +# sqlalchemy/processors.py +# Copyright (C) 2010-2017 the SQLAlchemy authors and contributors +# +# Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""defines generic type conversion functions, as used in bind and result +processors. + +They all share one common characteristic: None is passed through unchanged. + +""" + +import codecs +import re +import datetime +from . import util + + +def str_to_datetime_processor_factory(regexp, type_): + rmatch = regexp.match + # Even on python2.6 datetime.strptime is both slower than this code + # and it does not support microseconds. + has_named_groups = bool(regexp.groupindex) + + def process(value): + if value is None: + return None + else: + try: + m = rmatch(value) + except TypeError: + raise ValueError("Couldn't parse %s string '%r' " + "- value is not a string." 
% + (type_.__name__, value)) + if m is None: + raise ValueError("Couldn't parse %s string: " + "'%s'" % (type_.__name__, value)) + if has_named_groups: + groups = m.groupdict(0) + return type_(**dict(list(zip( + iter(groups.keys()), + list(map(int, iter(groups.values()))) + )))) + else: + return type_(*list(map(int, m.groups(0)))) + return process + + +def boolean_to_int(value): + if value is None: + return None + else: + return int(bool(value)) + + +def py_fallback(): + def to_unicode_processor_factory(encoding, errors=None): + decoder = codecs.getdecoder(encoding) + + def process(value): + if value is None: + return None + else: + # decoder returns a tuple: (value, len). Simply dropping the + # len part is safe: it is done that way in the normal + # 'xx'.decode(encoding) code path. + return decoder(value, errors)[0] + return process + + def to_conditional_unicode_processor_factory(encoding, errors=None): + decoder = codecs.getdecoder(encoding) + + def process(value): + if value is None: + return None + elif isinstance(value, util.text_type): + return value + else: + # decoder returns a tuple: (value, len). Simply dropping the + # len part is safe: it is done that way in the normal + # 'xx'.decode(encoding) code path. + return decoder(value, errors)[0] + return process + + def to_decimal_processor_factory(target_class, scale): + fstring = "%%.%df" % scale + + def process(value): + if value is None: + return None + else: + return target_class(fstring % value) + return process + + def to_float(value): + if value is None: + return None + else: + return float(value) + + def to_str(value): + if value is None: + return None + else: + return str(value) + + def int_to_boolean(value): + if value is None: + return None + else: + return bool(value) + + DATETIME_RE = re.compile( + r"(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)(?:\.(\d+))?") + TIME_RE = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d+))?") + DATE_RE = re.compile(r"(\d+)-(\d+)-(\d+)") + + str_to_datetime = str_to_datetime_processor_factory(DATETIME_RE, + datetime.datetime) + str_to_time = str_to_datetime_processor_factory(TIME_RE, datetime.time) + str_to_date = str_to_datetime_processor_factory(DATE_RE, datetime.date) + return locals() + +try: + from sqlalchemy.cprocessors import UnicodeResultProcessor, \ + DecimalResultProcessor, \ + to_float, to_str, int_to_boolean, \ + str_to_datetime, str_to_time, \ + str_to_date + + def to_unicode_processor_factory(encoding, errors=None): + if errors is not None: + return UnicodeResultProcessor(encoding, errors).process + else: + return UnicodeResultProcessor(encoding).process + + def to_conditional_unicode_processor_factory(encoding, errors=None): + if errors is not None: + return UnicodeResultProcessor(encoding, errors).conditional_process + else: + return UnicodeResultProcessor(encoding).conditional_process + + def to_decimal_processor_factory(target_class, scale): + # Note that the scale argument is not taken into account for integer + # values in the C implementation while it is in the Python one. + # For example, the Python implementation might return + # Decimal('5.00000') whereas the C implementation will + # return Decimal('5'). These are equivalent of course. 
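+        # An illustrative (hypothetical) call, same for both the C and
+        # Python implementations on non-integer input:
+        #
+        #     process = to_decimal_processor_factory(decimal.Decimal, 2)
+        #     process(5.5)   # -> Decimal('5.50'); None passes through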
+ return DecimalResultProcessor(target_class, "%%.%df" % scale).process + +except ImportError: + globals().update(py_fallback()) diff --git a/app/lib/sqlalchemy/schema.py b/app/lib/sqlalchemy/schema.py new file mode 100644 index 0000000..9924a67 --- /dev/null +++ b/app/lib/sqlalchemy/schema.py @@ -0,0 +1,66 @@ +# schema.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Compatibility namespace for sqlalchemy.sql.schema and related. + +""" + +from .sql.base import ( + SchemaVisitor + ) + + +from .sql.schema import ( + BLANK_SCHEMA, + CheckConstraint, + Column, + ColumnDefault, + Constraint, + DefaultClause, + DefaultGenerator, + FetchedValue, + ForeignKey, + ForeignKeyConstraint, + Index, + MetaData, + PassiveDefault, + PrimaryKeyConstraint, + SchemaItem, + Sequence, + Table, + ThreadLocalMetaData, + UniqueConstraint, + _get_table_key, + ColumnCollectionConstraint, + ColumnCollectionMixin + ) + + +from .sql.naming import conv + + +from .sql.ddl import ( + DDL, + CreateTable, + DropTable, + CreateSequence, + DropSequence, + CreateIndex, + DropIndex, + CreateSchema, + DropSchema, + _DropView, + CreateColumn, + AddConstraint, + DropConstraint, + DDLBase, + DDLElement, + _CreateDropBase, + _DDLCompiles, + sort_tables, + sort_tables_and_constraints +) diff --git a/app/lib/sqlalchemy/sql/__init__.py b/app/lib/sqlalchemy/sql/__init__.py new file mode 100644 index 0000000..5eebd7d --- /dev/null +++ b/app/lib/sqlalchemy/sql/__init__.py @@ -0,0 +1,98 @@ +# sql/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from .expression import ( + Alias, + ClauseElement, + ColumnCollection, + ColumnElement, + CompoundSelect, + Delete, + FromClause, + Insert, + Join, + Select, + Selectable, + TableClause, + TableSample, + Update, + alias, + and_, + any_, + all_, + asc, + between, + bindparam, + case, + cast, + collate, + column, + delete, + desc, + distinct, + except_, + except_all, + exists, + extract, + false, + False_, + func, + funcfilter, + insert, + intersect, + intersect_all, + join, + label, + lateral, + literal, + literal_column, + modifier, + not_, + null, + or_, + outerjoin, + outparam, + over, + select, + subquery, + table, + tablesample, + text, + true, + True_, + tuple_, + type_coerce, + union, + union_all, + update, + within_group +) + +from .visitors import ClauseVisitor + + +def __go(lcls): + global __all__ + from .. import util as _sa_util + + import inspect as _inspect + + __all__ = sorted(name for name, obj in lcls.items() + if not (name.startswith('_') or _inspect.ismodule(obj))) + + from .annotation import _prepare_annotations, Annotated + from .elements import AnnotatedColumnElement, ClauseList + from .selectable import AnnotatedFromClause + _prepare_annotations(ColumnElement, AnnotatedColumnElement) + _prepare_annotations(FromClause, AnnotatedFromClause) + _prepare_annotations(ClauseList, Annotated) + + _sa_util.dependencies.resolve_all("sqlalchemy.sql") + + from . 
import naming + +__go(locals()) diff --git a/app/lib/sqlalchemy/sql/annotation.py b/app/lib/sqlalchemy/sql/annotation.py new file mode 100644 index 0000000..e6f6311 --- /dev/null +++ b/app/lib/sqlalchemy/sql/annotation.py @@ -0,0 +1,203 @@ +# sql/annotation.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""The :class:`.Annotated` class and related routines; creates hash-equivalent +copies of SQL constructs which contain context-specific markers and +associations. + +""" + +from .. import util +from . import operators + + +class Annotated(object): + """clones a ClauseElement and applies an 'annotations' dictionary. + + Unlike regular clones, this clone also mimics __hash__() and + __cmp__() of the original element so that it takes its place + in hashed collections. + + A reference to the original element is maintained, for the important + reason of keeping its hash value current. When GC'ed, the + hash value may be reused, causing conflicts. + + .. note:: The rationale for Annotated producing a brand new class, + rather than placing the functionality directly within ClauseElement, + is **performance**. The __hash__() method is absent on plain + ClauseElement which leads to significantly reduced function call + overhead, as the use of sets and dictionaries against ClauseElement + objects is prevalent, but most are not "annotated". + + """ + + def __new__(cls, *args): + if not args: + # clone constructor + return object.__new__(cls) + else: + element, values = args + # pull appropriate subclass from registry of annotated + # classes + try: + cls = annotated_classes[element.__class__] + except KeyError: + cls = _new_annotation_type(element.__class__, cls) + return object.__new__(cls) + + def __init__(self, element, values): + self.__dict__ = element.__dict__.copy() + self.__element = element + self._annotations = values + self._hash = hash(element) + + def _annotate(self, values): + _values = self._annotations.copy() + _values.update(values) + return self._with_annotations(_values) + + def _with_annotations(self, values): + clone = self.__class__.__new__(self.__class__) + clone.__dict__ = self.__dict__.copy() + clone._annotations = values + return clone + + def _deannotate(self, values=None, clone=True): + if values is None: + return self.__element + else: + _values = self._annotations.copy() + for v in values: + _values.pop(v, None) + return self._with_annotations(_values) + + def _compiler_dispatch(self, visitor, **kw): + return self.__element.__class__._compiler_dispatch( + self, visitor, **kw) + + @property + def _constructor(self): + return self.__element._constructor + + def _clone(self): + clone = self.__element._clone() + if clone is self.__element: + # detect immutable, don't change anything + return self + else: + # update the clone with any changes that have occurred + # to this object's __dict__. + clone.__dict__.update(self.__dict__) + return self.__class__(clone, self._annotations) + + def __hash__(self): + return self._hash + + def __eq__(self, other): + if isinstance(self.__element, operators.ColumnOperators): + return self.__element.__class__.__eq__(self, other) + else: + return hash(other) == hash(self) + + +# hard-generate Annotated subclasses. this technique +# is used instead of on-the-fly types (i.e. type.__new__()) +# so that the resulting objects are pickleable. 
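+# A rough sketch of what gets registered for, e.g., Column (names here
+# are illustrative; see _new_annotation_type below):
+#
+#     annotated_classes[Column] = type(
+#         "AnnotatedColumn", (AnnotatedColumnElement, Column), {})
+#
+# so that Annotated(some_column, {...}) instantiates an AnnotatedColumn
+# that hashes and compares like the original.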
+annotated_classes = {} + + +def _deep_annotate(element, annotations, exclude=None): + """Deep copy the given ClauseElement, annotating each element + with the given annotations dictionary. + + Elements within the exclude collection will be cloned but not annotated. + + """ + def clone(elem): + if exclude and \ + hasattr(elem, 'proxy_set') and \ + elem.proxy_set.intersection(exclude): + newelem = elem._clone() + elif annotations != elem._annotations: + newelem = elem._annotate(annotations) + else: + newelem = elem + newelem._copy_internals(clone=clone) + return newelem + + if element is not None: + element = clone(element) + return element + + +def _deep_deannotate(element, values=None): + """Deep copy the given element, removing annotations.""" + + cloned = util.column_dict() + + def clone(elem): + # if a values dict is given, + # the elem must be cloned each time it appears, + # as there may be different annotations in source + # elements that are remaining. if totally + # removing all annotations, can assume the same + # slate... + if values or elem not in cloned: + newelem = elem._deannotate(values=values, clone=True) + newelem._copy_internals(clone=clone) + if not values: + cloned[elem] = newelem + return newelem + else: + return cloned[elem] + + if element is not None: + element = clone(element) + return element + + +def _shallow_annotate(element, annotations): + """Annotate the given ClauseElement and copy its internals so that + internal objects refer to the new annotated object. + + Basically used to apply a "dont traverse" annotation to a + selectable, without digging throughout the whole + structure wasting time. + """ + element = element._annotate(annotations) + element._copy_internals() + return element + + +def _new_annotation_type(cls, base_cls): + if issubclass(cls, Annotated): + return cls + elif cls in annotated_classes: + return annotated_classes[cls] + + for super_ in cls.__mro__: + # check if an Annotated subclass more specific than + # the given base_cls is already registered, such + # as AnnotatedColumnElement. + if super_ in annotated_classes: + base_cls = annotated_classes[super_] + break + + annotated_classes[cls] = anno_cls = type( + "Annotated%s" % cls.__name__, + (base_cls, cls), {}) + globals()["Annotated%s" % cls.__name__] = anno_cls + return anno_cls + + +def _prepare_annotations(target_hierarchy, base_cls): + stack = [target_hierarchy] + while stack: + cls = stack.pop() + stack.extend(cls.__subclasses__()) + + _new_annotation_type(cls, base_cls) diff --git a/app/lib/sqlalchemy/sql/base.py b/app/lib/sqlalchemy/sql/base.py new file mode 100644 index 0000000..7a04beb --- /dev/null +++ b/app/lib/sqlalchemy/sql/base.py @@ -0,0 +1,633 @@ +# sql/base.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Foundational utilities common to many sql modules. + +""" + + +from .. 
import util, exc +import itertools +from .visitors import ClauseVisitor +import re +import collections + +PARSE_AUTOCOMMIT = util.symbol('PARSE_AUTOCOMMIT') +NO_ARG = util.symbol('NO_ARG') + + +class Immutable(object): + """mark a ClauseElement as 'immutable' when expressions are cloned.""" + + def unique_params(self, *optionaldict, **kwargs): + raise NotImplementedError("Immutable objects do not support copying") + + def params(self, *optionaldict, **kwargs): + raise NotImplementedError("Immutable objects do not support copying") + + def _clone(self): + return self + + +def _from_objects(*elements): + return itertools.chain(*[element._from_objects for element in elements]) + + +@util.decorator +def _generative(fn, *args, **kw): + """Mark a method as generative.""" + + self = args[0]._generate() + fn(self, *args[1:], **kw) + return self + + +class _DialectArgView(collections.MutableMapping): + """A dictionary view of dialect-level arguments in the form + <dialect>_<argument_name>. + + """ + + def __init__(self, obj): + self.obj = obj + + def _key(self, key): + try: + dialect, value_key = key.split("_", 1) + except ValueError: + raise KeyError(key) + else: + return dialect, value_key + + def __getitem__(self, key): + dialect, value_key = self._key(key) + + try: + opt = self.obj.dialect_options[dialect] + except exc.NoSuchModuleError: + raise KeyError(key) + else: + return opt[value_key] + + def __setitem__(self, key, value): + try: + dialect, value_key = self._key(key) + except KeyError: + raise exc.ArgumentError( + "Keys must be of the form <dialectname>_<argname>") + else: + self.obj.dialect_options[dialect][value_key] = value + + def __delitem__(self, key): + dialect, value_key = self._key(key) + del self.obj.dialect_options[dialect][value_key] + + def __len__(self): + return sum(len(args._non_defaults) for args in + self.obj.dialect_options.values()) + + def __iter__(self): + return ( + util.safe_kwarg("%s_%s" % (dialect_name, value_name)) + for dialect_name in self.obj.dialect_options + for value_name in + self.obj.dialect_options[dialect_name]._non_defaults + ) + + +class _DialectArgDict(collections.MutableMapping): + """A dictionary view of dialect-level arguments for a specific + dialect. + + Maintains a separate collection of user-specified arguments + and dialect-specified default arguments. + + """ + + def __init__(self): + self._non_defaults = {} + self._defaults = {} + + def __len__(self): + return len(set(self._non_defaults).union(self._defaults)) + + def __iter__(self): + return iter(set(self._non_defaults).union(self._defaults)) + + def __getitem__(self, key): + if key in self._non_defaults: + return self._non_defaults[key] + else: + return self._defaults[key] + + def __setitem__(self, key, value): + self._non_defaults[key] = value + + def __delitem__(self, key): + del self._non_defaults[key] + + +class DialectKWArgs(object): + """Establish the ability for a class to have dialect-specific arguments + with defaults and constructor validation. + + The :class:`.DialectKWArgs` interacts with the + :attr:`.DefaultDialect.construct_arguments` present on a dialect. + + .. seealso:: + + :attr:`.DefaultDialect.construct_arguments` + + """ + + @classmethod + def argument_for(cls, dialect_name, argument_name, default): + """Add a new kind of dialect-specific keyword argument for this class.
+ + E.g.:: + + Index.argument_for("mydialect", "length", None) + + some_index = Index('a', 'b', mydialect_length=5) + + The :meth:`.DialectKWArgs.argument_for` method is a per-argument + way of adding extra arguments to the + :attr:`.DefaultDialect.construct_arguments` dictionary. This + dictionary provides a list of argument names accepted by various + schema-level constructs on behalf of a dialect. + + New dialects should typically specify this dictionary all at once as a + data member of the dialect class. The use case for ad-hoc addition of + argument names is typically for end-user code that is also using + a custom compilation scheme which consumes the additional arguments. + + :param dialect_name: name of a dialect. The dialect must be + locatable, else a :class:`.NoSuchModuleError` is raised. The + dialect must also include an existing + :attr:`.DefaultDialect.construct_arguments` collection, indicating + that it participates in the keyword-argument validation and default + system, else :class:`.ArgumentError` is raised. If the dialect does + not include this collection, then any keyword argument can be + specified on behalf of this dialect already. All dialects packaged + within SQLAlchemy include this collection, however for third party + dialects, support may vary. + + :param argument_name: name of the parameter. + + :param default: default value of the parameter. + + .. versionadded:: 0.9.4 + + """ + + construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name] + if construct_arg_dictionary is None: + raise exc.ArgumentError( + "Dialect '%s' does not have keyword-argument " + "validation and defaults enabled" % + dialect_name) + if cls not in construct_arg_dictionary: + construct_arg_dictionary[cls] = {} + construct_arg_dictionary[cls][argument_name] = default + + @util.memoized_property + def dialect_kwargs(self): + """A collection of keyword arguments specified as dialect-specific + options to this construct. + + The arguments are present here in their original ``<dialect>_<kwarg>`` + format. Only arguments that were actually passed are included; + unlike the :attr:`.DialectKWArgs.dialect_options` collection, which + contains all options known by this dialect including defaults. + + The collection is also writable; keys are accepted of the + form ``<dialect>_<kwarg>`` where the value will be assembled + into the list of options. + + .. versionadded:: 0.9.2 + + .. versionchanged:: 0.9.4 The :attr:`.DialectKWArgs.dialect_kwargs` + collection is now writable. + + ..
seealso:: + + :attr:`.DialectKWArgs.dialect_options` - nested dictionary form + + """ + return _DialectArgView(self) + + @property + def kwargs(self): + """A synonym for :attr:`.DialectKWArgs.dialect_kwargs`.""" + return self.dialect_kwargs + + @util.dependencies("sqlalchemy.dialects") + def _kw_reg_for_dialect(dialects, dialect_name): + dialect_cls = dialects.registry.load(dialect_name) + if dialect_cls.construct_arguments is None: + return None + return dict(dialect_cls.construct_arguments) + _kw_registry = util.PopulateDict(_kw_reg_for_dialect) + + def _kw_reg_for_dialect_cls(self, dialect_name): + construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name] + d = _DialectArgDict() + + if construct_arg_dictionary is None: + d._defaults.update({"*": None}) + else: + for cls in reversed(self.__class__.__mro__): + if cls in construct_arg_dictionary: + d._defaults.update(construct_arg_dictionary[cls]) + return d + + @util.memoized_property + def dialect_options(self): + """A collection of keyword arguments specified as dialect-specific + options to this construct. + + This is a two-level nested registry, keyed to ``<dialect_name>`` + and ``<argument_name>``. For example, the ``postgresql_where`` + argument would be locatable as:: + + arg = my_object.dialect_options['postgresql']['where'] + + .. versionadded:: 0.9.2 + + .. seealso:: + + :attr:`.DialectKWArgs.dialect_kwargs` - flat dictionary form + + """ + + return util.PopulateDict( + util.portable_instancemethod(self._kw_reg_for_dialect_cls) + ) + + def _validate_dialect_kwargs(self, kwargs): + # validate remaining kwargs that they all specify DB prefixes + + if not kwargs: + return + + for k in kwargs: + m = re.match('^(.+?)_(.+)$', k) + if not m: + raise TypeError( + "Additional arguments should be " + "named <dialectname>_<argname>, got '%s'" % k) + dialect_name, arg_name = m.group(1, 2) + + try: + construct_arg_dictionary = self.dialect_options[dialect_name] + except exc.NoSuchModuleError: + util.warn( + "Can't validate argument %r; can't " + "locate any SQLAlchemy dialect named %r" % + (k, dialect_name)) + self.dialect_options[dialect_name] = d = _DialectArgDict() + d._defaults.update({"*": None}) + d._non_defaults[arg_name] = kwargs[k] + else: + if "*" not in construct_arg_dictionary and \ + arg_name not in construct_arg_dictionary: + raise exc.ArgumentError( + "Argument %r is not accepted by " + "dialect %r on behalf of %r" % ( + k, + dialect_name, self.__class__ + )) + else: + construct_arg_dictionary[arg_name] = kwargs[k] + + +class Generative(object): + """Allow a ClauseElement to generate itself via the + @_generative decorator. + + """ + + def _generate(self): + s = self.__class__.__new__(self.__class__) + s.__dict__ = self.__dict__.copy() + return s + + +class Executable(Generative): + """Mark a ClauseElement as supporting execution. + + :class:`.Executable` is a superclass for all "statement" types + of objects, including :func:`select`, :func:`delete`, :func:`update`, + :func:`insert`, :func:`text`. + + """ + + supports_execution = True + _execution_options = util.immutabledict() + _bind = None + + @_generative + def execution_options(self, **kw): + """ Set non-SQL options for the statement which take effect during + execution. + + Execution options can be set on a per-statement or + per :class:`.Connection` basis. Additionally, the + :class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide + access to execution options which they in turn configure upon + connections. + + The :meth:`execution_options` method is generative.
A new + instance of this statement is returned that contains the options:: + + statement = select([table.c.x, table.c.y]) + statement = statement.execution_options(autocommit=True) + + Note that only a subset of possible execution options can be applied + to a statement - these include "autocommit" and "stream_results", + but not "isolation_level" or "compiled_cache". + See :meth:`.Connection.execution_options` for a full list of + possible options. + + .. seealso:: + + :meth:`.Connection.execution_options()` + + :meth:`.Query.execution_options()` + + """ + if 'isolation_level' in kw: + raise exc.ArgumentError( + "'isolation_level' execution option may only be specified " + "on Connection.execution_options(), or " + "per-engine using the isolation_level " + "argument to create_engine()." + ) + if 'compiled_cache' in kw: + raise exc.ArgumentError( + "'compiled_cache' execution option may only be specified " + "on Connection.execution_options(), not per statement." + ) + self._execution_options = self._execution_options.union(kw) + + def execute(self, *multiparams, **params): + """Compile and execute this :class:`.Executable`.""" + e = self.bind + if e is None: + label = getattr(self, 'description', self.__class__.__name__) + msg = ('This %s is not directly bound to a Connection or Engine.' + 'Use the .execute() method of a Connection or Engine ' + 'to execute this construct.' % label) + raise exc.UnboundExecutionError(msg) + return e._execute_clauseelement(self, multiparams, params) + + def scalar(self, *multiparams, **params): + """Compile and execute this :class:`.Executable`, returning the + result's scalar representation. + + """ + return self.execute(*multiparams, **params).scalar() + + @property + def bind(self): + """Returns the :class:`.Engine` or :class:`.Connection` to + which this :class:`.Executable` is bound, or None if none found. + + This is a traversal which checks locally, then + checks among the "from" clauses of associated objects + until a bound engine or connection is found. + + """ + if self._bind is not None: + return self._bind + + for f in _from_objects(self): + if f is self: + continue + engine = f.bind + if engine is not None: + return engine + else: + return None + + +class SchemaEventTarget(object): + """Base class for elements that are the targets of :class:`.DDLEvents` + events. + + This includes :class:`.SchemaItem` as well as :class:`.SchemaType`. + + """ + + def _set_parent(self, parent): + """Associate with this SchemaEvent's parent object.""" + + def _set_parent_with_dispatch(self, parent): + self.dispatch.before_parent_attach(self, parent) + self._set_parent(parent) + self.dispatch.after_parent_attach(self, parent) + + +class SchemaVisitor(ClauseVisitor): + """Define the visiting for ``SchemaItem`` objects.""" + + __traverse_options__ = {'schema_visitor': True} + + +class ColumnCollection(util.OrderedProperties): + """An ordered dictionary that stores a list of ColumnElement + instances. + + Overrides the ``__eq__()`` method to produce SQL clauses between + sets of correlated columns. + + """ + + __slots__ = '_all_columns' + + def __init__(self, *columns): + super(ColumnCollection, self).__init__() + object.__setattr__(self, '_all_columns', []) + for c in columns: + self.add(c) + + def __str__(self): + return repr([str(c) for c in self]) + + def replace(self, column): + """add the given column to this collection, removing unaliased + versions of this column as well as existing columns with the + same key. 
+ + e.g.:: + + t = Table('sometable', metadata, Column('col1', Integer)) + t.columns.replace(Column('col1', Integer, key='columnone')) + + will remove the original 'col1' from the collection, and add + the new column under the name 'columnname'. + + Used by schema.Column to override columns during table reflection. + + """ + remove_col = None + if column.name in self and column.key != column.name: + other = self[column.name] + if other.name == other.key: + remove_col = other + del self._data[other.key] + + if column.key in self._data: + remove_col = self._data[column.key] + + self._data[column.key] = column + if remove_col is not None: + self._all_columns[:] = [column if c is remove_col + else c for c in self._all_columns] + else: + self._all_columns.append(column) + + def add(self, column): + """Add a column to this collection. + + The key attribute of the column will be used as the hash key + for this dictionary. + + """ + if not column.key: + raise exc.ArgumentError( + "Can't add unnamed column to column collection") + self[column.key] = column + + def __delitem__(self, key): + raise NotImplementedError() + + def __setattr__(self, key, object): + raise NotImplementedError() + + def __setitem__(self, key, value): + if key in self: + + # this warning is primarily to catch select() statements + # which have conflicting column names in their exported + # columns collection + + existing = self[key] + if not existing.shares_lineage(value): + util.warn('Column %r on table %r being replaced by ' + '%r, which has the same key. Consider ' + 'use_labels for select() statements.' % + (key, getattr(existing, 'table', None), value)) + + # pop out memoized proxy_set as this + # operation may very well be occurring + # in a _make_proxy operation + util.memoized_property.reset(value, "proxy_set") + + self._all_columns.append(value) + self._data[key] = value + + def clear(self): + raise NotImplementedError() + + def remove(self, column): + del self._data[column.key] + self._all_columns[:] = [ + c for c in self._all_columns if c is not column] + + def update(self, iter): + cols = list(iter) + all_col_set = set(self._all_columns) + self._all_columns.extend( + c for label, c in cols if c not in all_col_set) + self._data.update((label, c) for label, c in cols) + + def extend(self, iter): + cols = list(iter) + all_col_set = set(self._all_columns) + self._all_columns.extend(c for c in cols if c not in all_col_set) + self._data.update((c.key, c) for c in cols) + + __hash__ = None + + @util.dependencies("sqlalchemy.sql.elements") + def __eq__(self, elements, other): + l = [] + for c in getattr(other, "_all_columns", other): + for local in self._all_columns: + if c.shares_lineage(local): + l.append(c == local) + return elements.and_(*l) + + def __contains__(self, other): + if not isinstance(other, util.string_types): + raise exc.ArgumentError("__contains__ requires a string argument") + return util.OrderedProperties.__contains__(self, other) + + def __getstate__(self): + return {'_data': self._data, + '_all_columns': self._all_columns} + + def __setstate__(self, state): + object.__setattr__(self, '_data', state['_data']) + object.__setattr__(self, '_all_columns', state['_all_columns']) + + def contains_column(self, col): + return col in set(self._all_columns) + + def as_immutable(self): + return ImmutableColumnCollection(self._data, self._all_columns) + + +class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection): + def __init__(self, data, all_columns): + util.ImmutableProperties.__init__(self, 
data) + object.__setattr__(self, '_all_columns', all_columns) + + extend = remove = util.ImmutableProperties._immutable + + +class ColumnSet(util.ordered_column_set): + def contains_column(self, col): + return col in self + + def extend(self, cols): + for col in cols: + self.add(col) + + def __add__(self, other): + return list(self) + list(other) + + @util.dependencies("sqlalchemy.sql.elements") + def __eq__(self, elements, other): + l = [] + for c in other: + for local in self: + if c.shares_lineage(local): + l.append(c == local) + return elements.and_(*l) + + def __hash__(self): + return hash(tuple(x for x in self)) + + +def _bind_or_error(schemaitem, msg=None): + bind = schemaitem.bind + if not bind: + name = schemaitem.__class__.__name__ + label = getattr(schemaitem, 'fullname', + getattr(schemaitem, 'name', None)) + if label: + item = '%s object %r' % (name, label) + else: + item = '%s object' % name + if msg is None: + msg = "%s is not bound to an Engine or Connection. "\ + "Execution can not proceed without a database to execute "\ + "against." % item + raise exc.UnboundExecutionError(msg) + return bind diff --git a/app/lib/sqlalchemy/sql/compiler.py b/app/lib/sqlalchemy/sql/compiler.py new file mode 100644 index 0000000..bfa22c2 --- /dev/null +++ b/app/lib/sqlalchemy/sql/compiler.py @@ -0,0 +1,3034 @@ +# sql/compiler.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Base SQL and DDL compiler implementations. + +Classes provided include: + +:class:`.compiler.SQLCompiler` - renders SQL +strings + +:class:`.compiler.DDLCompiler` - renders DDL +(data definition language) strings + +:class:`.compiler.GenericTypeCompiler` - renders +type specification strings. + +To generate user-defined SQL strings, see +:doc:`/ext/compiler`. + +""" + +import contextlib +import re +from . import schema, sqltypes, operators, functions, visitors, \ + elements, selectable, crud +from .. import util, exc +import itertools + +RESERVED_WORDS = set([ + 'all', 'analyse', 'analyze', 'and', 'any', 'array', + 'as', 'asc', 'asymmetric', 'authorization', 'between', + 'binary', 'both', 'case', 'cast', 'check', 'collate', + 'column', 'constraint', 'create', 'cross', 'current_date', + 'current_role', 'current_time', 'current_timestamp', + 'current_user', 'default', 'deferrable', 'desc', + 'distinct', 'do', 'else', 'end', 'except', 'false', + 'for', 'foreign', 'freeze', 'from', 'full', 'grant', + 'group', 'having', 'ilike', 'in', 'initially', 'inner', + 'intersect', 'into', 'is', 'isnull', 'join', 'leading', + 'left', 'like', 'limit', 'localtime', 'localtimestamp', + 'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset', + 'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps', + 'placing', 'primary', 'references', 'right', 'select', + 'session_user', 'set', 'similar', 'some', 'symmetric', 'table', + 'then', 'to', 'trailing', 'true', 'union', 'unique', 'user', + 'using', 'verbose', 'when', 'where']) + +LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I) +ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in range(0, 10)]).union(['$']) + +BIND_PARAMS = re.compile(r'(? 
<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE) +BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]*)(?![:\w\$])', re.UNICODE) + +BIND_TEMPLATES = { + 'pyformat': "%%(%(name)s)s", + 'qmark': "?", + 'format': "%%s", + 'numeric': ":[_POSITION]", + 'named': ":%(name)s" +} + + +OPERATORS = { + # binary + operators.and_: ' AND ', + operators.or_: ' OR ', + operators.add: ' + ', + operators.mul: ' * ', + operators.sub: ' - ', + operators.div: ' / ', + operators.mod: ' % ', + operators.truediv: ' / ', + operators.neg: '-', + operators.lt: ' < ', + operators.le: ' <= ', + operators.ne: ' != ', + operators.gt: ' > ', + operators.ge: ' >= ', + operators.eq: ' = ', + operators.is_distinct_from: ' IS DISTINCT FROM ', + operators.isnot_distinct_from: ' IS NOT DISTINCT FROM ', + operators.concat_op: ' || ', + operators.match_op: ' MATCH ', + operators.notmatch_op: ' NOT MATCH ', + operators.in_op: ' IN ', + operators.notin_op: ' NOT IN ', + operators.comma_op: ', ', + operators.from_: ' FROM ', + operators.as_: ' AS ', + operators.is_: ' IS ', + operators.isnot: ' IS NOT ', + operators.collate: ' COLLATE ', + + # unary + operators.exists: 'EXISTS ', + operators.distinct_op: 'DISTINCT ', + operators.inv: 'NOT ', + operators.any_op: 'ANY ', + operators.all_op: 'ALL ', + + # modifiers + operators.desc_op: ' DESC', + operators.asc_op: ' ASC', + operators.nullsfirst_op: ' NULLS FIRST', + operators.nullslast_op: ' NULLS LAST', + +} + +FUNCTIONS = { + functions.coalesce: 'coalesce%(expr)s', + functions.current_date: 'CURRENT_DATE', + functions.current_time: 'CURRENT_TIME', + functions.current_timestamp: 'CURRENT_TIMESTAMP', + functions.current_user: 'CURRENT_USER', + functions.localtime: 'LOCALTIME', + functions.localtimestamp: 'LOCALTIMESTAMP', + functions.random: 'random%(expr)s', + functions.sysdate: 'sysdate', + functions.session_user: 'SESSION_USER', + functions.user: 'USER' +} + +EXTRACT_MAP = { + 'month': 'month', + 'day': 'day', + 'year': 'year', + 'second': 'second', + 'hour': 'hour', + 'doy': 'doy', + 'minute': 'minute', + 'quarter': 'quarter', + 'dow': 'dow', + 'week': 'week', + 'epoch': 'epoch', + 'milliseconds': 'milliseconds', + 'microseconds': 'microseconds', + 'timezone_hour': 'timezone_hour', + 'timezone_minute': 'timezone_minute' +} + +COMPOUND_KEYWORDS = { + selectable.CompoundSelect.UNION: 'UNION', + selectable.CompoundSelect.UNION_ALL: 'UNION ALL', + selectable.CompoundSelect.EXCEPT: 'EXCEPT', + selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL', + selectable.CompoundSelect.INTERSECT: 'INTERSECT', + selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL' +} + + +class Compiled(object): + + """Represent a compiled SQL or DDL expression. + + The ``__str__`` method of the ``Compiled`` object should produce + the actual text of the statement. ``Compiled`` objects are + specific to their underlying database dialect, and also may + or may not be specific to the columns referenced within a + particular set of bind parameters. In no case should the + ``Compiled`` object be dependent on the actual values of those + bind parameters, even though it may reference those values as + defaults. + """ + + _cached_metadata = None + + execution_options = util.immutabledict() + """ + Execution options propagated from the statement. In some cases, + sub-elements of the statement can modify these. + """ + + def __init__(self, dialect, statement, bind=None, + schema_translate_map=None, + compile_kwargs=util.immutabledict()): + """Construct a new :class:`.Compiled` object. + + :param dialect: :class:`.Dialect` to compile against. + + :param statement: :class:`.ClauseElement` to be compiled. + + :param bind: Optional Engine or Connection to compile this + statement against. + + :param schema_translate_map: dictionary of schema names to be + translated when forming the resultant SQL + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`schema_translating` + + :param compile_kwargs: additional kwargs that will be + passed to the initial call to :meth:`.Compiled.process`.
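+
+        A :class:`.Compiled` object is normally acquired via the
+        :meth:`.ClauseElement.compile` method; a minimal sketch::
+
+            from sqlalchemy import column, select
+
+            stmt = select([column('x')])
+            compiled = stmt.compile()    # default dialect
+            print(compiled.string)       # SELECT x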
+ + + """ + + self.dialect = dialect + self.bind = bind + self.preparer = self.dialect.identifier_preparer + if schema_translate_map: + self.preparer = self.preparer._with_schema_translate( + schema_translate_map) + + if statement is not None: + self.statement = statement + self.can_execute = statement.supports_execution + if self.can_execute: + self.execution_options = statement._execution_options + self.string = self.process(self.statement, **compile_kwargs) + + @util.deprecated("0.7", ":class:`.Compiled` objects now compile " + "within the constructor.") + def compile(self): + """Produce the internal string representation of this element. + """ + pass + + def _execute_on_connection(self, connection, multiparams, params): + if self.can_execute: + return connection._execute_compiled(self, multiparams, params) + else: + raise exc.ObjectNotExecutableError(self.statement) + + @property + def sql_compiler(self): + """Return a Compiled that is capable of processing SQL expressions. + + If this compiler is one, it would likely just return 'self'. + + """ + + raise NotImplementedError() + + def process(self, obj, **kwargs): + return obj._compiler_dispatch(self, **kwargs) + + def __str__(self): + """Return the string text of the generated SQL or DDL.""" + + return self.string or '' + + def construct_params(self, params=None): + """Return the bind params for this compiled object. + + :param params: a dict of string/object pairs whose values will + override bind values compiled in to the + statement. + """ + + raise NotImplementedError() + + @property + def params(self): + """Return the bind params for this compiled object.""" + return self.construct_params() + + def execute(self, *multiparams, **params): + """Execute this compiled object.""" + + e = self.bind + if e is None: + raise exc.UnboundExecutionError( + "This Compiled object is not bound to any Engine " + "or Connection.") + return e._execute_compiled(self, multiparams, params) + + def scalar(self, *multiparams, **params): + """Execute this compiled object and return the result's + scalar value.""" + + return self.execute(*multiparams, **params).scalar() + + +class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)): + """Produces DDL specification for TypeEngine objects.""" + + ensure_kwarg = r'visit_\w+' + + def __init__(self, dialect): + self.dialect = dialect + + def process(self, type_, **kw): + return type_._compiler_dispatch(self, **kw) + + +class _CompileLabel(visitors.Visitable): + + """lightweight label object which acts as an expression.Label.""" + + __visit_name__ = 'label' + __slots__ = 'element', 'name' + + def __init__(self, col, name, alt_names=()): + self.element = col + self.name = name + self._alt_names = (col,) + alt_names + + @property + def proxy_set(self): + return self.element.proxy_set + + @property + def type(self): + return self.element.type + + def self_group(self, **kw): + return self + + +class SQLCompiler(Compiled): + """Default implementation of :class:`.Compiled`. + + Compiles :class:`.ClauseElement` objects into SQL strings. 
+ + """ + + extract_map = EXTRACT_MAP + + compound_keywords = COMPOUND_KEYWORDS + + isdelete = isinsert = isupdate = False + """class-level defaults which can be set at the instance + level to define if this Compiled instance represents + INSERT/UPDATE/DELETE + """ + + isplaintext = False + + returning = None + """holds the "returning" collection of columns if + the statement is CRUD and defines returning columns + either implicitly or explicitly + """ + + returning_precedes_values = False + """set to True classwide to generate RETURNING + clauses before the VALUES or WHERE clause (i.e. MSSQL) + """ + + render_table_with_column_in_update_from = False + """set to True classwide to indicate the SET clause + in a multi-table UPDATE statement should qualify + columns with the table name (i.e. MySQL only) + """ + + ansi_bind_rules = False + """SQL 92 doesn't allow bind parameters to be used + in the columns clause of a SELECT, nor does it allow + ambiguous expressions like "? = ?". A compiler + subclass can set this flag to False if the target + driver/DB enforces this + """ + + _textual_ordered_columns = False + """tell the result object that the column names as rendered are important, + but they are also "ordered" vs. what is in the compiled object here. + """ + + _ordered_columns = True + """ + if False, means we can't be sure the list of entries + in _result_columns is actually the rendered order. Usually + True unless using an unordered TextAsFrom. + """ + + insert_prefetch = update_prefetch = () + + + def __init__(self, dialect, statement, column_keys=None, + inline=False, **kwargs): + """Construct a new :class:`.SQLCompiler` object. + + :param dialect: :class:`.Dialect` to be used + + :param statement: :class:`.ClauseElement` to be compiled + + :param column_keys: a list of column names to be compiled into an + INSERT or UPDATE statement. + + :param inline: whether to generate INSERT statements as "inline", e.g. + not formatted to return any generated defaults + + :param kwargs: additional keyword arguments to be consumed by the + superclass. + + """ + self.column_keys = column_keys + + # compile INSERT/UPDATE defaults/sequences inlined (no pre- + # execute) + self.inline = inline or getattr(statement, 'inline', False) + + # a dictionary of bind parameter keys to BindParameter + # instances. + self.binds = {} + + # a dictionary of BindParameter instances to "compiled" names + # that are actually present in the generated SQL + self.bind_names = util.column_dict() + + # stack which keeps track of nested SELECT statements + self.stack = [] + + # relates label names in the final SQL to a tuple of local + # column/label name, ColumnElement object (if any) and + # TypeEngine. 
ResultProxy uses this for type processing and + # column targeting + self._result_columns = [] + + # true if the paramstyle is positional + self.positional = dialect.positional + if self.positional: + self.positiontup = [] + self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle] + + self.ctes = None + + self.label_length = dialect.label_length \ + or dialect.max_identifier_length + + # a map which tracks "anonymous" identifiers that are created on + # the fly here + self.anon_map = util.PopulateDict(self._process_anon) + + # a map which tracks "truncated" names based on + # dialect.label_length or dialect.max_identifier_length + self.truncated_names = {} + Compiled.__init__(self, dialect, statement, **kwargs) + + if ( + self.isinsert or self.isupdate or self.isdelete + ) and statement._returning: + self.returning = statement._returning + + if self.positional and dialect.paramstyle == 'numeric': + self._apply_numbered_params() + + @property + def prefetch(self): + return list(self.insert_prefetch + self.update_prefetch) + + @util.memoized_instancemethod + def _init_cte_state(self): + """Initialize collections related to CTEs only if + a CTE is located, to save on the overhead of + these collections otherwise. + + """ + # collect CTEs to tack on top of a SELECT + self.ctes = util.OrderedDict() + self.ctes_by_name = {} + self.ctes_recursive = False + if self.positional: + self.cte_positional = {} + + @contextlib.contextmanager + def _nested_result(self): + """special API to support the use case of 'nested result sets'""" + result_columns, ordered_columns = ( + self._result_columns, self._ordered_columns) + self._result_columns, self._ordered_columns = [], False + + try: + if self.stack: + entry = self.stack[-1] + entry['need_result_map_for_nested'] = True + else: + entry = None + yield self._result_columns, self._ordered_columns + finally: + if entry: + entry.pop('need_result_map_for_nested') + self._result_columns, self._ordered_columns = ( + result_columns, ordered_columns) + + def _apply_numbered_params(self): + poscount = itertools.count(1) + self.string = re.sub( + r'\[_POSITION\]', + lambda m: str(util.next(poscount)), + self.string) + + @util.memoized_property + def _bind_processors(self): + return dict( + (key, value) for key, value in + ((self.bind_names[bindparam], + bindparam.type._cached_bind_processor(self.dialect)) + for bindparam in self.bind_names) + if value is not None + ) + + def is_subquery(self): + return len(self.stack) > 1 + + @property + def sql_compiler(self): + return self + + def construct_params(self, params=None, _group_number=None, _check=True): + """return a dictionary of bind parameter keys and values""" + + if params: + pd = {} + for bindparam in self.bind_names: + name = self.bind_names[bindparam] + if bindparam.key in params: + pd[name] = params[bindparam.key] + elif name in params: + pd[name] = params[name] + + elif _check and bindparam.required: + if _group_number: + raise exc.InvalidRequestError( + "A value is required for bind parameter %r, " + "in parameter group %d" % + (bindparam.key, _group_number)) + else: + raise exc.InvalidRequestError( + "A value is required for bind parameter %r" + % bindparam.key) + + elif bindparam.callable: + pd[name] = bindparam.effective_value + else: + pd[name] = bindparam.value + return pd + else: + pd = {} + for bindparam in self.bind_names: + if _check and bindparam.required: + if _group_number: + raise exc.InvalidRequestError( + "A value is required for bind parameter %r, " + "in parameter group %d" % + (bindparam.key, 
_group_number)) + else: + raise exc.InvalidRequestError( + "A value is required for bind parameter %r" + % bindparam.key) + + if bindparam.callable: + pd[self.bind_names[bindparam]] = bindparam.effective_value + else: + pd[self.bind_names[bindparam]] = bindparam.value + return pd + + @property + def params(self): + """Return the bind param dictionary embedded into this + compiled object, for those values that are present.""" + return self.construct_params(_check=False) + + @util.dependencies("sqlalchemy.engine.result") + def _create_result_map(self, result): + """utility method used for unit tests only.""" + return result.ResultMetaData._create_result_map(self._result_columns) + + def default_from(self): + """Called when a SELECT statement has no froms, and no FROM clause is + to be appended. + + Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output. + + """ + return "" + + def visit_grouping(self, grouping, asfrom=False, **kwargs): + return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")" + + def visit_label_reference( + self, element, within_columns_clause=False, **kwargs): + if self.stack and self.dialect.supports_simple_order_by_label: + selectable = self.stack[-1]['selectable'] + + with_cols, only_froms, only_cols = selectable._label_resolve_dict + if within_columns_clause: + resolve_dict = only_froms + else: + resolve_dict = only_cols + + # this can be None in the case that a _label_reference() + # were subject to a replacement operation, in which case + # the replacement of the Label element may have changed + # to something else like a ColumnClause expression. + order_by_elem = element.element._order_by_label_element + + if order_by_elem is not None and order_by_elem.name in \ + resolve_dict and \ + order_by_elem.shares_lineage( + resolve_dict[order_by_elem.name]): + kwargs['render_label_as_label'] = \ + element.element._order_by_label_element + return self.process( + element.element, within_columns_clause=within_columns_clause, + **kwargs) + + def visit_textual_label_reference( + self, element, within_columns_clause=False, **kwargs): + if not self.stack: + # compiling the element outside of the context of a SELECT + return self.process( + element._text_clause + ) + + selectable = self.stack[-1]['selectable'] + with_cols, only_froms, only_cols = selectable._label_resolve_dict + try: + if within_columns_clause: + col = only_froms[element.element] + else: + col = with_cols[element.element] + except KeyError: + # treat it like text() + util.warn_limited( + "Can't resolve label reference %r; converting to text()", + util.ellipses_string(element.element)) + return self.process( + element._text_clause + ) + else: + kwargs['render_label_as_label'] = col + return self.process( + col, within_columns_clause=within_columns_clause, **kwargs) + + def visit_label(self, label, + add_to_result_map=None, + within_label_clause=False, + within_columns_clause=False, + render_label_as_label=None, + **kw): + # only render labels within the columns clause + # or ORDER BY clause of a select. dialect-specific compilers + # can modify this behavior. 
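+        #
+        # e.g., for a hypothetical select([t.c.x.label("total")])
+        # ordered by "total", the columns clause typically renders
+        # "t.x AS total" while the ORDER BY emits only the label name.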
+ render_label_with_as = (within_columns_clause and not + within_label_clause) + render_label_only = render_label_as_label is label + + if render_label_only or render_label_with_as: + if isinstance(label.name, elements._truncated_label): + labelname = self._truncated_identifier("colident", label.name) + else: + labelname = label.name + + if render_label_with_as: + if add_to_result_map is not None: + add_to_result_map( + labelname, + label.name, + (label, labelname, ) + label._alt_names, + label.type + ) + + return label.element._compiler_dispatch( + self, within_columns_clause=True, + within_label_clause=True, **kw) + \ + OPERATORS[operators.as_] + \ + self.preparer.format_label(label, labelname) + elif render_label_only: + return self.preparer.format_label(label, labelname) + else: + return label.element._compiler_dispatch( + self, within_columns_clause=False, **kw) + + def _fallback_column_name(self, column): + raise exc.CompileError("Cannot compile Column object until " + "its 'name' is assigned.") + + def visit_column(self, column, add_to_result_map=None, + include_table=True, **kwargs): + name = orig_name = column.name + if name is None: + name = self._fallback_column_name(column) + + is_literal = column.is_literal + if not is_literal and isinstance(name, elements._truncated_label): + name = self._truncated_identifier("colident", name) + + if add_to_result_map is not None: + add_to_result_map( + name, + orig_name, + (column, name, column.key), + column.type + ) + + if is_literal: + name = self.escape_literal_column(name) + else: + name = self.preparer.quote(name) + + table = column.table + if table is None or not include_table or not table.named_with_column: + return name + else: + effective_schema = self.preparer.schema_for_object(table) + + if effective_schema: + schema_prefix = self.preparer.quote_schema( + effective_schema) + '.' + else: + schema_prefix = '' + tablename = table.name + if isinstance(tablename, elements._truncated_label): + tablename = self._truncated_identifier("alias", tablename) + + return schema_prefix + \ + self.preparer.quote(tablename) + \ + "." 
+ name + + def escape_literal_column(self, text): + """provide escaping for the literal_column() construct.""" + + # TODO: some dialects might need different behavior here + return text.replace('%', '%%') + + def visit_fromclause(self, fromclause, **kwargs): + return fromclause.name + + def visit_index(self, index, **kwargs): + return index.name + + def visit_typeclause(self, typeclause, **kw): + kw['type_expression'] = typeclause + return self.dialect.type_compiler.process(typeclause.type, **kw) + + def post_process_text(self, text): + return text + + def visit_textclause(self, textclause, **kw): + def do_bindparam(m): + name = m.group(1) + if name in textclause._bindparams: + return self.process(textclause._bindparams[name], **kw) + else: + return self.bindparam_string(name, **kw) + + if not self.stack: + self.isplaintext = True + + # un-escape any \:params + return BIND_PARAMS_ESC.sub( + lambda m: m.group(1), + BIND_PARAMS.sub( + do_bindparam, + self.post_process_text(textclause.text)) + ) + + def visit_text_as_from(self, taf, + compound_index=None, + asfrom=False, + parens=True, **kw): + + toplevel = not self.stack + entry = self._default_stack_entry if toplevel else self.stack[-1] + + populate_result_map = toplevel or \ + ( + compound_index == 0 and entry.get( + 'need_result_map_for_compound', False) + ) or entry.get('need_result_map_for_nested', False) + + if populate_result_map: + self._ordered_columns = \ + self._textual_ordered_columns = taf.positional + for c in taf.column_args: + self.process(c, within_columns_clause=True, + add_to_result_map=self._add_to_result_map) + + text = self.process(taf.element, **kw) + if asfrom and parens: + text = "(%s)" % text + return text + + def visit_null(self, expr, **kw): + return 'NULL' + + def visit_true(self, expr, **kw): + if self.dialect.supports_native_boolean: + return 'true' + else: + return "1" + + def visit_false(self, expr, **kw): + if self.dialect.supports_native_boolean: + return 'false' + else: + return "0" + + def visit_clauselist(self, clauselist, **kw): + sep = clauselist.operator + if sep is None: + sep = " " + else: + sep = OPERATORS[clauselist.operator] + return sep.join( + s for s in + ( + c._compiler_dispatch(self, **kw) + for c in clauselist.clauses) + if s) + + def visit_case(self, clause, **kwargs): + x = "CASE " + if clause.value is not None: + x += clause.value._compiler_dispatch(self, **kwargs) + " " + for cond, result in clause.whens: + x += "WHEN " + cond._compiler_dispatch( + self, **kwargs + ) + " THEN " + result._compiler_dispatch( + self, **kwargs) + " " + if clause.else_ is not None: + x += "ELSE " + clause.else_._compiler_dispatch( + self, **kwargs + ) + " " + x += "END" + return x + + def visit_type_coerce(self, type_coerce, **kw): + return type_coerce.typed_expression._compiler_dispatch(self, **kw) + + def visit_cast(self, cast, **kwargs): + return "CAST(%s AS %s)" % \ + (cast.clause._compiler_dispatch(self, **kwargs), + cast.typeclause._compiler_dispatch(self, **kwargs)) + + def _format_frame_clause(self, range_, **kw): + return '%s AND %s' % ( + "UNBOUNDED PRECEDING" + if range_[0] is elements.RANGE_UNBOUNDED + else "CURRENT ROW" if range_[0] is elements.RANGE_CURRENT + else "%s PRECEDING" % (self.process(range_[0], **kw), ), + + "UNBOUNDED FOLLOWING" + if range_[1] is elements.RANGE_UNBOUNDED + else "CURRENT ROW" if range_[1] is elements.RANGE_CURRENT + else "%s FOLLOWING" % (self.process(range_[1], **kw), ) + ) + + def visit_over(self, over, **kwargs): + if over.range_: + range_ = "RANGE BETWEEN %s" % 
self._format_frame_clause( + over.range_, **kwargs) + elif over.rows: + range_ = "ROWS BETWEEN %s" % self._format_frame_clause( + over.rows, **kwargs) + else: + range_ = None + + return "%s OVER (%s)" % ( + over.element._compiler_dispatch(self, **kwargs), + ' '.join([ + '%s BY %s' % ( + word, clause._compiler_dispatch(self, **kwargs) + ) + for word, clause in ( + ('PARTITION', over.partition_by), + ('ORDER', over.order_by) + ) + if clause is not None and len(clause) + ] + ([range_] if range_ else []) + ) + ) + + def visit_withingroup(self, withingroup, **kwargs): + return "%s WITHIN GROUP (ORDER BY %s)" % ( + withingroup.element._compiler_dispatch(self, **kwargs), + withingroup.order_by._compiler_dispatch(self, **kwargs) + ) + + def visit_funcfilter(self, funcfilter, **kwargs): + return "%s FILTER (WHERE %s)" % ( + funcfilter.func._compiler_dispatch(self, **kwargs), + funcfilter.criterion._compiler_dispatch(self, **kwargs) + ) + + def visit_extract(self, extract, **kwargs): + field = self.extract_map.get(extract.field, extract.field) + return "EXTRACT(%s FROM %s)" % ( + field, extract.expr._compiler_dispatch(self, **kwargs)) + + def visit_function(self, func, add_to_result_map=None, **kwargs): + if add_to_result_map is not None: + add_to_result_map( + func.name, func.name, (), func.type + ) + + disp = getattr(self, "visit_%s_func" % func.name.lower(), None) + if disp: + return disp(func, **kwargs) + else: + name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s") + return ".".join(list(func.packagenames) + [name]) % \ + {'expr': self.function_argspec(func, **kwargs)} + + def visit_next_value_func(self, next_value, **kw): + return self.visit_sequence(next_value.sequence) + + def visit_sequence(self, sequence): + raise NotImplementedError( + "Dialect '%s' does not support sequence increments." 
% + self.dialect.name + ) + + def function_argspec(self, func, **kwargs): + return func.clause_expr._compiler_dispatch(self, **kwargs) + + def visit_compound_select(self, cs, asfrom=False, + parens=True, compound_index=0, **kwargs): + toplevel = not self.stack + entry = self._default_stack_entry if toplevel else self.stack[-1] + need_result_map = toplevel or \ + (compound_index == 0 + and entry.get('need_result_map_for_compound', False)) + + self.stack.append( + { + 'correlate_froms': entry['correlate_froms'], + 'asfrom_froms': entry['asfrom_froms'], + 'selectable': cs, + 'need_result_map_for_compound': need_result_map + }) + + keyword = self.compound_keywords.get(cs.keyword) + + text = (" " + keyword + " ").join( + (c._compiler_dispatch(self, + asfrom=asfrom, parens=False, + compound_index=i, **kwargs) + for i, c in enumerate(cs.selects)) + ) + + group_by = cs._group_by_clause._compiler_dispatch( + self, asfrom=asfrom, **kwargs) + if group_by: + text += " GROUP BY " + group_by + + text += self.order_by_clause(cs, **kwargs) + text += (cs._limit_clause is not None + or cs._offset_clause is not None) and \ + self.limit_clause(cs, **kwargs) or "" + + if self.ctes and toplevel: + text = self._render_cte_clause() + text + + self.stack.pop(-1) + if asfrom and parens: + return "(" + text + ")" + else: + return text + + def _get_operator_dispatch(self, operator_, qualifier1, qualifier2): + attrname = "visit_%s_%s%s" % ( + operator_.__name__, qualifier1, + "_" + qualifier2 if qualifier2 else "") + return getattr(self, attrname, None) + + def visit_unary(self, unary, **kw): + if unary.operator: + if unary.modifier: + raise exc.CompileError( + "Unary expression does not support operator " + "and modifier simultaneously") + disp = self._get_operator_dispatch( + unary.operator, "unary", "operator") + if disp: + return disp(unary, unary.operator, **kw) + else: + return self._generate_generic_unary_operator( + unary, OPERATORS[unary.operator], **kw) + elif unary.modifier: + disp = self._get_operator_dispatch( + unary.modifier, "unary", "modifier") + if disp: + return disp(unary, unary.modifier, **kw) + else: + return self._generate_generic_unary_modifier( + unary, OPERATORS[unary.modifier], **kw) + else: + raise exc.CompileError( + "Unary expression has no operator or modifier") + + def visit_istrue_unary_operator(self, element, operator, **kw): + if self.dialect.supports_native_boolean: + return self.process(element.element, **kw) + else: + return "%s = 1" % self.process(element.element, **kw) + + def visit_isfalse_unary_operator(self, element, operator, **kw): + if self.dialect.supports_native_boolean: + return "NOT %s" % self.process(element.element, **kw) + else: + return "%s = 0" % self.process(element.element, **kw) + + def visit_notmatch_op_binary(self, binary, operator, **kw): + return "NOT %s" % self.visit_binary( + binary, override_operator=operators.match_op) + + def visit_binary(self, binary, override_operator=None, + eager_grouping=False, **kw): + + # don't allow "? = ?" 
to render + if self.ansi_bind_rules and \ + isinstance(binary.left, elements.BindParameter) and \ + isinstance(binary.right, elements.BindParameter): + kw['literal_binds'] = True + + operator_ = override_operator or binary.operator + disp = self._get_operator_dispatch(operator_, "binary", None) + if disp: + return disp(binary, operator_, **kw) + else: + try: + opstring = OPERATORS[operator_] + except KeyError: + raise exc.UnsupportedCompilationError(self, operator_) + else: + return self._generate_generic_binary(binary, opstring, **kw) + + def visit_custom_op_binary(self, element, operator, **kw): + kw['eager_grouping'] = operator.eager_grouping + return self._generate_generic_binary( + element, " " + operator.opstring + " ", **kw) + + def visit_custom_op_unary_operator(self, element, operator, **kw): + return self._generate_generic_unary_operator( + element, operator.opstring + " ", **kw) + + def visit_custom_op_unary_modifier(self, element, operator, **kw): + return self._generate_generic_unary_modifier( + element, " " + operator.opstring, **kw) + + def _generate_generic_binary( + self, binary, opstring, eager_grouping=False, **kw): + + _in_binary = kw.get('_in_binary', False) + + kw['_in_binary'] = True + text = binary.left._compiler_dispatch( + self, eager_grouping=eager_grouping, **kw) + \ + opstring + \ + binary.right._compiler_dispatch( + self, eager_grouping=eager_grouping, **kw) + + if _in_binary and eager_grouping: + text = "(%s)" % text + return text + + def _generate_generic_unary_operator(self, unary, opstring, **kw): + return opstring + unary.element._compiler_dispatch(self, **kw) + + def _generate_generic_unary_modifier(self, unary, opstring, **kw): + return unary.element._compiler_dispatch(self, **kw) + opstring + + @util.memoized_property + def _like_percent_literal(self): + return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE) + + def visit_contains_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.right = percent.__add__(binary.right).__add__(percent) + return self.visit_like_op_binary(binary, operator, **kw) + + def visit_notcontains_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.right = percent.__add__(binary.right).__add__(percent) + return self.visit_notlike_op_binary(binary, operator, **kw) + + def visit_startswith_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.right = percent.__radd__( + binary.right + ) + return self.visit_like_op_binary(binary, operator, **kw) + + def visit_notstartswith_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.right = percent.__radd__( + binary.right + ) + return self.visit_notlike_op_binary(binary, operator, **kw) + + def visit_endswith_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.right = percent.__add__(binary.right) + return self.visit_like_op_binary(binary, operator, **kw) + + def visit_notendswith_op_binary(self, binary, operator, **kw): + binary = binary._clone() + percent = self._like_percent_literal + binary.right = percent.__add__(binary.right) + return self.visit_notlike_op_binary(binary, operator, **kw) + + def visit_like_op_binary(self, binary, operator, **kw): + escape = binary.modifiers.get("escape", None) + + # TODO: use ternary here, not "and"/ "or" + return '%s LIKE %s' % ( 
+ binary.left._compiler_dispatch(self, **kw), + binary.right._compiler_dispatch(self, **kw)) \ + + ( + ' ESCAPE ' + + self.render_literal_value(escape, sqltypes.STRINGTYPE) + if escape else '' + ) + + def visit_notlike_op_binary(self, binary, operator, **kw): + escape = binary.modifiers.get("escape", None) + return '%s NOT LIKE %s' % ( + binary.left._compiler_dispatch(self, **kw), + binary.right._compiler_dispatch(self, **kw)) \ + + ( + ' ESCAPE ' + + self.render_literal_value(escape, sqltypes.STRINGTYPE) + if escape else '' + ) + + def visit_ilike_op_binary(self, binary, operator, **kw): + escape = binary.modifiers.get("escape", None) + return 'lower(%s) LIKE lower(%s)' % ( + binary.left._compiler_dispatch(self, **kw), + binary.right._compiler_dispatch(self, **kw)) \ + + ( + ' ESCAPE ' + + self.render_literal_value(escape, sqltypes.STRINGTYPE) + if escape else '' + ) + + def visit_notilike_op_binary(self, binary, operator, **kw): + escape = binary.modifiers.get("escape", None) + return 'lower(%s) NOT LIKE lower(%s)' % ( + binary.left._compiler_dispatch(self, **kw), + binary.right._compiler_dispatch(self, **kw)) \ + + ( + ' ESCAPE ' + + self.render_literal_value(escape, sqltypes.STRINGTYPE) + if escape else '' + ) + + def visit_between_op_binary(self, binary, operator, **kw): + symmetric = binary.modifiers.get("symmetric", False) + return self._generate_generic_binary( + binary, " BETWEEN SYMMETRIC " + if symmetric else " BETWEEN ", **kw) + + def visit_notbetween_op_binary(self, binary, operator, **kw): + symmetric = binary.modifiers.get("symmetric", False) + return self._generate_generic_binary( + binary, " NOT BETWEEN SYMMETRIC " + if symmetric else " NOT BETWEEN ", **kw) + + def visit_bindparam(self, bindparam, within_columns_clause=False, + literal_binds=False, + skip_bind_expression=False, + **kwargs): + if not skip_bind_expression and bindparam.type._has_bind_expression: + bind_expression = bindparam.type.bind_expression(bindparam) + return self.process(bind_expression, + skip_bind_expression=True) + + if literal_binds or \ + (within_columns_clause and + self.ansi_bind_rules): + if bindparam.value is None and bindparam.callable is None: + raise exc.CompileError("Bind parameter '%s' without a " + "renderable value not allowed here." + % bindparam.key) + return self.render_literal_bindparam( + bindparam, within_columns_clause=True, **kwargs) + + name = self._truncate_bindparam(bindparam) + + if name in self.binds: + existing = self.binds[name] + if existing is not bindparam: + if (existing.unique or bindparam.unique) and \ + not existing.proxy_set.intersection( + bindparam.proxy_set): + raise exc.CompileError( + "Bind parameter '%s' conflicts with " + "unique bind parameter of the same name" % + bindparam.key + ) + elif existing._is_crud or bindparam._is_crud: + raise exc.CompileError( + "bindparam() name '%s' is reserved " + "for automatic usage in the VALUES or SET " + "clause of this " + "insert/update statement. Please use a " + "name other than column name when using bindparam() " + "with insert() or update() (for example, 'b_%s')." % + (bindparam.key, bindparam.key) + ) + + self.binds[bindparam.key] = self.binds[name] = bindparam + + return self.bindparam_string(name, **kwargs) + + def render_literal_bindparam(self, bindparam, **kw): + value = bindparam.effective_value + return self.render_literal_value(value, bindparam.type) + + def render_literal_value(self, value, type_): + """Render the value of a bind parameter as a quoted literal. 
+ + This is used for statement sections that do not accept bind parameters + on the target driver/database. + + This should be implemented by subclasses using the quoting services + of the DBAPI. + + """ + + processor = type_._cached_literal_processor(self.dialect) + if processor: + return processor(value) + else: + raise NotImplementedError( + "Don't know how to literal-quote value %r" % value) + + def _truncate_bindparam(self, bindparam): + if bindparam in self.bind_names: + return self.bind_names[bindparam] + + bind_name = bindparam.key + if isinstance(bind_name, elements._truncated_label): + bind_name = self._truncated_identifier("bindparam", bind_name) + + # add to bind_names for translation + self.bind_names[bindparam] = bind_name + + return bind_name + + def _truncated_identifier(self, ident_class, name): + if (ident_class, name) in self.truncated_names: + return self.truncated_names[(ident_class, name)] + + anonname = name.apply_map(self.anon_map) + + if len(anonname) > self.label_length - 6: + counter = self.truncated_names.get(ident_class, 1) + truncname = anonname[0:max(self.label_length - 6, 0)] + \ + "_" + hex(counter)[2:] + self.truncated_names[ident_class] = counter + 1 + else: + truncname = anonname + self.truncated_names[(ident_class, name)] = truncname + return truncname + + def _anonymize(self, name): + return name % self.anon_map + + def _process_anon(self, key): + (ident, derived) = key.split(' ', 1) + anonymous_counter = self.anon_map.get(derived, 1) + self.anon_map[derived] = anonymous_counter + 1 + return derived + "_" + str(anonymous_counter) + + def bindparam_string(self, name, positional_names=None, **kw): + if self.positional: + if positional_names is not None: + positional_names.append(name) + else: + self.positiontup.append(name) + return self.bindtemplate % {'name': name} + + def visit_cte(self, cte, asfrom=False, ashint=False, + fromhints=None, + **kwargs): + self._init_cte_state() + + if isinstance(cte.name, elements._truncated_label): + cte_name = self._truncated_identifier("alias", cte.name) + else: + cte_name = cte.name + + if cte_name in self.ctes_by_name: + existing_cte = self.ctes_by_name[cte_name] + # we've generated a same-named CTE that we are enclosed in, + # or this is the same CTE. just return the name. + if cte in existing_cte._restates or cte is existing_cte: + return self.preparer.format_alias(cte, cte_name) + elif existing_cte in cte._restates: + # we've generated a same-named CTE that is + # enclosed in us - we take precedence, so + # discard the text for the "inner". 
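+                #
+                # e.g., a hypothetical outer CTE named "regional_sales"
+                # that restates an inner CTE of the same name keeps only
+                # the outer definition; truly unrelated duplicates fall
+                # through to the CompileError below.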
+ del self.ctes[existing_cte] + else: + raise exc.CompileError( + "Multiple, unrelated CTEs found with " + "the same name: %r" % + cte_name) + + self.ctes_by_name[cte_name] = cte + + # look for embedded DML ctes and propagate autocommit + if 'autocommit' in cte.element._execution_options and \ + 'autocommit' not in self.execution_options: + self.execution_options = self.execution_options.union( + {"autocommit": cte.element._execution_options['autocommit']}) + + if cte._cte_alias is not None: + orig_cte = cte._cte_alias + if orig_cte not in self.ctes: + self.visit_cte(orig_cte, **kwargs) + cte_alias_name = cte._cte_alias.name + if isinstance(cte_alias_name, elements._truncated_label): + cte_alias_name = self._truncated_identifier( + "alias", cte_alias_name) + else: + orig_cte = cte + cte_alias_name = None + if not cte_alias_name and cte not in self.ctes: + if cte.recursive: + self.ctes_recursive = True + text = self.preparer.format_alias(cte, cte_name) + if cte.recursive: + if isinstance(cte.original, selectable.Select): + col_source = cte.original + elif isinstance(cte.original, selectable.CompoundSelect): + col_source = cte.original.selects[0] + else: + assert False + recur_cols = [c for c in + util.unique_list(col_source.inner_columns) + if c is not None] + + text += "(%s)" % (", ".join( + self.preparer.format_column(ident) + for ident in recur_cols)) + + if self.positional: + kwargs['positional_names'] = self.cte_positional[cte] = [] + + text += " AS \n" + \ + cte.original._compiler_dispatch( + self, asfrom=True, **kwargs + ) + + if cte._suffixes: + text += " " + self._generate_prefixes( + cte, cte._suffixes, **kwargs) + + self.ctes[cte] = text + + if asfrom: + if cte_alias_name: + text = self.preparer.format_alias(cte, cte_alias_name) + text += self.get_render_as_alias_suffix(cte_name) + else: + return self.preparer.format_alias(cte, cte_name) + return text + + def visit_alias(self, alias, asfrom=False, ashint=False, + iscrud=False, + fromhints=None, **kwargs): + if asfrom or ashint: + if isinstance(alias.name, elements._truncated_label): + alias_name = self._truncated_identifier("alias", alias.name) + else: + alias_name = alias.name + + if ashint: + return self.preparer.format_alias(alias, alias_name) + elif asfrom: + ret = alias.original._compiler_dispatch(self, + asfrom=True, **kwargs) + \ + self.get_render_as_alias_suffix( + self.preparer.format_alias(alias, alias_name)) + + if fromhints and alias in fromhints: + ret = self.format_from_hint_text(ret, alias, + fromhints[alias], iscrud) + + return ret + else: + return alias.original._compiler_dispatch(self, **kwargs) + + def visit_lateral(self, lateral, **kw): + kw['lateral'] = True + return "LATERAL %s" % self.visit_alias(lateral, **kw) + + def visit_tablesample(self, tablesample, asfrom=False, **kw): + text = "%s TABLESAMPLE %s" % ( + self.visit_alias(tablesample, asfrom=True, **kw), + tablesample._get_method()._compiler_dispatch(self, **kw)) + + if tablesample.seed is not None: + text += " REPEATABLE (%s)" % ( + tablesample.seed._compiler_dispatch(self, **kw)) + + return text + + def get_render_as_alias_suffix(self, alias_name_text): + return " AS " + alias_name_text + + def _add_to_result_map(self, keyname, name, objects, type_): + self._result_columns.append((keyname, name, objects, type_)) + + def _label_select_column(self, select, column, + populate_result_map, + asfrom, column_clause_args, + name=None, + within_columns_clause=True): + """produce labeled columns present in a select().""" + + if 
column.type._has_column_expression and \ + populate_result_map: + col_expr = column.type.column_expression(column) + add_to_result_map = lambda keyname, name, objects, type_: \ + self._add_to_result_map( + keyname, name, + (column,) + objects, type_) + else: + col_expr = column + if populate_result_map: + add_to_result_map = self._add_to_result_map + else: + add_to_result_map = None + + if not within_columns_clause: + result_expr = col_expr + elif isinstance(column, elements.Label): + if col_expr is not column: + result_expr = _CompileLabel( + col_expr, + column.name, + alt_names=(column.element,) + ) + else: + result_expr = col_expr + + elif select is not None and name: + result_expr = _CompileLabel( + col_expr, + name, + alt_names=(column._key_label,) + ) + + elif \ + asfrom and \ + isinstance(column, elements.ColumnClause) and \ + not column.is_literal and \ + column.table is not None and \ + not isinstance(column.table, selectable.Select): + result_expr = _CompileLabel(col_expr, + elements._as_truncated(column.name), + alt_names=(column.key,)) + elif ( + not isinstance(column, elements.TextClause) and + ( + not isinstance(column, elements.UnaryExpression) or + column.wraps_column_expression + ) and + ( + not hasattr(column, 'name') or + isinstance(column, functions.Function) + ) + ): + result_expr = _CompileLabel(col_expr, column.anon_label) + elif col_expr is not column: + # TODO: are we sure "column" has a .name and .key here ? + # assert isinstance(column, elements.ColumnClause) + result_expr = _CompileLabel(col_expr, + elements._as_truncated(column.name), + alt_names=(column.key,)) + else: + result_expr = col_expr + + column_clause_args.update( + within_columns_clause=within_columns_clause, + add_to_result_map=add_to_result_map + ) + return result_expr._compiler_dispatch( + self, + **column_clause_args + ) + + def format_from_hint_text(self, sqltext, table, hint, iscrud): + hinttext = self.get_from_hint_text(table, hint) + if hinttext: + sqltext += " " + hinttext + return sqltext + + def get_select_hint_text(self, byfroms): + return None + + def get_from_hint_text(self, table, text): + return None + + def get_crud_hint_text(self, table, text): + return None + + def get_statement_hint_text(self, hint_texts): + return " ".join(hint_texts) + + def _transform_select_for_nested_joins(self, select): + """Rewrite any "a JOIN (b JOIN c)" expression as + "a JOIN (select * from b JOIN c) AS anon", to support + databases that can't parse a parenthesized join correctly + (i.e. sqlite < 3.7.16). 
+ + """ + cloned = {} + column_translate = [{}] + + def visit(element, **kw): + if element in column_translate[-1]: + return column_translate[-1][element] + + elif element in cloned: + return cloned[element] + + newelem = cloned[element] = element._clone() + + if newelem.is_selectable and newelem._is_join and \ + isinstance(newelem.right, selectable.FromGrouping): + + newelem._reset_exported() + newelem.left = visit(newelem.left, **kw) + + right = visit(newelem.right, **kw) + + selectable_ = selectable.Select( + [right.element], + use_labels=True).alias() + + for c in selectable_.c: + c._key_label = c.key + c._label = c.name + + translate_dict = dict( + zip(newelem.right.element.c, selectable_.c) + ) + + # translating from both the old and the new + # because different select() structures will lead us + # to traverse differently + translate_dict[right.element.left] = selectable_ + translate_dict[right.element.right] = selectable_ + translate_dict[newelem.right.element.left] = selectable_ + translate_dict[newelem.right.element.right] = selectable_ + + # propagate translations that we've gained + # from nested visit(newelem.right) outwards + # to the enclosing select here. this happens + # only when we have more than one level of right + # join nesting, i.e. "a JOIN (b JOIN (c JOIN d))" + for k, v in list(column_translate[-1].items()): + if v in translate_dict: + # remarkably, no current ORM tests (May 2013) + # hit this condition, only test_join_rewriting + # does. + column_translate[-1][k] = translate_dict[v] + + column_translate[-1].update(translate_dict) + + newelem.right = selectable_ + + newelem.onclause = visit(newelem.onclause, **kw) + + elif newelem._is_from_container: + # if we hit an Alias, CompoundSelect or ScalarSelect, put a + # marker in the stack. + kw['transform_clue'] = 'select_container' + newelem._copy_internals(clone=visit, **kw) + elif newelem.is_selectable and newelem._is_select: + barrier_select = kw.get('transform_clue', None) == \ + 'select_container' + # if we're still descended from an + # Alias/CompoundSelect/ScalarSelect, we're + # in a FROM clause, so start with a new translate collection + if barrier_select: + column_translate.append({}) + kw['transform_clue'] = 'inside_select' + newelem._copy_internals(clone=visit, **kw) + if barrier_select: + del column_translate[-1] + else: + newelem._copy_internals(clone=visit, **kw) + + return newelem + + return visit(select) + + def _transform_result_map_for_nested_joins( + self, select, transformed_select): + inner_col = dict((c._key_label, c) for + c in transformed_select.inner_columns) + + d = dict( + (inner_col[c._key_label], c) + for c in select.inner_columns + ) + + self._result_columns = [ + (key, name, tuple([d.get(col, col) for col in objs]), typ) + for key, name, objs, typ in self._result_columns + ] + + _default_stack_entry = util.immutabledict([ + ('correlate_froms', frozenset()), + ('asfrom_froms', frozenset()) + ]) + + def _display_froms_for_select(self, select, asfrom, lateral=False): + # utility method to help external dialects + # get the correct from list for a select. + # specifically the oracle dialect needs this feature + # right now. 
+ toplevel = not self.stack + entry = self._default_stack_entry if toplevel else self.stack[-1] + + correlate_froms = entry['correlate_froms'] + asfrom_froms = entry['asfrom_froms'] + + if asfrom and not lateral: + froms = select._get_display_froms( + explicit_correlate_froms=correlate_froms.difference( + asfrom_froms), + implicit_correlate_froms=()) + else: + froms = select._get_display_froms( + explicit_correlate_froms=correlate_froms, + implicit_correlate_froms=asfrom_froms) + return froms + + def visit_select(self, select, asfrom=False, parens=True, + fromhints=None, + compound_index=0, + nested_join_translation=False, + select_wraps_for=None, + lateral=False, + **kwargs): + + needs_nested_translation = \ + select.use_labels and \ + not nested_join_translation and \ + not self.stack and \ + not self.dialect.supports_right_nested_joins + + if needs_nested_translation: + transformed_select = self._transform_select_for_nested_joins( + select) + text = self.visit_select( + transformed_select, asfrom=asfrom, parens=parens, + fromhints=fromhints, + compound_index=compound_index, + nested_join_translation=True, **kwargs + ) + + toplevel = not self.stack + entry = self._default_stack_entry if toplevel else self.stack[-1] + + populate_result_map = toplevel or \ + ( + compound_index == 0 and entry.get( + 'need_result_map_for_compound', False) + ) or entry.get('need_result_map_for_nested', False) + + # this was first proposed as part of #3372; however, it is not + # reached in current tests and could possibly be an assertion + # instead. + if not populate_result_map and 'add_to_result_map' in kwargs: + del kwargs['add_to_result_map'] + + if needs_nested_translation: + if populate_result_map: + self._transform_result_map_for_nested_joins( + select, transformed_select) + return text + + froms = self._setup_select_stack(select, entry, asfrom, lateral) + + column_clause_args = kwargs.copy() + column_clause_args.update({ + 'within_label_clause': False, + 'within_columns_clause': False + }) + + text = "SELECT " # we're off to a good start ! + + if select._hints: + hint_text, byfrom = self._setup_select_hints(select) + if hint_text: + text += hint_text + " " + else: + byfrom = None + + if select._prefixes: + text += self._generate_prefixes( + select, select._prefixes, **kwargs) + + text += self.get_select_precolumns(select, **kwargs) + # the actual list of columns to print in the SELECT column list. 
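+        # e.g., a hypothetical select([t.c.id, t.c.name]) yields entries
+        # such as "t.id" and "t.name" (labeled forms under use_labels);
+        # entries that compile to None are filtered out.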
+ inner_columns = [ + c for c in [ + self._label_select_column( + select, + column, + populate_result_map, asfrom, + column_clause_args, + name=name) + for name, column in select._columns_plus_names + ] + if c is not None + ] + + if populate_result_map and select_wraps_for is not None: + # if this select is a compiler-generated wrapper, + # rewrite the targeted columns in the result map + + translate = dict( + zip( + [name for (key, name) in select._columns_plus_names], + [name for (key, name) in + select_wraps_for._columns_plus_names]) + ) + + self._result_columns = [ + (key, name, tuple(translate.get(o, o) for o in obj), type_) + for key, name, obj, type_ in self._result_columns + ] + + text = self._compose_select_body( + text, select, inner_columns, froms, byfrom, kwargs) + + if select._statement_hints: + per_dialect = [ + ht for (dialect_name, ht) + in select._statement_hints + if dialect_name in ('*', self.dialect.name) + ] + if per_dialect: + text += " " + self.get_statement_hint_text(per_dialect) + + if self.ctes and toplevel: + text = self._render_cte_clause() + text + + if select._suffixes: + text += " " + self._generate_prefixes( + select, select._suffixes, **kwargs) + + self.stack.pop(-1) + + if (asfrom or lateral) and parens: + return "(" + text + ")" + else: + return text + + def _setup_select_hints(self, select): + byfrom = dict([ + (from_, hinttext % { + 'name': from_._compiler_dispatch( + self, ashint=True) + }) + for (from_, dialect), hinttext in + select._hints.items() + if dialect in ('*', self.dialect.name) + ]) + hint_text = self.get_select_hint_text(byfrom) + return hint_text, byfrom + + def _setup_select_stack(self, select, entry, asfrom, lateral): + correlate_froms = entry['correlate_froms'] + asfrom_froms = entry['asfrom_froms'] + + if asfrom and not lateral: + froms = select._get_display_froms( + explicit_correlate_froms=correlate_froms.difference( + asfrom_froms), + implicit_correlate_froms=()) + else: + froms = select._get_display_froms( + explicit_correlate_froms=correlate_froms, + implicit_correlate_froms=asfrom_froms) + + new_correlate_froms = set(selectable._from_objects(*froms)) + all_correlate_froms = new_correlate_froms.union(correlate_froms) + + new_entry = { + 'asfrom_froms': new_correlate_froms, + 'correlate_froms': all_correlate_froms, + 'selectable': select, + } + self.stack.append(new_entry) + + return froms + + def _compose_select_body( + self, text, select, inner_columns, froms, byfrom, kwargs): + text += ', '.join(inner_columns) + + if froms: + text += " \nFROM " + + if select._hints: + text += ', '.join( + [f._compiler_dispatch(self, asfrom=True, + fromhints=byfrom, **kwargs) + for f in froms]) + else: + text += ', '.join( + [f._compiler_dispatch(self, asfrom=True, **kwargs) + for f in froms]) + else: + text += self.default_from() + + if select._whereclause is not None: + t = select._whereclause._compiler_dispatch(self, **kwargs) + if t: + text += " \nWHERE " + t + + if select._group_by_clause.clauses: + group_by = select._group_by_clause._compiler_dispatch( + self, **kwargs) + if group_by: + text += " GROUP BY " + group_by + + if select._having is not None: + t = select._having._compiler_dispatch(self, **kwargs) + if t: + text += " \nHAVING " + t + + if select._order_by_clause.clauses: + text += self.order_by_clause(select, **kwargs) + + if (select._limit_clause is not None or + select._offset_clause is not None): + text += self.limit_clause(select, **kwargs) + + if select._for_update_arg is not None: + text += self.for_update_clause(select, 
**kwargs) + + return text + + def _generate_prefixes(self, stmt, prefixes, **kw): + clause = " ".join( + prefix._compiler_dispatch(self, **kw) + for prefix, dialect_name in prefixes + if dialect_name is None or + dialect_name == self.dialect.name + ) + if clause: + clause += " " + return clause + + def _render_cte_clause(self): + if self.positional: + self.positiontup = sum([ + self.cte_positional[cte] + for cte in self.ctes], []) + \ + self.positiontup + cte_text = self.get_cte_preamble(self.ctes_recursive) + " " + cte_text += ", \n".join( + [txt for txt in self.ctes.values()] + ) + cte_text += "\n " + return cte_text + + def get_cte_preamble(self, recursive): + if recursive: + return "WITH RECURSIVE" + else: + return "WITH" + + def get_select_precolumns(self, select, **kw): + """Called when building a ``SELECT`` statement, position is just + before column list. + + """ + return select._distinct and "DISTINCT " or "" + + def order_by_clause(self, select, **kw): + order_by = select._order_by_clause._compiler_dispatch(self, **kw) + if order_by: + return " ORDER BY " + order_by + else: + return "" + + def for_update_clause(self, select, **kw): + return " FOR UPDATE" + + def returning_clause(self, stmt, returning_cols): + raise exc.CompileError( + "RETURNING is not supported by this " + "dialect's statement compiler.") + + def limit_clause(self, select, **kw): + text = "" + if select._limit_clause is not None: + text += "\n LIMIT " + self.process(select._limit_clause, **kw) + if select._offset_clause is not None: + if select._limit_clause is None: + text += "\n LIMIT -1" + text += " OFFSET " + self.process(select._offset_clause, **kw) + return text + + def visit_table(self, table, asfrom=False, iscrud=False, ashint=False, + fromhints=None, use_schema=True, **kwargs): + if asfrom or ashint: + effective_schema = self.preparer.schema_for_object(table) + + if use_schema and effective_schema: + ret = self.preparer.quote_schema(effective_schema) + \ + "." + self.preparer.quote(table.name) + else: + ret = self.preparer.quote(table.name) + if fromhints and table in fromhints: + ret = self.format_from_hint_text(ret, table, + fromhints[table], iscrud) + return ret + else: + return "" + + def visit_join(self, join, asfrom=False, **kwargs): + if join.full: + join_type = " FULL OUTER JOIN " + elif join.isouter: + join_type = " LEFT OUTER JOIN " + else: + join_type = " JOIN " + return ( + join.left._compiler_dispatch(self, asfrom=True, **kwargs) + + join_type + + join.right._compiler_dispatch(self, asfrom=True, **kwargs) + + " ON " + + join.onclause._compiler_dispatch(self, **kwargs) + ) + + def _setup_crud_hints(self, stmt, table_text): + dialect_hints = dict([ + (table, hint_text) + for (table, dialect), hint_text in + stmt._hints.items() + if dialect in ('*', self.dialect.name) + ]) + if stmt.table in dialect_hints: + table_text = self.format_from_hint_text( + table_text, + stmt.table, + dialect_hints[stmt.table], + True + ) + return dialect_hints, table_text + + def visit_insert(self, insert_stmt, asfrom=False, **kw): + toplevel = not self.stack + + self.stack.append( + {'correlate_froms': set(), + "asfrom_froms": set(), + "selectable": insert_stmt}) + + crud_params = crud._setup_crud_params( + self, insert_stmt, crud.ISINSERT, **kw) + + if not crud_params and \ + not self.dialect.supports_default_values and \ + not self.dialect.supports_empty_insert: + raise exc.CompileError("The '%s' dialect with current database " + "version settings does not support empty " + "inserts." 
% + self.dialect.name) + + if insert_stmt._has_multi_parameters: + if not self.dialect.supports_multivalues_insert: + raise exc.CompileError( + "The '%s' dialect with current database " + "version settings does not support " + "in-place multirow inserts." % + self.dialect.name) + crud_params_single = crud_params[0] + else: + crud_params_single = crud_params + + preparer = self.preparer + supports_default_values = self.dialect.supports_default_values + + text = "INSERT " + + if insert_stmt._prefixes: + text += self._generate_prefixes(insert_stmt, + insert_stmt._prefixes, **kw) + + text += "INTO " + table_text = preparer.format_table(insert_stmt.table) + + if insert_stmt._hints: + dialect_hints, table_text = self._setup_crud_hints( + insert_stmt, table_text) + else: + dialect_hints = None + + text += table_text + + if crud_params_single or not supports_default_values: + text += " (%s)" % ', '.join([preparer.format_column(c[0]) + for c in crud_params_single]) + + if self.returning or insert_stmt._returning: + returning_clause = self.returning_clause( + insert_stmt, self.returning or insert_stmt._returning) + + if self.returning_precedes_values: + text += " " + returning_clause + else: + returning_clause = None + + if insert_stmt.select is not None: + text += " %s" % self.process(self._insert_from_select, **kw) + elif not crud_params and supports_default_values: + text += " DEFAULT VALUES" + elif insert_stmt._has_multi_parameters: + text += " VALUES %s" % ( + ", ".join( + "(%s)" % ( + ', '.join(c[1] for c in crud_param_set) + ) + for crud_param_set in crud_params + ) + ) + else: + text += " VALUES (%s)" % \ + ', '.join([c[1] for c in crud_params]) + + if insert_stmt._post_values_clause is not None: + post_values_clause = self.process( + insert_stmt._post_values_clause, **kw) + if post_values_clause: + text += " " + post_values_clause + + if returning_clause and not self.returning_precedes_values: + text += " " + returning_clause + + if self.ctes and toplevel: + text = self._render_cte_clause() + text + + self.stack.pop(-1) + + if asfrom: + return "(" + text + ")" + else: + return text + + def update_limit_clause(self, update_stmt): + """Provide a hook for MySQL to add LIMIT to the UPDATE""" + return None + + def update_tables_clause(self, update_stmt, from_table, + extra_froms, **kw): + """Provide a hook to override the initial table clause + in an UPDATE statement. + + MySQL overrides this. + + """ + kw['asfrom'] = True + return from_table._compiler_dispatch(self, iscrud=True, **kw) + + def update_from_clause(self, update_stmt, + from_table, extra_froms, + from_hints, + **kw): + """Provide a hook to override the generation of an + UPDATE..FROM clause. + + MySQL and MSSQL override this. 
+ + """ + return "FROM " + ', '.join( + t._compiler_dispatch(self, asfrom=True, + fromhints=from_hints, **kw) + for t in extra_froms) + + def visit_update(self, update_stmt, asfrom=False, **kw): + toplevel = not self.stack + + self.stack.append( + {'correlate_froms': set([update_stmt.table]), + "asfrom_froms": set([update_stmt.table]), + "selectable": update_stmt}) + + extra_froms = update_stmt._extra_froms + + text = "UPDATE " + + if update_stmt._prefixes: + text += self._generate_prefixes(update_stmt, + update_stmt._prefixes, **kw) + + table_text = self.update_tables_clause(update_stmt, update_stmt.table, + extra_froms, **kw) + + crud_params = crud._setup_crud_params( + self, update_stmt, crud.ISUPDATE, **kw) + + if update_stmt._hints: + dialect_hints, table_text = self._setup_crud_hints( + update_stmt, table_text) + else: + dialect_hints = None + + text += table_text + + text += ' SET ' + include_table = extra_froms and \ + self.render_table_with_column_in_update_from + text += ', '.join( + c[0]._compiler_dispatch(self, + include_table=include_table) + + '=' + c[1] for c in crud_params + ) + + if self.returning or update_stmt._returning: + if self.returning_precedes_values: + text += " " + self.returning_clause( + update_stmt, self.returning or update_stmt._returning) + + if extra_froms: + extra_from_text = self.update_from_clause( + update_stmt, + update_stmt.table, + extra_froms, + dialect_hints, **kw) + if extra_from_text: + text += " " + extra_from_text + + if update_stmt._whereclause is not None: + t = self.process(update_stmt._whereclause, **kw) + if t: + text += " WHERE " + t + + limit_clause = self.update_limit_clause(update_stmt) + if limit_clause: + text += " " + limit_clause + + if (self.returning or update_stmt._returning) and \ + not self.returning_precedes_values: + text += " " + self.returning_clause( + update_stmt, self.returning or update_stmt._returning) + + if self.ctes and toplevel: + text = self._render_cte_clause() + text + + self.stack.pop(-1) + + if asfrom: + return "(" + text + ")" + else: + return text + + @util.memoized_property + def _key_getters_for_crud_column(self): + return crud._key_getters_for_crud_column(self, self.statement) + + def visit_delete(self, delete_stmt, asfrom=False, **kw): + toplevel = not self.stack + + self.stack.append({'correlate_froms': set([delete_stmt.table]), + "asfrom_froms": set([delete_stmt.table]), + "selectable": delete_stmt}) + + crud._setup_crud_params(self, delete_stmt, crud.ISDELETE, **kw) + + text = "DELETE " + + if delete_stmt._prefixes: + text += self._generate_prefixes(delete_stmt, + delete_stmt._prefixes, **kw) + + text += "FROM " + table_text = delete_stmt.table._compiler_dispatch( + self, asfrom=True, iscrud=True) + + if delete_stmt._hints: + dialect_hints, table_text = self._setup_crud_hints( + delete_stmt, table_text) + + text += table_text + + if delete_stmt._returning: + if self.returning_precedes_values: + text += " " + self.returning_clause( + delete_stmt, delete_stmt._returning) + + if delete_stmt._whereclause is not None: + t = delete_stmt._whereclause._compiler_dispatch(self, **kw) + if t: + text += " WHERE " + t + + if delete_stmt._returning and not self.returning_precedes_values: + text += " " + self.returning_clause( + delete_stmt, delete_stmt._returning) + + if self.ctes and toplevel: + text = self._render_cte_clause() + text + + self.stack.pop(-1) + + if asfrom: + return "(" + text + ")" + else: + return text + + def visit_savepoint(self, savepoint_stmt): + return "SAVEPOINT %s" % 
self.preparer.format_savepoint(savepoint_stmt)
+
+    def visit_rollback_to_savepoint(self, savepoint_stmt):
+        return "ROLLBACK TO SAVEPOINT %s" % \
+            self.preparer.format_savepoint(savepoint_stmt)
+
+    def visit_release_savepoint(self, savepoint_stmt):
+        return "RELEASE SAVEPOINT %s" % \
+            self.preparer.format_savepoint(savepoint_stmt)
+
+
+class StrSQLCompiler(SQLCompiler):
+    """A compiler subclass with a few non-standard SQL features allowed.
+
+    Used for stringification of SQL statements when a real dialect is not
+    available.
+
+    """
+
+    def _fallback_column_name(self, column):
+        return "<name unknown>"
+
+    def visit_getitem_binary(self, binary, operator, **kw):
+        return "%s[%s]" % (
+            self.process(binary.left, **kw),
+            self.process(binary.right, **kw)
+        )
+
+    def visit_json_getitem_op_binary(self, binary, operator, **kw):
+        return self.visit_getitem_binary(binary, operator, **kw)
+
+    def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
+        return self.visit_getitem_binary(binary, operator, **kw)
+
+    def returning_clause(self, stmt, returning_cols):
+        columns = [
+            self._label_select_column(None, c, True, False, {})
+            for c in elements._select_iterables(returning_cols)
+        ]
+
+        return 'RETURNING ' + ', '.join(columns)
+
+
+class DDLCompiler(Compiled):
+
+    @util.memoized_property
+    def sql_compiler(self):
+        return self.dialect.statement_compiler(self.dialect, None)
+
+    @util.memoized_property
+    def type_compiler(self):
+        return self.dialect.type_compiler
+
+    def construct_params(self, params=None):
+        return None
+
+    def visit_ddl(self, ddl, **kwargs):
+        # table events can substitute table and schema name
+        context = ddl.context
+        if isinstance(ddl.target, schema.Table):
+            context = context.copy()
+
+            preparer = self.preparer
+            path = preparer.format_table_seq(ddl.target)
+            if len(path) == 1:
+                table, sch = path[0], ''
+            else:
+                table, sch = path[-1], path[0]
+
+            context.setdefault('table', table)
+            context.setdefault('schema', sch)
+            context.setdefault('fullname', preparer.format_table(ddl.target))
+
+        return self.sql_compiler.post_process_text(ddl.statement % context)
+
+    def visit_create_schema(self, create):
+        schema = self.preparer.format_schema(create.element)
+        return "CREATE SCHEMA " + schema
+
+    def visit_drop_schema(self, drop):
+        schema = self.preparer.format_schema(drop.element)
+        text = "DROP SCHEMA " + schema
+        if drop.cascade:
+            text += " CASCADE"
+        return text
+
+    def visit_create_table(self, create):
+        table = create.element
+        preparer = self.preparer
+
+        text = "\nCREATE "
+        if table._prefixes:
+            text += " ".join(table._prefixes) + " "
+        text += "TABLE " + preparer.format_table(table) + " "
+
+        create_table_suffix = self.create_table_suffix(table)
+        if create_table_suffix:
+            text += create_table_suffix + " "
+
+        text += "("
+
+        separator = "\n"
+
+        # if only one primary key, specify it along with the column
+        first_pk = False
+        for create_column in create.columns:
+            column = create_column.element
+            try:
+                processed = self.process(create_column,
+                                         first_pk=column.primary_key
+                                         and not first_pk)
+                if processed is not None:
+                    text += separator
+                    separator = ", \n"
+                    text += "\t" + processed
+                if column.primary_key:
+                    first_pk = True
+            except exc.CompileError as ce:
+                util.raise_from_cause(
+                    exc.CompileError(
+                        util.u("(in table '%s', column '%s'): %s") %
+                        (table.description, column.name, ce.args[0])
+                    ))
+
+        const = self.create_table_constraints(
+            table, _include_foreign_key_constraints=  # noqa
+            create.include_foreign_key_constraints)
+        if const:
+            text += separator + 
"\t" + const + + text += "\n)%s\n\n" % self.post_create_table(table) + return text + + def visit_create_column(self, create, first_pk=False): + column = create.element + + if column.system: + return None + + text = self.get_column_specification( + column, + first_pk=first_pk + ) + const = " ".join(self.process(constraint) + for constraint in column.constraints) + if const: + text += " " + const + + return text + + def create_table_constraints( + self, table, + _include_foreign_key_constraints=None): + + # On some DB order is significant: visit PK first, then the + # other constraints (engine.ReflectionTest.testbasic failed on FB2) + constraints = [] + if table.primary_key: + constraints.append(table.primary_key) + + all_fkcs = table.foreign_key_constraints + if _include_foreign_key_constraints is not None: + omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints) + else: + omit_fkcs = set() + + constraints.extend([c for c in table._sorted_constraints + if c is not table.primary_key and + c not in omit_fkcs]) + + return ", \n\t".join( + p for p in + (self.process(constraint) + for constraint in constraints + if ( + constraint._create_rule is None or + constraint._create_rule(self)) + and ( + not self.dialect.supports_alter or + not getattr(constraint, 'use_alter', False) + )) if p is not None + ) + + def visit_drop_table(self, drop): + return "\nDROP TABLE " + self.preparer.format_table(drop.element) + + def visit_drop_view(self, drop): + return "\nDROP VIEW " + self.preparer.format_table(drop.element) + + def _verify_index_table(self, index): + if index.table is None: + raise exc.CompileError("Index '%s' is not associated " + "with any table." % index.name) + + def visit_create_index(self, create, include_schema=False, + include_table_schema=True): + index = create.element + self._verify_index_table(index) + preparer = self.preparer + text = "CREATE " + if index.unique: + text += "UNIQUE " + text += "INDEX %s ON %s (%s)" \ + % ( + self._prepared_index_name(index, + include_schema=include_schema), + preparer.format_table(index.table, + use_schema=include_table_schema), + ', '.join( + self.sql_compiler.process( + expr, include_table=False, literal_binds=True) for + expr in index.expressions) + ) + return text + + def visit_drop_index(self, drop): + index = drop.element + return "\nDROP INDEX " + self._prepared_index_name( + index, include_schema=True) + + def _prepared_index_name(self, index, include_schema=False): + if index.table is not None: + effective_schema = self.preparer.schema_for_object(index.table) + else: + effective_schema = None + if include_schema and effective_schema: + schema_name = self.preparer.quote_schema(effective_schema) + else: + schema_name = None + + ident = index.name + if isinstance(ident, elements._truncated_label): + max_ = self.dialect.max_index_name_length or \ + self.dialect.max_identifier_length + if len(ident) > max_: + ident = ident[0:max_ - 8] + \ + "_" + util.md5_hex(ident)[-4:] + else: + self.dialect.validate_identifier(ident) + + index_name = self.preparer.quote(ident) + + if schema_name: + index_name = schema_name + "." 
+ index_name + return index_name + + def visit_add_constraint(self, create): + return "ALTER TABLE %s ADD %s" % ( + self.preparer.format_table(create.element.table), + self.process(create.element) + ) + + def visit_create_sequence(self, create): + text = "CREATE SEQUENCE %s" % \ + self.preparer.format_sequence(create.element) + if create.element.increment is not None: + text += " INCREMENT BY %d" % create.element.increment + if create.element.start is not None: + text += " START WITH %d" % create.element.start + if create.element.minvalue is not None: + text += " MINVALUE %d" % create.element.minvalue + if create.element.maxvalue is not None: + text += " MAXVALUE %d" % create.element.maxvalue + if create.element.nominvalue is not None: + text += " NO MINVALUE" + if create.element.nomaxvalue is not None: + text += " NO MAXVALUE" + if create.element.cycle is not None: + text += " CYCLE" + return text + + def visit_drop_sequence(self, drop): + return "DROP SEQUENCE %s" % \ + self.preparer.format_sequence(drop.element) + + def visit_drop_constraint(self, drop): + constraint = drop.element + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + else: + formatted_name = None + + if formatted_name is None: + raise exc.CompileError( + "Can't emit DROP CONSTRAINT for constraint %r; " + "it has no name" % drop.element) + return "ALTER TABLE %s DROP CONSTRAINT %s%s" % ( + self.preparer.format_table(drop.element.table), + formatted_name, + drop.cascade and " CASCADE" or "" + ) + + def get_column_specification(self, column, **kwargs): + colspec = self.preparer.format_column(column) + " " + \ + self.dialect.type_compiler.process( + column.type, type_expression=column) + default = self.get_column_default_string(column) + if default is not None: + colspec += " DEFAULT " + default + + if not column.nullable: + colspec += " NOT NULL" + return colspec + + def create_table_suffix(self, table): + return '' + + def post_create_table(self, table): + return '' + + def get_column_default_string(self, column): + if isinstance(column.server_default, schema.DefaultClause): + if isinstance(column.server_default.arg, util.string_types): + return self.sql_compiler.render_literal_value( + column.server_default.arg, sqltypes.STRINGTYPE) + else: + return self.sql_compiler.process( + column.server_default.arg, literal_binds=True) + else: + return None + + def visit_check_constraint(self, constraint): + text = "" + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + if formatted_name is not None: + text += "CONSTRAINT %s " % formatted_name + text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext, + include_table=False, + literal_binds=True) + text += self.define_constraint_deferrability(constraint) + return text + + def visit_column_check_constraint(self, constraint): + text = "" + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + if formatted_name is not None: + text += "CONSTRAINT %s " % formatted_name + text += "CHECK (%s)" % constraint.sqltext + text += self.define_constraint_deferrability(constraint) + return text + + def visit_primary_key_constraint(self, constraint): + if len(constraint) == 0: + return '' + text = "" + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + if formatted_name is not None: + text += "CONSTRAINT %s " % formatted_name + text += "PRIMARY KEY " + text += "(%s)" % ', '.join(self.preparer.quote(c.name) + 
for c in (constraint.columns_autoinc_first + if constraint._implicit_generated + else constraint.columns)) + text += self.define_constraint_deferrability(constraint) + return text + + def visit_foreign_key_constraint(self, constraint): + preparer = self.preparer + text = "" + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + if formatted_name is not None: + text += "CONSTRAINT %s " % formatted_name + remote_table = list(constraint.elements)[0].column.table + text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % ( + ', '.join(preparer.quote(f.parent.name) + for f in constraint.elements), + self.define_constraint_remote_table( + constraint, remote_table, preparer), + ', '.join(preparer.quote(f.column.name) + for f in constraint.elements) + ) + text += self.define_constraint_match(constraint) + text += self.define_constraint_cascades(constraint) + text += self.define_constraint_deferrability(constraint) + return text + + def define_constraint_remote_table(self, constraint, table, preparer): + """Format the remote table clause of a CREATE CONSTRAINT clause.""" + + return preparer.format_table(table) + + def visit_unique_constraint(self, constraint): + if len(constraint) == 0: + return '' + text = "" + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + text += "CONSTRAINT %s " % formatted_name + text += "UNIQUE (%s)" % ( + ', '.join(self.preparer.quote(c.name) + for c in constraint)) + text += self.define_constraint_deferrability(constraint) + return text + + def define_constraint_cascades(self, constraint): + text = "" + if constraint.ondelete is not None: + text += " ON DELETE %s" % constraint.ondelete + if constraint.onupdate is not None: + text += " ON UPDATE %s" % constraint.onupdate + return text + + def define_constraint_deferrability(self, constraint): + text = "" + if constraint.deferrable is not None: + if constraint.deferrable: + text += " DEFERRABLE" + else: + text += " NOT DEFERRABLE" + if constraint.initially is not None: + text += " INITIALLY %s" % constraint.initially + return text + + def define_constraint_match(self, constraint): + text = "" + if constraint.match is not None: + text += " MATCH %s" % constraint.match + return text + + +class GenericTypeCompiler(TypeCompiler): + + def visit_FLOAT(self, type_, **kw): + return "FLOAT" + + def visit_REAL(self, type_, **kw): + return "REAL" + + def visit_NUMERIC(self, type_, **kw): + if type_.precision is None: + return "NUMERIC" + elif type_.scale is None: + return "NUMERIC(%(precision)s)" % \ + {'precision': type_.precision} + else: + return "NUMERIC(%(precision)s, %(scale)s)" % \ + {'precision': type_.precision, + 'scale': type_.scale} + + def visit_DECIMAL(self, type_, **kw): + if type_.precision is None: + return "DECIMAL" + elif type_.scale is None: + return "DECIMAL(%(precision)s)" % \ + {'precision': type_.precision} + else: + return "DECIMAL(%(precision)s, %(scale)s)" % \ + {'precision': type_.precision, + 'scale': type_.scale} + + def visit_INTEGER(self, type_, **kw): + return "INTEGER" + + def visit_SMALLINT(self, type_, **kw): + return "SMALLINT" + + def visit_BIGINT(self, type_, **kw): + return "BIGINT" + + def visit_TIMESTAMP(self, type_, **kw): + return 'TIMESTAMP' + + def visit_DATETIME(self, type_, **kw): + return "DATETIME" + + def visit_DATE(self, type_, **kw): + return "DATE" + + def visit_TIME(self, type_, **kw): + return "TIME" + + def visit_CLOB(self, type_, **kw): + return "CLOB" + + def visit_NCLOB(self, type_, **kw): + 
return "NCLOB" + + def _render_string_type(self, type_, name): + + text = name + if type_.length: + text += "(%d)" % type_.length + if type_.collation: + text += ' COLLATE "%s"' % type_.collation + return text + + def visit_CHAR(self, type_, **kw): + return self._render_string_type(type_, "CHAR") + + def visit_NCHAR(self, type_, **kw): + return self._render_string_type(type_, "NCHAR") + + def visit_VARCHAR(self, type_, **kw): + return self._render_string_type(type_, "VARCHAR") + + def visit_NVARCHAR(self, type_, **kw): + return self._render_string_type(type_, "NVARCHAR") + + def visit_TEXT(self, type_, **kw): + return self._render_string_type(type_, "TEXT") + + def visit_BLOB(self, type_, **kw): + return "BLOB" + + def visit_BINARY(self, type_, **kw): + return "BINARY" + (type_.length and "(%d)" % type_.length or "") + + def visit_VARBINARY(self, type_, **kw): + return "VARBINARY" + (type_.length and "(%d)" % type_.length or "") + + def visit_BOOLEAN(self, type_, **kw): + return "BOOLEAN" + + def visit_large_binary(self, type_, **kw): + return self.visit_BLOB(type_, **kw) + + def visit_boolean(self, type_, **kw): + return self.visit_BOOLEAN(type_, **kw) + + def visit_time(self, type_, **kw): + return self.visit_TIME(type_, **kw) + + def visit_datetime(self, type_, **kw): + return self.visit_DATETIME(type_, **kw) + + def visit_date(self, type_, **kw): + return self.visit_DATE(type_, **kw) + + def visit_big_integer(self, type_, **kw): + return self.visit_BIGINT(type_, **kw) + + def visit_small_integer(self, type_, **kw): + return self.visit_SMALLINT(type_, **kw) + + def visit_integer(self, type_, **kw): + return self.visit_INTEGER(type_, **kw) + + def visit_real(self, type_, **kw): + return self.visit_REAL(type_, **kw) + + def visit_float(self, type_, **kw): + return self.visit_FLOAT(type_, **kw) + + def visit_numeric(self, type_, **kw): + return self.visit_NUMERIC(type_, **kw) + + def visit_string(self, type_, **kw): + return self.visit_VARCHAR(type_, **kw) + + def visit_unicode(self, type_, **kw): + return self.visit_VARCHAR(type_, **kw) + + def visit_text(self, type_, **kw): + return self.visit_TEXT(type_, **kw) + + def visit_unicode_text(self, type_, **kw): + return self.visit_TEXT(type_, **kw) + + def visit_enum(self, type_, **kw): + return self.visit_VARCHAR(type_, **kw) + + def visit_null(self, type_, **kw): + raise exc.CompileError("Can't generate DDL for %r; " + "did you forget to specify a " + "type on this Column?" % type_) + + def visit_type_decorator(self, type_, **kw): + return self.process(type_.type_engine(self.dialect), **kw) + + def visit_user_defined(self, type_, **kw): + return type_.get_col_spec(**kw) + + +class StrSQLTypeCompiler(GenericTypeCompiler): + def __getattr__(self, key): + if key.startswith("visit_"): + return self._visit_unknown + else: + raise AttributeError(key) + + def _visit_unknown(self, type_, **kw): + return "%s" % type_.__class__.__name__ + + +class IdentifierPreparer(object): + + """Handle quoting and case-folding of identifiers based on options.""" + + reserved_words = RESERVED_WORDS + + legal_characters = LEGAL_CHARACTERS + + illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS + + schema_for_object = schema._schema_getter(None) + + def __init__(self, dialect, initial_quote='"', + final_quote=None, escape_quote='"', omit_schema=False): + """Construct a new ``IdentifierPreparer`` object. + + initial_quote + Character that begins a delimited identifier. + + final_quote + Character that ends a delimited identifier. Defaults to + `initial_quote`. 
+ + omit_schema + Prevent prepending schema name. Useful for databases that do + not support schemae. + """ + + self.dialect = dialect + self.initial_quote = initial_quote + self.final_quote = final_quote or self.initial_quote + self.escape_quote = escape_quote + self.escape_to_quote = self.escape_quote * 2 + self.omit_schema = omit_schema + self._strings = {} + + def _with_schema_translate(self, schema_translate_map): + prep = self.__class__.__new__(self.__class__) + prep.__dict__.update(self.__dict__) + prep.schema_for_object = schema._schema_getter(schema_translate_map) + return prep + + def _escape_identifier(self, value): + """Escape an identifier. + + Subclasses should override this to provide database-dependent + escaping behavior. + """ + + return value.replace(self.escape_quote, self.escape_to_quote) + + def _unescape_identifier(self, value): + """Canonicalize an escaped identifier. + + Subclasses should override this to provide database-dependent + unescaping behavior that reverses _escape_identifier. + """ + + return value.replace(self.escape_to_quote, self.escape_quote) + + def quote_identifier(self, value): + """Quote an identifier. + + Subclasses should override this to provide database-dependent + quoting behavior. + """ + + return self.initial_quote + \ + self._escape_identifier(value) + \ + self.final_quote + + def _requires_quotes(self, value): + """Return True if the given identifier requires quoting.""" + lc_value = value.lower() + return (lc_value in self.reserved_words + or value[0] in self.illegal_initial_characters + or not self.legal_characters.match(util.text_type(value)) + or (lc_value != value)) + + def quote_schema(self, schema, force=None): + """Conditionally quote a schema. + + Subclasses can override this to provide database-dependent + quoting behavior for schema names. + + the 'force' flag should be considered deprecated. + + """ + return self.quote(schema, force) + + def quote(self, ident, force=None): + """Conditionally quote an identifier. + + the 'force' flag should be considered deprecated. + """ + + force = getattr(ident, "quote", None) + + if force is None: + if ident in self._strings: + return self._strings[ident] + else: + if self._requires_quotes(ident): + self._strings[ident] = self.quote_identifier(ident) + else: + self._strings[ident] = ident + return self._strings[ident] + elif force: + return self.quote_identifier(ident) + else: + return ident + + def format_sequence(self, sequence, use_schema=True): + name = self.quote(sequence.name) + + effective_schema = self.schema_for_object(sequence) + + if (not self.omit_schema and use_schema and + effective_schema is not None): + name = self.quote_schema(effective_schema) + "." + name + return name + + def format_label(self, label, name=None): + return self.quote(name or label.name) + + def format_alias(self, alias, name=None): + return self.quote(name or alias.name) + + def format_savepoint(self, savepoint, name=None): + # Running the savepoint name through quoting is unnecessary + # for all known dialects. 
This is here to support potential + # third party use cases + ident = name or savepoint.ident + if self._requires_quotes(ident): + ident = self.quote_identifier(ident) + return ident + + @util.dependencies("sqlalchemy.sql.naming") + def format_constraint(self, naming, constraint): + if isinstance(constraint.name, elements._defer_name): + name = naming._constraint_name_for_table( + constraint, constraint.table) + if name: + return self.quote(name) + elif isinstance(constraint.name, elements._defer_none_name): + return None + return self.quote(constraint.name) + + def format_table(self, table, use_schema=True, name=None): + """Prepare a quoted table and schema name.""" + + if name is None: + name = table.name + result = self.quote(name) + + effective_schema = self.schema_for_object(table) + + if not self.omit_schema and use_schema \ + and effective_schema: + result = self.quote_schema(effective_schema) + "." + result + return result + + def format_schema(self, name, quote=None): + """Prepare a quoted schema name.""" + + return self.quote(name, quote) + + def format_column(self, column, use_table=False, + name=None, table_name=None): + """Prepare a quoted column name.""" + + if name is None: + name = column.name + if not getattr(column, 'is_literal', False): + if use_table: + return self.format_table( + column.table, use_schema=False, + name=table_name) + "." + self.quote(name) + else: + return self.quote(name) + else: + # literal textual elements get stuck into ColumnClause a lot, + # which shouldn't get quoted + + if use_table: + return self.format_table( + column.table, use_schema=False, + name=table_name) + '.' + name + else: + return name + + def format_table_seq(self, table, use_schema=True): + """Format table name and schema as a tuple.""" + + # Dialects with more levels in their fully qualified references + # ('database', 'owner', etc.) could override this and return + # a longer sequence. + + effective_schema = self.schema_for_object(table) + + if not self.omit_schema and use_schema and \ + effective_schema: + return (self.quote_schema(effective_schema), + self.format_table(table, use_schema=False)) + else: + return (self.format_table(table, use_schema=False), ) + + @util.memoized_property + def _r_identifiers(self): + initial, final, escaped_final = \ + [re.escape(s) for s in + (self.initial_quote, self.final_quote, + self._escape_identifier(self.final_quote))] + r = re.compile( + r'(?:' + r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s' + r'|([^\.]+))(?=\.|$))+' % + {'initial': initial, + 'final': final, + 'escaped': escaped_final}) + return r + + def unformat_identifiers(self, identifiers): + """Unpack 'schema.table.column'-like strings into components.""" + + r = self._r_identifiers + return [self._unescape_identifier(i) + for i in [a or b for a, b in r.findall(identifiers)]] diff --git a/app/lib/sqlalchemy/sql/crud.py b/app/lib/sqlalchemy/sql/crud.py new file mode 100644 index 0000000..5739c22 --- /dev/null +++ b/app/lib/sqlalchemy/sql/crud.py @@ -0,0 +1,692 @@ +# sql/crud.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Functions used by compiler.py to determine the parameters rendered +within INSERT and UPDATE statements. + +""" +from .. import util +from .. import exc +from . import dml +from . 
import elements +import operator + +REQUIRED = util.symbol('REQUIRED', """ +Placeholder for the value within a :class:`.BindParameter` +which is required to be present when the statement is passed +to :meth:`.Connection.execute`. + +This symbol is typically used when a :func:`.expression.insert` +or :func:`.expression.update` statement is compiled without parameter +values present. + +""") + +ISINSERT = util.symbol('ISINSERT') +ISUPDATE = util.symbol('ISUPDATE') +ISDELETE = util.symbol('ISDELETE') + + +def _setup_crud_params(compiler, stmt, local_stmt_type, **kw): + restore_isinsert = compiler.isinsert + restore_isupdate = compiler.isupdate + restore_isdelete = compiler.isdelete + + should_restore = ( + restore_isinsert or restore_isupdate or restore_isdelete + ) or len(compiler.stack) > 1 + + if local_stmt_type is ISINSERT: + compiler.isupdate = False + compiler.isinsert = True + elif local_stmt_type is ISUPDATE: + compiler.isupdate = True + compiler.isinsert = False + elif local_stmt_type is ISDELETE: + if not should_restore: + compiler.isdelete = True + else: + assert False, "ISINSERT, ISUPDATE, or ISDELETE expected" + + try: + if local_stmt_type in (ISINSERT, ISUPDATE): + return _get_crud_params(compiler, stmt, **kw) + finally: + if should_restore: + compiler.isinsert = restore_isinsert + compiler.isupdate = restore_isupdate + compiler.isdelete = restore_isdelete + + +def _get_crud_params(compiler, stmt, **kw): + """create a set of tuples representing column/string pairs for use + in an INSERT or UPDATE statement. + + Also generates the Compiled object's postfetch, prefetch, and + returning column collections, used for default handling and ultimately + populating the ResultProxy's prefetch_cols() and postfetch_cols() + collections. + + """ + + compiler.postfetch = [] + compiler.insert_prefetch = [] + compiler.update_prefetch = [] + compiler.returning = [] + + # no parameters in the statement, no parameters in the + # compiled params - return binds for all columns + if compiler.column_keys is None and stmt.parameters is None: + return [ + (c, _create_bind_param( + compiler, c, None, required=True)) + for c in stmt.table.columns + ] + + if stmt._has_multi_parameters: + stmt_parameters = stmt.parameters[0] + else: + stmt_parameters = stmt.parameters + + # getters - these are normally just column.key, + # but in the case of mysql multi-table update, the rules for + # .key must conditionally take tablename into account + _column_as_key, _getattr_col_key, _col_bind_name = \ + _key_getters_for_crud_column(compiler, stmt) + + # if we have statement parameters - set defaults in the + # compiled params + if compiler.column_keys is None: + parameters = {} + else: + parameters = dict((_column_as_key(key), REQUIRED) + for key in compiler.column_keys + if not stmt_parameters or + key not in stmt_parameters) + + # create a list of column assignment clauses as tuples + values = [] + + if stmt_parameters is not None: + _get_stmt_parameters_params( + compiler, + parameters, stmt_parameters, _column_as_key, values, kw) + + check_columns = {} + + # special logic that only occurs for multi-table UPDATE + # statements + if compiler.isupdate and stmt._extra_froms and stmt_parameters: + _get_multitable_params( + compiler, stmt, stmt_parameters, check_columns, + _col_bind_name, _getattr_col_key, values, kw) + + if compiler.isinsert and stmt.select_names: + _scan_insert_from_select_cols( + compiler, stmt, parameters, + _getattr_col_key, _column_as_key, + _col_bind_name, check_columns, values, kw) + else: + 
_scan_cols( + compiler, stmt, parameters, + _getattr_col_key, _column_as_key, + _col_bind_name, check_columns, values, kw) + + if parameters and stmt_parameters: + check = set(parameters).intersection( + _column_as_key(k) for k in stmt_parameters + ).difference(check_columns) + if check: + raise exc.CompileError( + "Unconsumed column names: %s" % + (", ".join("%s" % c for c in check)) + ) + + if stmt._has_multi_parameters: + values = _extend_values_for_multiparams(compiler, stmt, values, kw) + + return values + + +def _create_bind_param( + compiler, col, value, process=True, + required=False, name=None, **kw): + if name is None: + name = col.key + bindparam = elements.BindParameter( + name, value, type_=col.type, required=required) + bindparam._is_crud = True + if process: + bindparam = bindparam._compiler_dispatch(compiler, **kw) + return bindparam + + +def _key_getters_for_crud_column(compiler, stmt): + if compiler.isupdate and stmt._extra_froms: + # when extra tables are present, refer to the columns + # in those extra tables as table-qualified, including in + # dictionaries and when rendering bind param names. + # the "main" table of the statement remains unqualified, + # allowing the most compatibility with a non-multi-table + # statement. + _et = set(stmt._extra_froms) + + def _column_as_key(key): + str_key = elements._column_as_key(key) + if hasattr(key, 'table') and key.table in _et: + return (key.table.name, str_key) + else: + return str_key + + def _getattr_col_key(col): + if col.table in _et: + return (col.table.name, col.key) + else: + return col.key + + def _col_bind_name(col): + if col.table in _et: + return "%s_%s" % (col.table.name, col.key) + else: + return col.key + + else: + _column_as_key = elements._column_as_key + _getattr_col_key = _col_bind_name = operator.attrgetter("key") + + return _column_as_key, _getattr_col_key, _col_bind_name + + +def _scan_insert_from_select_cols( + compiler, stmt, parameters, _getattr_col_key, + _column_as_key, _col_bind_name, check_columns, values, kw): + + need_pks, implicit_returning, \ + implicit_return_defaults, postfetch_lastrowid = \ + _get_returning_modifiers(compiler, stmt) + + cols = [stmt.table.c[_column_as_key(name)] + for name in stmt.select_names] + + compiler._insert_from_select = stmt.select + + add_select_cols = [] + if stmt.include_insert_from_select_defaults: + col_set = set(cols) + for col in stmt.table.columns: + if col not in col_set and col.default: + cols.append(col) + + for c in cols: + col_key = _getattr_col_key(c) + if col_key in parameters and col_key not in check_columns: + parameters.pop(col_key) + values.append((c, None)) + else: + _append_param_insert_select_hasdefault( + compiler, stmt, c, add_select_cols, kw) + + if add_select_cols: + values.extend(add_select_cols) + compiler._insert_from_select = compiler._insert_from_select._generate() + compiler._insert_from_select._raw_columns = \ + tuple(compiler._insert_from_select._raw_columns) + tuple( + expr for col, expr in add_select_cols) + + +def _scan_cols( + compiler, stmt, parameters, _getattr_col_key, + _column_as_key, _col_bind_name, check_columns, values, kw): + + need_pks, implicit_returning, \ + implicit_return_defaults, postfetch_lastrowid = \ + _get_returning_modifiers(compiler, stmt) + + if stmt._parameter_ordering: + parameter_ordering = [ + _column_as_key(key) for key in stmt._parameter_ordering + ] + ordered_keys = set(parameter_ordering) + cols = [ + stmt.table.c[key] for key in parameter_ordering + ] + [ + c for c in stmt.table.c if c.key not in 
ordered_keys
+        ]
+    else:
+        cols = stmt.table.columns
+
+    for c in cols:
+        col_key = _getattr_col_key(c)
+
+        if col_key in parameters and col_key not in check_columns:
+
+            _append_param_parameter(
+                compiler, stmt, c, col_key, parameters, _col_bind_name,
+                implicit_returning, implicit_return_defaults, values, kw)
+
+        elif compiler.isinsert:
+            if c.primary_key and \
+                    need_pks and \
+                    (
+                        implicit_returning or
+                        not postfetch_lastrowid or
+                        c is not stmt.table._autoincrement_column
+                    ):
+
+                if implicit_returning:
+                    _append_param_insert_pk_returning(
+                        compiler, stmt, c, values, kw)
+                else:
+                    _append_param_insert_pk(compiler, stmt, c, values, kw)
+
+            elif c.default is not None:
+
+                _append_param_insert_hasdefault(
+                    compiler, stmt, c, implicit_return_defaults,
+                    values, kw)
+
+            elif c.server_default is not None:
+                if implicit_return_defaults and \
+                        c in implicit_return_defaults:
+                    compiler.returning.append(c)
+                elif not c.primary_key:
+                    compiler.postfetch.append(c)
+            elif implicit_return_defaults and \
+                    c in implicit_return_defaults:
+                compiler.returning.append(c)
+            elif c.primary_key and \
+                    c is not stmt.table._autoincrement_column and \
+                    not c.nullable:
+                _warn_pk_with_no_anticipated_value(c)
+
+        elif compiler.isupdate:
+            _append_param_update(
+                compiler, stmt, c, implicit_return_defaults, values, kw)
+
+
+def _append_param_parameter(
+        compiler, stmt, c, col_key, parameters, _col_bind_name,
+        implicit_returning, implicit_return_defaults, values, kw):
+    value = parameters.pop(col_key)
+    if elements._is_literal(value):
+        value = _create_bind_param(
+            compiler, c, value, required=value is REQUIRED,
+            name=_col_bind_name(c)
+            if not stmt._has_multi_parameters
+            else "%s_m0" % _col_bind_name(c),
+            **kw
+        )
+    else:
+        if isinstance(value, elements.BindParameter) and \
+                value.type._isnull:
+            value = value._clone()
+            value.type = c.type
+
+        if c.primary_key and implicit_returning:
+            compiler.returning.append(c)
+            value = compiler.process(value.self_group(), **kw)
+        elif implicit_return_defaults and \
+                c in implicit_return_defaults:
+            compiler.returning.append(c)
+            value = compiler.process(value.self_group(), **kw)
+        else:
+            compiler.postfetch.append(c)
+            value = compiler.process(value.self_group(), **kw)
+    values.append((c, value))
+
+
+def _append_param_insert_pk_returning(compiler, stmt, c, values, kw):
+    """Create a primary key expression in the INSERT statement and
+    possibly a RETURNING clause for it.
+
+    If the column has a Python-side default, we will create a bound
+    parameter for it and "pre-execute" the Python function.  If
+    the column has a SQL expression default, or is a sequence,
+    we will add it directly into the INSERT statement and add a
+    RETURNING element to get the new value.  If the column has a
+    server-side default or is marked as the "autoincrement" column,
+    we will add a RETURNING element to get at the value.
+
+    If all the above tests fail, that indicates a primary key column with no
+    noted default generation capabilities that has no parameter passed;
+    emit a warning.
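+
+    In short: a Python-side callable default becomes a pre-executed bound
+    parameter; a SQL expression or sequence default is rendered inline and
+    paired with a RETURNING entry; a server default or the autoincrement
+    column is fetched via RETURNING alone.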
+
+    """
+    if c.default is not None:
+        if c.default.is_sequence:
+            if compiler.dialect.supports_sequences and \
+                    (not c.default.optional or
+                     not compiler.dialect.sequences_optional):
+                proc = compiler.process(c.default, **kw)
+                values.append((c, proc))
+                compiler.returning.append(c)
+        elif c.default.is_clause_element:
+            values.append(
+                (c, compiler.process(
+                    c.default.arg.self_group(), **kw))
+            )
+            compiler.returning.append(c)
+        else:
+            values.append(
+                (c, _create_insert_prefetch_bind_param(compiler, c))
+            )
+    elif c is stmt.table._autoincrement_column or c.server_default is not None:
+        compiler.returning.append(c)
+    elif not c.nullable:
+        # no .default, no .server_default, not autoincrement, we have
+        # no indication this primary key column will have any value
+        _warn_pk_with_no_anticipated_value(c)
+
+
+def _create_insert_prefetch_bind_param(compiler, c, process=True, name=None):
+    param = _create_bind_param(compiler, c, None, process=process, name=name)
+    compiler.insert_prefetch.append(c)
+    return param
+
+
+def _create_update_prefetch_bind_param(compiler, c, process=True, name=None):
+    param = _create_bind_param(compiler, c, None, process=process, name=name)
+    compiler.update_prefetch.append(c)
+    return param
+
+
+class _multiparam_column(elements.ColumnElement):
+    def __init__(self, original, index):
+        self.key = "%s_m%d" % (original.key, index + 1)
+        self.original = original
+        self.default = original.default
+        self.type = original.type
+
+    def __eq__(self, other):
+        return isinstance(other, _multiparam_column) and \
+            other.key == self.key and \
+            other.original == self.original
+
+
+def _process_multiparam_default_bind(compiler, stmt, c, index, kw):
+
+    if not c.default:
+        raise exc.CompileError(
+            "INSERT value for column %s is explicitly rendered as a bound "
+            "parameter in the VALUES clause; "
+            "a Python-side value or SQL expression is required" % c)
+    elif c.default.is_clause_element:
+        return compiler.process(c.default.arg.self_group(), **kw)
+    else:
+        col = _multiparam_column(c, index)
+        if isinstance(stmt, dml.Insert):
+            return _create_insert_prefetch_bind_param(compiler, col)
+        else:
+            return _create_update_prefetch_bind_param(compiler, col)
+
+
+def _append_param_insert_pk(compiler, stmt, c, values, kw):
+    """Create a bound parameter in the INSERT statement to receive a
+    'prefetched' default value.
+
+    The 'prefetched' value indicates that we are to invoke a Python-side
+    default function or explicit SQL expression before the INSERT statement
+    proceeds, so that we have a primary key value available.
+
+    If the column has no noted default generation capabilities and no value
+    is passed in either, emit a warning.
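+
+    E.g., for a hypothetical table with a Python-side default (an
+    illustrative sketch, not an example from the original source)::
+
+        t = Table('t', metadata,
+                  Column('id', Integer, default=next_id, primary_key=True))
+
+    an INSERT compiled without parameters renders ``id`` as a bound
+    parameter and records the column in ``compiler.insert_prefetch``, so
+    that ``next_id()`` is invoked before the statement executes.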
+ + """ + if ( + ( + # column has a Python-side default + c.default is not None and + ( + # and it won't be a Sequence + not c.default.is_sequence or + compiler.dialect.supports_sequences + ) + ) + or + ( + # column is the "autoincrement column" + c is stmt.table._autoincrement_column and + ( + # and it's either a "sequence" or a + # pre-executable "autoincrement" sequence + compiler.dialect.supports_sequences or + compiler.dialect.preexecute_autoincrement_sequences + ) + ) + ): + values.append( + (c, _create_insert_prefetch_bind_param(compiler, c)) + ) + elif c.default is None and c.server_default is None and not c.nullable: + # no .default, no .server_default, not autoincrement, we have + # no indication this primary key column will have any value + _warn_pk_with_no_anticipated_value(c) + + +def _append_param_insert_hasdefault( + compiler, stmt, c, implicit_return_defaults, values, kw): + + if c.default.is_sequence: + if compiler.dialect.supports_sequences and \ + (not c.default.optional or + not compiler.dialect.sequences_optional): + proc = compiler.process(c.default, **kw) + values.append((c, proc)) + if implicit_return_defaults and \ + c in implicit_return_defaults: + compiler.returning.append(c) + elif not c.primary_key: + compiler.postfetch.append(c) + elif c.default.is_clause_element: + proc = compiler.process(c.default.arg.self_group(), **kw) + values.append((c, proc)) + + if implicit_return_defaults and \ + c in implicit_return_defaults: + compiler.returning.append(c) + elif not c.primary_key: + # don't add primary key column to postfetch + compiler.postfetch.append(c) + else: + values.append( + (c, _create_insert_prefetch_bind_param(compiler, c)) + ) + + +def _append_param_insert_select_hasdefault( + compiler, stmt, c, values, kw): + + if c.default.is_sequence: + if compiler.dialect.supports_sequences and \ + (not c.default.optional or + not compiler.dialect.sequences_optional): + proc = c.default + values.append((c, proc.next_value())) + elif c.default.is_clause_element: + proc = c.default.arg.self_group() + values.append((c, proc)) + else: + values.append( + (c, _create_insert_prefetch_bind_param(compiler, c, process=False)) + ) + + +def _append_param_update( + compiler, stmt, c, implicit_return_defaults, values, kw): + + if c.onupdate is not None and not c.onupdate.is_sequence: + if c.onupdate.is_clause_element: + values.append( + (c, compiler.process( + c.onupdate.arg.self_group(), **kw)) + ) + if implicit_return_defaults and \ + c in implicit_return_defaults: + compiler.returning.append(c) + else: + compiler.postfetch.append(c) + else: + values.append( + (c, _create_update_prefetch_bind_param(compiler, c)) + ) + elif c.server_onupdate is not None: + if implicit_return_defaults and \ + c in implicit_return_defaults: + compiler.returning.append(c) + else: + compiler.postfetch.append(c) + elif implicit_return_defaults and \ + stmt._return_defaults is not True and \ + c in implicit_return_defaults: + compiler.returning.append(c) + + +def _get_multitable_params( + compiler, stmt, stmt_parameters, check_columns, + _col_bind_name, _getattr_col_key, values, kw): + + normalized_params = dict( + (elements._clause_element_as_expr(c), param) + for c, param in stmt_parameters.items() + ) + affected_tables = set() + for t in stmt._extra_froms: + for c in t.c: + if c in normalized_params: + affected_tables.add(t) + check_columns[_getattr_col_key(c)] = c + value = normalized_params[c] + if elements._is_literal(value): + value = _create_bind_param( + compiler, c, value, required=value is 
REQUIRED, + name=_col_bind_name(c)) + else: + compiler.postfetch.append(c) + value = compiler.process(value.self_group(), **kw) + values.append((c, value)) + # determine tables which are actually to be updated - process onupdate + # and server_onupdate for these + for t in affected_tables: + for c in t.c: + if c in normalized_params: + continue + elif (c.onupdate is not None and not + c.onupdate.is_sequence): + if c.onupdate.is_clause_element: + values.append( + (c, compiler.process( + c.onupdate.arg.self_group(), + **kw) + ) + ) + compiler.postfetch.append(c) + else: + values.append( + (c, _create_update_prefetch_bind_param( + compiler, c, name=_col_bind_name(c))) + ) + elif c.server_onupdate is not None: + compiler.postfetch.append(c) + + +def _extend_values_for_multiparams(compiler, stmt, values, kw): + values_0 = values + values = [values] + + values.extend( + [ + ( + c, + (_create_bind_param( + compiler, c, row[c.key], + name="%s_m%d" % (c.key, i + 1), **kw + ) if elements._is_literal(row[c.key]) + else compiler.process( + row[c.key].self_group(), **kw)) + if c.key in row else + _process_multiparam_default_bind(compiler, stmt, c, i, kw) + ) + for (c, param) in values_0 + ] + for i, row in enumerate(stmt.parameters[1:]) + ) + return values + + +def _get_stmt_parameters_params( + compiler, parameters, stmt_parameters, _column_as_key, values, kw): + for k, v in stmt_parameters.items(): + colkey = _column_as_key(k) + if colkey is not None: + parameters.setdefault(colkey, v) + else: + # a non-Column expression on the left side; + # add it to values() in an "as-is" state, + # coercing right side to bound param + if elements._is_literal(v): + v = compiler.process( + elements.BindParameter(None, v, type_=k.type), + **kw) + else: + v = compiler.process(v.self_group(), **kw) + + values.append((k, v)) + + +def _get_returning_modifiers(compiler, stmt): + need_pks = compiler.isinsert and \ + not compiler.inline and \ + not stmt._returning and \ + not stmt._has_multi_parameters + + implicit_returning = need_pks and \ + compiler.dialect.implicit_returning and \ + stmt.table.implicit_returning + + if compiler.isinsert: + implicit_return_defaults = (implicit_returning and + stmt._return_defaults) + elif compiler.isupdate: + implicit_return_defaults = (compiler.dialect.implicit_returning and + stmt.table.implicit_returning and + stmt._return_defaults) + else: + # this line is unused, currently we are always + # isinsert or isupdate + implicit_return_defaults = False # pragma: no cover + + if implicit_return_defaults: + if stmt._return_defaults is True: + implicit_return_defaults = set(stmt.table.c) + else: + implicit_return_defaults = set(stmt._return_defaults) + + postfetch_lastrowid = need_pks and compiler.dialect.postfetch_lastrowid + + return need_pks, implicit_returning, \ + implicit_return_defaults, postfetch_lastrowid + + +def _warn_pk_with_no_anticipated_value(c): + msg = ( + "Column '%s.%s' is marked as a member of the " + "primary key for table '%s', " + "but has no Python-side or server-side default generator indicated, " + "nor does it indicate 'autoincrement=True' or 'nullable=True', " + "and no explicit value is passed. " + "Primary key columns typically may not store NULL." + % + (c.table.fullname, c.name, c.table.fullname)) + if len(c.table.primary_key) > 1: + msg += ( + " Note that as of SQLAlchemy 1.1, 'autoincrement=True' must be " + "indicated explicitly for composite (e.g. 
multicolumn) primary " + "keys if AUTO_INCREMENT/SERIAL/IDENTITY " + "behavior is expected for one of the columns in the primary key. " + "CREATE TABLE statements are impacted by this change as well on " + "most backends.") + util.warn(msg) diff --git a/app/lib/sqlalchemy/sql/ddl.py b/app/lib/sqlalchemy/sql/ddl.py new file mode 100644 index 0000000..5463afe --- /dev/null +++ b/app/lib/sqlalchemy/sql/ddl.py @@ -0,0 +1,1100 @@ +# sql/ddl.py +# Copyright (C) 2009-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +""" +Provides the hierarchy of DDL-defining schema items as well as routines +to invoke them for a create/drop call. + +""" + +from .. import util +from .elements import ClauseElement +from .base import Executable, _generative, SchemaVisitor, _bind_or_error +from ..util import topological +from .. import event +from .. import exc + + +class _DDLCompiles(ClauseElement): + def _compiler(self, dialect, **kw): + """Return a compiler appropriate for this ClauseElement, given a + Dialect.""" + + return dialect.ddl_compiler(dialect, self, **kw) + + +class DDLElement(Executable, _DDLCompiles): + """Base class for DDL expression constructs. + + This class is the base for the general purpose :class:`.DDL` class, + as well as the various create/drop clause constructs such as + :class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`, + etc. + + :class:`.DDLElement` integrates closely with SQLAlchemy events, + introduced in :ref:`event_toplevel`. An instance of one is + itself an event receiving callable:: + + event.listen( + users, + 'after_create', + AddConstraint(constraint).execute_if(dialect='postgresql') + ) + + .. seealso:: + + :class:`.DDL` + + :class:`.DDLEvents` + + :ref:`event_toplevel` + + :ref:`schema_ddl_sequences` + + """ + + _execution_options = Executable.\ + _execution_options.union({'autocommit': True}) + + target = None + on = None + dialect = None + callable_ = None + + def _execute_on_connection(self, connection, multiparams, params): + return connection._execute_ddl(self, multiparams, params) + + def execute(self, bind=None, target=None): + """Execute this DDL immediately. + + Executes the DDL statement in isolation using the supplied + :class:`.Connectable` or + :class:`.Connectable` assigned to the ``.bind`` + property, if not supplied. If the DDL has a conditional ``on`` + criteria, it will be invoked with None as the event. + + :param bind: + Optional, an ``Engine`` or ``Connection``. If not supplied, a valid + :class:`.Connectable` must be present in the + ``.bind`` property. + + :param target: + Optional, defaults to None. The target SchemaItem for the + execute call. Will be passed to the ``on`` callable if any, + and may also provide string expansion data for the + statement. See ``execute_at`` for more information. + + """ + + if bind is None: + bind = _bind_or_error(self) + + if self._should_execute(target, bind): + return bind.execute(self.against(target)) + else: + bind.engine.logger.info( + "DDL execution skipped, criteria not met.") + + @util.deprecated("0.7", "See :class:`.DDLEvents`, as well as " + ":meth:`.DDLElement.execute_if`.") + def execute_at(self, event_name, target): + """Link execution of this DDL to the DDL lifecycle of a SchemaItem. + + Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance, + executing it when that schema item is created or dropped. 
The DDL + statement will be executed using the same Connection and transactional + context as the Table create/drop itself. The ``.bind`` property of + this statement is ignored. + + :param event: + One of the events defined in the schema item's ``.ddl_events``; + e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop' + + :param target: + The Table or MetaData instance for which this DDLElement will + be associated with. + + A DDLElement instance can be linked to any number of schema items. + + ``execute_at`` builds on the ``append_ddl_listener`` interface of + :class:`.MetaData` and :class:`.Table` objects. + + Caveat: Creating or dropping a Table in isolation will also trigger + any DDL set to ``execute_at`` that Table's MetaData. This may change + in a future release. + + """ + + def call_event(target, connection, **kw): + if self._should_execute_deprecated(event_name, + target, connection, **kw): + return connection.execute(self.against(target)) + + event.listen(target, "" + event_name.replace('-', '_'), call_event) + + @_generative + def against(self, target): + """Return a copy of this DDL against a specific schema item.""" + + self.target = target + + @_generative + def execute_if(self, dialect=None, callable_=None, state=None): + r"""Return a callable that will execute this + DDLElement conditionally. + + Used to provide a wrapper for event listening:: + + event.listen( + metadata, + 'before_create', + DDL("my_ddl").execute_if(dialect='postgresql') + ) + + :param dialect: May be a string, tuple or a callable + predicate. If a string, it will be compared to the name of the + executing database dialect:: + + DDL('something').execute_if(dialect='postgresql') + + If a tuple, specifies multiple dialect names:: + + DDL('something').execute_if(dialect=('postgresql', 'mysql')) + + :param callable_: A callable, which will be invoked with + four positional arguments as well as optional keyword + arguments: + + :ddl: + This DDL element. + + :target: + The :class:`.Table` or :class:`.MetaData` object which is the + target of this event. May be None if the DDL is executed + explicitly. + + :bind: + The :class:`.Connection` being used for DDL execution + + :tables: + Optional keyword argument - a list of Table objects which are to + be created/ dropped within a MetaData.create_all() or drop_all() + method call. + + :state: + Optional keyword argument - will be the ``state`` argument + passed to this function. + + :checkfirst: + Keyword argument, will be True if the 'checkfirst' flag was + set during the call to ``create()``, ``create_all()``, + ``drop()``, ``drop_all()``. + + If the callable returns a true value, the DDL statement will be + executed. + + :param state: any value which will be passed to the callable\_ + as the ``state`` keyword argument. + + .. 
seealso::
+
+            :class:`.DDLEvents`
+
+            :ref:`event_toplevel`
+
+        """
+        self.dialect = dialect
+        self.callable_ = callable_
+        self.state = state
+
+    def _should_execute(self, target, bind, **kw):
+        if self.on is not None and \
+                not self._should_execute_deprecated(None, target, bind, **kw):
+            return False
+
+        if isinstance(self.dialect, util.string_types):
+            if self.dialect != bind.engine.name:
+                return False
+        elif isinstance(self.dialect, (tuple, list, set)):
+            if bind.engine.name not in self.dialect:
+                return False
+        if (self.callable_ is not None and
+                not self.callable_(self, target, bind,
+                                   state=self.state, **kw)):
+            return False
+
+        return True
+
+    def _should_execute_deprecated(self, event, target, bind, **kw):
+        if self.on is None:
+            return True
+        elif isinstance(self.on, util.string_types):
+            return self.on == bind.engine.name
+        elif isinstance(self.on, (tuple, list, set)):
+            return bind.engine.name in self.on
+        else:
+            return self.on(self, event, target, bind, **kw)
+
+    def __call__(self, target, bind, **kw):
+        """Execute the DDL as a ddl_listener."""
+
+        if self._should_execute(target, bind, **kw):
+            return bind.execute(self.against(target))
+
+    def _check_ddl_on(self, on):
+        if (on is not None and
+                (not isinstance(on, util.string_types + (tuple, list, set))
+                 and not util.callable(on))):
+            raise exc.ArgumentError(
+                "Expected the name of a database dialect, a tuple "
+                "of names, or a callable for "
+                "'on' criteria, got type '%s'." % type(on).__name__)
+
+    def bind(self):
+        if self._bind:
+            return self._bind
+
+    def _set_bind(self, bind):
+        self._bind = bind
+    bind = property(bind, _set_bind)
+
+    def _generate(self):
+        s = self.__class__.__new__(self.__class__)
+        s.__dict__ = self.__dict__.copy()
+        return s
+
+
+class DDL(DDLElement):
+    """A literal DDL statement.
+
+    Specifies literal SQL DDL to be executed by the database.  DDL objects
+    function as DDL event listeners, and can be subscribed to those events
+    listed in :class:`.DDLEvents`, using either :class:`.Table` or
+    :class:`.MetaData` objects as targets.  Basic templating support allows
+    a single DDL instance to handle repetitive tasks for multiple tables.
+
+    Examples::
+
+      from sqlalchemy import event, DDL
+
+      tbl = Table('users', metadata, Column('uid', Integer))
+      event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))
+
+      spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
+      event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))
+
+      drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
+      connection.execute(drop_spow)
+
+    When operating on Table events, the following ``statement``
+    string substitutions are available::
+
+      %(table)s  - the Table name, with any required quoting applied
+      %(schema)s - the schema name, with any required quoting applied
+      %(fullname)s - the Table name including schema, quoted if needed
+
+    The DDL's "context", if any, will be combined with the standard
+    substitutions noted above.  Keys present in the context will override
+    the standard substitutions.
+
+    """
+
+    __visit_name__ = "ddl"
+
+    def __init__(self, statement, on=None, context=None, bind=None):
+        """Create a DDL statement.
+
+        :param statement:
+          A string or unicode string to be executed.  Statements will be
+          processed with Python's string formatting operator.  See the
+          ``context`` argument and the ``execute_at`` method.
+
+          A literal '%' in a statement must be escaped as '%%'.
+
+          SQL bind parameters are not available in DDL statements.
+
+        :param on:
+          .. 
deprecated:: 0.7 + See :meth:`.DDLElement.execute_if`. + + Optional filtering criteria. May be a string, tuple or a callable + predicate. If a string, it will be compared to the name of the + executing database dialect:: + + DDL('something', on='postgresql') + + If a tuple, specifies multiple dialect names:: + + DDL('something', on=('postgresql', 'mysql')) + + If a callable, it will be invoked with four positional arguments + as well as optional keyword arguments: + + :ddl: + This DDL element. + + :event: + The name of the event that has triggered this DDL, such as + 'after-create' Will be None if the DDL is executed explicitly. + + :target: + The ``Table`` or ``MetaData`` object which is the target of + this event. May be None if the DDL is executed explicitly. + + :connection: + The ``Connection`` being used for DDL execution + + :tables: + Optional keyword argument - a list of Table objects which are to + be created/ dropped within a MetaData.create_all() or drop_all() + method call. + + + If the callable returns a true value, the DDL statement will be + executed. + + :param context: + Optional dictionary, defaults to None. These values will be + available for use in string substitutions on the DDL statement. + + :param bind: + Optional. A :class:`.Connectable`, used by + default when ``execute()`` is invoked without a bind argument. + + + .. seealso:: + + :class:`.DDLEvents` + + :ref:`event_toplevel` + + """ + + if not isinstance(statement, util.string_types): + raise exc.ArgumentError( + "Expected a string or unicode SQL statement, got '%r'" % + statement) + + self.statement = statement + self.context = context or {} + + self._check_ddl_on(on) + self.on = on + self._bind = bind + + def __repr__(self): + return '<%s@%s; %s>' % ( + type(self).__name__, id(self), + ', '.join([repr(self.statement)] + + ['%s=%r' % (key, getattr(self, key)) + for key in ('on', 'context') + if getattr(self, key)])) + + +class _CreateDropBase(DDLElement): + """Base class for DDL constructs that represent CREATE and DROP or + equivalents. + + The common theme of _CreateDropBase is a single + ``element`` attribute which refers to the element + to be created or dropped. + + """ + + def __init__(self, element, on=None, bind=None): + self.element = element + self._check_ddl_on(on) + self.on = on + self.bind = bind + + def _create_rule_disable(self, compiler): + """Allow disable of _create_rule using a callable. + + Pass to _create_rule using + util.portable_instancemethod(self._create_rule_disable) + to retain serializability. + + """ + return False + + +class CreateSchema(_CreateDropBase): + """Represent a CREATE SCHEMA statement. + + .. versionadded:: 0.7.4 + + The argument here is the string name of the schema. + + """ + + __visit_name__ = "create_schema" + + def __init__(self, name, quote=None, **kw): + """Create a new :class:`.CreateSchema` construct.""" + + self.quote = quote + super(CreateSchema, self).__init__(name, **kw) + + +class DropSchema(_CreateDropBase): + """Represent a DROP SCHEMA statement. + + The argument here is the string name of the schema. + + .. 
versionadded:: 0.7.4
+
+    """
+
+    __visit_name__ = "drop_schema"
+
+    def __init__(self, name, quote=None, cascade=False, **kw):
+        """Create a new :class:`.DropSchema` construct."""
+
+        self.quote = quote
+        self.cascade = cascade
+        super(DropSchema, self).__init__(name, **kw)
+
+
+class CreateTable(_CreateDropBase):
+    """Represent a CREATE TABLE statement."""
+
+    __visit_name__ = "create_table"
+
+    def __init__(
+            self, element, on=None, bind=None,
+            include_foreign_key_constraints=None):
+        """Create a :class:`.CreateTable` construct.
+
+        :param element: a :class:`.Table` that's the subject
+         of the CREATE
+        :param on: See the description for 'on' in :class:`.DDL`.
+        :param bind: See the description for 'bind' in :class:`.DDL`.
+        :param include_foreign_key_constraints: optional sequence of
+         :class:`.ForeignKeyConstraint` objects that will be included
+         inline within the CREATE construct; if omitted, all foreign key
+         constraints that do not specify use_alter=True are included.
+
+         .. versionadded:: 1.0.0
+
+        """
+        super(CreateTable, self).__init__(element, on=on, bind=bind)
+        self.columns = [CreateColumn(column)
+                        for column in element.columns
+                        ]
+        self.include_foreign_key_constraints = include_foreign_key_constraints
+
+
+class _DropView(_CreateDropBase):
+    """Semi-public 'DROP VIEW' construct.
+
+    Used by the test suite for dialect-agnostic drops of views.
+    This object will eventually be part of a public "view" API.
+
+    """
+    __visit_name__ = "drop_view"
+
+
+class CreateColumn(_DDLCompiles):
+    """Represent a :class:`.Column` as rendered in a CREATE TABLE statement,
+    via the :class:`.CreateTable` construct.
+
+    This is provided to support custom column DDL within the generation
+    of CREATE TABLE statements, by using the
+    compiler extension documented in :ref:`sqlalchemy.ext.compiler_toplevel`
+    to extend :class:`.CreateColumn`.
+
+    Typical integration is to examine the incoming :class:`.Column`
+    object, and to redirect compilation if a particular flag or condition
+    is found::
+
+        from sqlalchemy import schema
+        from sqlalchemy.ext.compiler import compiles
+
+        @compiles(schema.CreateColumn)
+        def compile(element, compiler, **kw):
+            column = element.element
+
+            if "special" not in column.info:
+                return compiler.visit_create_column(element, **kw)
+
+            text = "%s SPECIAL DIRECTIVE %s" % (
+                column.name,
+                compiler.type_compiler.process(column.type)
+            )
+            default = compiler.get_column_default_string(column)
+            if default is not None:
+                text += " DEFAULT " + default
+
+            if not column.nullable:
+                text += " NOT NULL"
+
+            if column.constraints:
+                text += " ".join(
+                    compiler.process(const)
+                    for const in column.constraints)
+            return text
+
+    The above construct can be applied to a :class:`.Table` as follows::
+
+        from sqlalchemy import Table, MetaData, Column, Integer, String
+        from sqlalchemy import schema
+
+        metadata = MetaData()
+
+        table = Table('mytable', metadata,
+                Column('x', Integer, info={"special":True}, primary_key=True),
+                Column('y', String(50)),
+                Column('z', String(20), info={"special":True})
+            )
+
+        metadata.create_all(conn)
+
+    Above, the directives we've added to the :attr:`.Column.info` collection
+    will be detected by our custom compilation scheme::
+
+        CREATE TABLE mytable (
+                x SPECIAL DIRECTIVE INTEGER NOT NULL,
+                y VARCHAR(50),
+                z SPECIAL DIRECTIVE VARCHAR(20),
+            PRIMARY KEY (x)
+        )
+
+    The :class:`.CreateColumn` construct can also be used to skip certain
+    columns when producing a ``CREATE TABLE``. 
This is accomplished by + creating a compilation rule that conditionally returns ``None``. + This is essentially how to produce the same effect as using the + ``system=True`` argument on :class:`.Column`, which marks a column + as an implicitly-present "system" column. + + For example, suppose we wish to produce a :class:`.Table` which skips + rendering of the PostgreSQL ``xmin`` column against the PostgreSQL + backend, but on other backends does render it, in anticipation of a + triggered rule. A conditional compilation rule could skip this name only + on PostgreSQL:: + + from sqlalchemy.schema import CreateColumn + + @compiles(CreateColumn, "postgresql") + def skip_xmin(element, compiler, **kw): + if element.element.name == 'xmin': + return None + else: + return compiler.visit_create_column(element, **kw) + + + my_table = Table('mytable', metadata, + Column('id', Integer, primary_key=True), + Column('xmin', Integer) + ) + + Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE`` + which only includes the ``id`` column in the string; the ``xmin`` column + will be omitted, but only against the PostgreSQL backend. + + .. versionadded:: 0.8.3 The :class:`.CreateColumn` construct supports + skipping of columns by returning ``None`` from a custom compilation + rule. + + .. versionadded:: 0.8 The :class:`.CreateColumn` construct was added + to support custom column creation styles. + + """ + __visit_name__ = 'create_column' + + def __init__(self, element): + self.element = element + + +class DropTable(_CreateDropBase): + """Represent a DROP TABLE statement.""" + + __visit_name__ = "drop_table" + + +class CreateSequence(_CreateDropBase): + """Represent a CREATE SEQUENCE statement.""" + + __visit_name__ = "create_sequence" + + +class DropSequence(_CreateDropBase): + """Represent a DROP SEQUENCE statement.""" + + __visit_name__ = "drop_sequence" + + +class CreateIndex(_CreateDropBase): + """Represent a CREATE INDEX statement.""" + + __visit_name__ = "create_index" + + +class DropIndex(_CreateDropBase): + """Represent a DROP INDEX statement.""" + + __visit_name__ = "drop_index" + + +class AddConstraint(_CreateDropBase): + """Represent an ALTER TABLE ADD CONSTRAINT statement.""" + + __visit_name__ = "add_constraint" + + def __init__(self, element, *args, **kw): + super(AddConstraint, self).__init__(element, *args, **kw) + element._create_rule = util.portable_instancemethod( + self._create_rule_disable) + + +class DropConstraint(_CreateDropBase): + """Represent an ALTER TABLE DROP CONSTRAINT statement.""" + + __visit_name__ = "drop_constraint" + + def __init__(self, element, cascade=False, **kw): + self.cascade = cascade + super(DropConstraint, self).__init__(element, **kw) + element._create_rule = util.portable_instancemethod( + self._create_rule_disable) + + +class DDLBase(SchemaVisitor): + def __init__(self, connection): + self.connection = connection + + +class SchemaGenerator(DDLBase): + + def __init__(self, dialect, connection, checkfirst=False, + tables=None, **kwargs): + super(SchemaGenerator, self).__init__(connection, **kwargs) + self.checkfirst = checkfirst + self.tables = tables + self.preparer = dialect.identifier_preparer + self.dialect = dialect + self.memo = {} + + def _can_create_table(self, table): + self.dialect.validate_identifier(table.name) + effective_schema = self.connection.schema_for_object(table) + if effective_schema: + self.dialect.validate_identifier(effective_schema) + return not self.checkfirst or \ + not self.dialect.has_table(self.connection, + 
table.name, schema=effective_schema) + + def _can_create_sequence(self, sequence): + effective_schema = self.connection.schema_for_object(sequence) + + return self.dialect.supports_sequences and \ + ( + (not self.dialect.sequences_optional or + not sequence.optional) and + ( + not self.checkfirst or + not self.dialect.has_sequence( + self.connection, + sequence.name, + schema=effective_schema) + ) + ) + + def visit_metadata(self, metadata): + if self.tables is not None: + tables = self.tables + else: + tables = list(metadata.tables.values()) + + collection = sort_tables_and_constraints( + [t for t in tables if self._can_create_table(t)]) + + seq_coll = [s for s in metadata._sequences.values() + if s.column is None and self._can_create_sequence(s)] + + event_collection = [ + t for (t, fks) in collection if t is not None + ] + metadata.dispatch.before_create(metadata, self.connection, + tables=event_collection, + checkfirst=self.checkfirst, + _ddl_runner=self) + + for seq in seq_coll: + self.traverse_single(seq, create_ok=True) + + for table, fkcs in collection: + if table is not None: + self.traverse_single( + table, create_ok=True, + include_foreign_key_constraints=fkcs, + _is_metadata_operation=True) + else: + for fkc in fkcs: + self.traverse_single(fkc) + + metadata.dispatch.after_create(metadata, self.connection, + tables=event_collection, + checkfirst=self.checkfirst, + _ddl_runner=self) + + def visit_table( + self, table, create_ok=False, + include_foreign_key_constraints=None, + _is_metadata_operation=False): + if not create_ok and not self._can_create_table(table): + return + + table.dispatch.before_create( + table, self.connection, + checkfirst=self.checkfirst, + _ddl_runner=self, + _is_metadata_operation=_is_metadata_operation) + + for column in table.columns: + if column.default is not None: + self.traverse_single(column.default) + + if not self.dialect.supports_alter: + # e.g., don't omit any foreign key constraints + include_foreign_key_constraints = None + + self.connection.execute( + CreateTable( + table, + include_foreign_key_constraints=include_foreign_key_constraints + )) + + if hasattr(table, 'indexes'): + for index in table.indexes: + self.traverse_single(index) + + table.dispatch.after_create( + table, self.connection, + checkfirst=self.checkfirst, + _ddl_runner=self, + _is_metadata_operation=_is_metadata_operation) + + def visit_foreign_key_constraint(self, constraint): + if not self.dialect.supports_alter: + return + self.connection.execute(AddConstraint(constraint)) + + def visit_sequence(self, sequence, create_ok=False): + if not create_ok and not self._can_create_sequence(sequence): + return + self.connection.execute(CreateSequence(sequence)) + + def visit_index(self, index): + self.connection.execute(CreateIndex(index)) + + +class SchemaDropper(DDLBase): + + def __init__(self, dialect, connection, checkfirst=False, + tables=None, **kwargs): + super(SchemaDropper, self).__init__(connection, **kwargs) + self.checkfirst = checkfirst + self.tables = tables + self.preparer = dialect.identifier_preparer + self.dialect = dialect + self.memo = {} + + def visit_metadata(self, metadata): + if self.tables is not None: + tables = self.tables + else: + tables = list(metadata.tables.values()) + + try: + unsorted_tables = [t for t in tables if self._can_drop_table(t)] + collection = list(reversed( + sort_tables_and_constraints( + unsorted_tables, + filter_fn=lambda constraint: False + if not self.dialect.supports_alter + or constraint.name is None + else None + ) + )) + except 
exc.CircularDependencyError as err2: + if not self.dialect.supports_alter: + util.warn( + "Can't sort tables for DROP; an " + "unresolvable foreign key " + "dependency exists between tables: %s, and backend does " + "not support ALTER. To restore at least a partial sort, " + "apply use_alter=True to ForeignKey and " + "ForeignKeyConstraint " + "objects involved in the cycle to mark these as known " + "cycles that will be ignored." + % ( + ", ".join(sorted([t.fullname for t in err2.cycles])) + ) + ) + collection = [(t, ()) for t in unsorted_tables] + else: + util.raise_from_cause( + exc.CircularDependencyError( + err2.args[0], + err2.cycles, err2.edges, + msg="Can't sort tables for DROP; an " + "unresolvable foreign key " + "dependency exists between tables: %s. Please ensure " + "that the ForeignKey and ForeignKeyConstraint objects " + "involved in the cycle have " + "names so that they can be dropped using " + "DROP CONSTRAINT." + % ( + ", ".join(sorted([t.fullname for t in err2.cycles])) + ) + + ) + ) + + seq_coll = [ + s + for s in metadata._sequences.values() + if s.column is None and self._can_drop_sequence(s) + ] + + event_collection = [ + t for (t, fks) in collection if t is not None + ] + + metadata.dispatch.before_drop( + metadata, self.connection, tables=event_collection, + checkfirst=self.checkfirst, _ddl_runner=self) + + for table, fkcs in collection: + if table is not None: + self.traverse_single( + table, drop_ok=True, _is_metadata_operation=True) + else: + for fkc in fkcs: + self.traverse_single(fkc) + + for seq in seq_coll: + self.traverse_single(seq, drop_ok=True) + + metadata.dispatch.after_drop( + metadata, self.connection, tables=event_collection, + checkfirst=self.checkfirst, _ddl_runner=self) + + def _can_drop_table(self, table): + self.dialect.validate_identifier(table.name) + effective_schema = self.connection.schema_for_object(table) + if effective_schema: + self.dialect.validate_identifier(effective_schema) + return not self.checkfirst or self.dialect.has_table( + self.connection, table.name, schema=effective_schema) + + def _can_drop_sequence(self, sequence): + effective_schema = self.connection.schema_for_object(sequence) + return self.dialect.supports_sequences and \ + ((not self.dialect.sequences_optional or + not sequence.optional) and + (not self.checkfirst or + self.dialect.has_sequence( + self.connection, + sequence.name, + schema=effective_schema)) + ) + + def visit_index(self, index): + self.connection.execute(DropIndex(index)) + + def visit_table(self, table, drop_ok=False, _is_metadata_operation=False): + if not drop_ok and not self._can_drop_table(table): + return + + table.dispatch.before_drop( + table, self.connection, + checkfirst=self.checkfirst, + _ddl_runner=self, + _is_metadata_operation=_is_metadata_operation) + + for column in table.columns: + if column.default is not None: + self.traverse_single(column.default) + + self.connection.execute(DropTable(table)) + + table.dispatch.after_drop( + table, self.connection, + checkfirst=self.checkfirst, + _ddl_runner=self, + _is_metadata_operation=_is_metadata_operation) + + def visit_foreign_key_constraint(self, constraint): + if not self.dialect.supports_alter: + return + self.connection.execute(DropConstraint(constraint)) + + def visit_sequence(self, sequence, drop_ok=False): + if not drop_ok and not self._can_drop_sequence(sequence): + return + self.connection.execute(DropSequence(sequence)) + + +def sort_tables(tables, skip_fn=None, extra_dependencies=None): + """sort a collection of 
:class:`.Table` objects based on dependency.
+
+    This is a dependency-ordered sort which will emit :class:`.Table`
+    objects such that they will follow their dependent :class:`.Table` objects.
+    Tables are dependent on one another based on the presence of
+    :class:`.ForeignKeyConstraint` objects as well as explicit dependencies
+    added by :meth:`.Table.add_is_dependent_on`.
+
+    .. warning::
+
+        The :func:`.sort_tables` function cannot by itself accommodate
+        automatic resolution of dependency cycles between tables, which
+        are usually caused by mutually dependent foreign key constraints.
+        To resolve these cycles, either the
+        :paramref:`.ForeignKeyConstraint.use_alter` parameter may be applied
+        to those constraints, or use the
+        :func:`.sql.sort_tables_and_constraints` function which will break
+        out foreign key constraints involved in cycles separately.
+
+    :param tables: a sequence of :class:`.Table` objects.
+
+    :param skip_fn: optional callable which will be passed a
+     :class:`.ForeignKey` object; if it returns True, this
+     constraint will not be considered as a dependency.  Note this is
+     **different** from the same parameter in
+     :func:`.sort_tables_and_constraints`, which is
+     instead passed the owning :class:`.ForeignKeyConstraint` object.
+
+    :param extra_dependencies: a sequence of 2-tuples of tables which will
+     also be considered as dependent on each other.
+
+    .. seealso::
+
+        :func:`.sort_tables_and_constraints`
+
+        :meth:`.MetaData.sorted_tables` - uses this function to sort
+
+
+    """
+
+    if skip_fn is not None:
+        def _skip_fn(fkc):
+            # adapt the ForeignKey-oriented skip_fn to the
+            # ForeignKeyConstraint-oriented filter_fn protocol
+            for fk in fkc.elements:
+                if skip_fn(fk):
+                    return True
+            else:
+                return None
+    else:
+        _skip_fn = None
+
+    return [
+        t for (t, fkcs) in
+        sort_tables_and_constraints(
+            tables, filter_fn=_skip_fn, extra_dependencies=extra_dependencies)
+        if t is not None
+    ]
+
+
+def sort_tables_and_constraints(
+        tables, filter_fn=None, extra_dependencies=None):
+    """sort a collection of :class:`.Table` / :class:`.ForeignKeyConstraint`
+    objects.
+
+    This is a dependency-ordered sort which will emit tuples of
+    ``(Table, [ForeignKeyConstraint, ...])`` such that each
+    :class:`.Table` follows its dependent :class:`.Table` objects.
+    Remaining :class:`.ForeignKeyConstraint` objects that are separate due to
+    dependency rules not satisfied by the sort are emitted afterwards
+    as ``(None, [ForeignKeyConstraint ...])``.
+
+    Tables are dependent on one another based on the presence of
+    :class:`.ForeignKeyConstraint` objects, explicit dependencies
+    added by :meth:`.Table.add_is_dependent_on`, as well as dependencies
+    stated here using the :paramref:`~.sort_tables_and_constraints.filter_fn`
+    and/or :paramref:`~.sort_tables_and_constraints.extra_dependencies`
+    parameters.
+
+    :param tables: a sequence of :class:`.Table` objects.
+
+    :param filter_fn: optional callable which will be passed a
+     :class:`.ForeignKeyConstraint` object, and returns a value based on
+     whether this constraint should definitely be included or excluded as
+     an inline constraint, or neither.  If it returns False, the constraint
+     will definitely be included as a dependency that cannot be subject
+     to ALTER; if True, it will **only** be included as an ALTER result at
+     the end.  Returning None means the constraint is included in the
+     table-based result unless it is detected as part of a dependency cycle.
+
+    :param extra_dependencies: a sequence of 2-tuples of tables which will
+     also be considered as dependent on each other.
+
+    .. versionadded:: 1.0.0
+
+    .. 
seealso:: + + :func:`.sort_tables` + + + """ + + fixed_dependencies = set() + mutable_dependencies = set() + + if extra_dependencies is not None: + fixed_dependencies.update(extra_dependencies) + + remaining_fkcs = set() + for table in tables: + for fkc in table.foreign_key_constraints: + if fkc.use_alter is True: + remaining_fkcs.add(fkc) + continue + + if filter_fn: + filtered = filter_fn(fkc) + + if filtered is True: + remaining_fkcs.add(fkc) + continue + + dependent_on = fkc.referred_table + if dependent_on is not table: + mutable_dependencies.add((dependent_on, table)) + + fixed_dependencies.update( + (parent, table) for parent in table._extra_dependencies + ) + + try: + candidate_sort = list( + topological.sort( + fixed_dependencies.union(mutable_dependencies), tables, + deterministic_order=True + ) + ) + except exc.CircularDependencyError as err: + for edge in err.edges: + if edge in mutable_dependencies: + table = edge[1] + can_remove = [ + fkc for fkc in table.foreign_key_constraints + if filter_fn is None or filter_fn(fkc) is not False] + remaining_fkcs.update(can_remove) + for fkc in can_remove: + dependent_on = fkc.referred_table + if dependent_on is not table: + mutable_dependencies.discard((dependent_on, table)) + candidate_sort = list( + topological.sort( + fixed_dependencies.union(mutable_dependencies), tables, + deterministic_order=True + ) + ) + + return [ + (table, table.foreign_key_constraints.difference(remaining_fkcs)) + for table in candidate_sort + ] + [(None, list(remaining_fkcs))] diff --git a/app/lib/sqlalchemy/sql/default_comparator.py b/app/lib/sqlalchemy/sql/default_comparator.py new file mode 100644 index 0000000..7498bbe --- /dev/null +++ b/app/lib/sqlalchemy/sql/default_comparator.py @@ -0,0 +1,308 @@ +# sql/default_comparator.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Default implementation of SQL comparison operations. +""" + +from .. import exc, util +from . import type_api +from . import operators +from .elements import BindParameter, True_, False_, BinaryExpression, \ + Null, _const_expr, _clause_element_as_expr, \ + ClauseList, ColumnElement, TextClause, UnaryExpression, \ + collate, _is_literal, _literal_as_text, ClauseElement, and_, or_, \ + Slice, Visitable, _literal_as_binds +from .selectable import SelectBase, Alias, Selectable, ScalarSelect + + +def _boolean_compare(expr, op, obj, negate=None, reverse=False, + _python_is_types=(util.NoneType, bool), + result_type = None, + **kwargs): + + if result_type is None: + result_type = type_api.BOOLEANTYPE + + if isinstance(obj, _python_is_types + (Null, True_, False_)): + + # allow x ==/!= True/False to be treated as a literal. 
+ # this comes out to "== / != true/false" or "1/0" if those + # constants aren't supported and works on all platforms + if op in (operators.eq, operators.ne) and \ + isinstance(obj, (bool, True_, False_)): + return BinaryExpression(expr, + _literal_as_text(obj), + op, + type_=result_type, + negate=negate, modifiers=kwargs) + elif op in (operators.is_distinct_from, operators.isnot_distinct_from): + return BinaryExpression(expr, + _literal_as_text(obj), + op, + type_=result_type, + negate=negate, modifiers=kwargs) + else: + # all other None/True/False uses IS, IS NOT + if op in (operators.eq, operators.is_): + return BinaryExpression(expr, _const_expr(obj), + operators.is_, + negate=operators.isnot) + elif op in (operators.ne, operators.isnot): + return BinaryExpression(expr, _const_expr(obj), + operators.isnot, + negate=operators.is_) + else: + raise exc.ArgumentError( + "Only '=', '!=', 'is_()', 'isnot()', " + "'is_distinct_from()', 'isnot_distinct_from()' " + "operators can be used with None/True/False") + else: + obj = _check_literal(expr, op, obj) + + if reverse: + return BinaryExpression(obj, + expr, + op, + type_=result_type, + negate=negate, modifiers=kwargs) + else: + return BinaryExpression(expr, + obj, + op, + type_=result_type, + negate=negate, modifiers=kwargs) + + +def _binary_operate(expr, op, obj, reverse=False, result_type=None, + **kw): + obj = _check_literal(expr, op, obj) + + if reverse: + left, right = obj, expr + else: + left, right = expr, obj + + if result_type is None: + op, result_type = left.comparator._adapt_expression( + op, right.comparator) + + return BinaryExpression( + left, right, op, type_=result_type, modifiers=kw) + + +def _conjunction_operate(expr, op, other, **kw): + if op is operators.and_: + return and_(expr, other) + elif op is operators.or_: + return or_(expr, other) + else: + raise NotImplementedError() + + +def _scalar(expr, op, fn, **kw): + return fn(expr) + + +def _in_impl(expr, op, seq_or_selectable, negate_op, **kw): + seq_or_selectable = _clause_element_as_expr(seq_or_selectable) + + if isinstance(seq_or_selectable, ScalarSelect): + return _boolean_compare(expr, op, seq_or_selectable, + negate=negate_op) + elif isinstance(seq_or_selectable, SelectBase): + + # TODO: if we ever want to support (x, y, z) IN (select x, + # y, z from table), we would need a multi-column version of + # as_scalar() to produce a multi- column selectable that + # does not export itself as a FROM clause + + return _boolean_compare( + expr, op, seq_or_selectable.as_scalar(), + negate=negate_op, **kw) + elif isinstance(seq_or_selectable, (Selectable, TextClause)): + return _boolean_compare(expr, op, seq_or_selectable, + negate=negate_op, **kw) + elif isinstance(seq_or_selectable, ClauseElement): + raise exc.InvalidRequestError( + 'in_() accepts' + ' either a list of expressions ' + 'or a selectable: %r' % seq_or_selectable) + + # Handle non selectable arguments as sequences + args = [] + for o in seq_or_selectable: + if not _is_literal(o): + if not isinstance(o, operators.ColumnOperators): + raise exc.InvalidRequestError( + 'in_() accepts' + ' either a list of expressions ' + 'or a selectable: %r' % o) + elif o is None: + o = Null() + else: + o = expr._bind_param(op, o) + args.append(o) + if len(args) == 0: + + # Special case handling for empty IN's, behave like + # comparison against zero row selectable. We use != to + # build the contradiction as it handles NULL values + # appropriately, i.e. "not (x IN ())" should not return NULL + # values for x. 
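+        # (illustrative note, not from the upstream sources: for a column
+        # "x", an empty x.in_([]) therefore renders along the lines of
+        # "x != x", which is false or NULL for every row, matching the
+        # zero-row-selectable contract described above.)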
+ + util.warn('The IN-predicate on "%s" was invoked with an ' + 'empty sequence. This results in a ' + 'contradiction, which nonetheless can be ' + 'expensive to evaluate. Consider alternative ' + 'strategies for improved performance.' % expr) + if op is operators.in_op: + return expr != expr + else: + return expr == expr + + return _boolean_compare(expr, op, + ClauseList(*args).self_group(against=op), + negate=negate_op) + + +def _getitem_impl(expr, op, other, **kw): + if isinstance(expr.type, type_api.INDEXABLE): + other = _check_literal(expr, op, other) + return _binary_operate(expr, op, other, **kw) + else: + _unsupported_impl(expr, op, other, **kw) + + +def _unsupported_impl(expr, op, *arg, **kw): + raise NotImplementedError("Operator '%s' is not supported on " + "this expression" % op.__name__) + + +def _inv_impl(expr, op, **kw): + """See :meth:`.ColumnOperators.__inv__`.""" + if hasattr(expr, 'negation_clause'): + return expr.negation_clause + else: + return expr._negate() + + +def _neg_impl(expr, op, **kw): + """See :meth:`.ColumnOperators.__neg__`.""" + return UnaryExpression(expr, operator=operators.neg, type_=expr.type) + + +def _match_impl(expr, op, other, **kw): + """See :meth:`.ColumnOperators.match`.""" + + return _boolean_compare( + expr, operators.match_op, + _check_literal( + expr, operators.match_op, other), + result_type=type_api.MATCHTYPE, + negate=operators.notmatch_op + if op is operators.match_op else operators.match_op, + **kw + ) + + +def _distinct_impl(expr, op, **kw): + """See :meth:`.ColumnOperators.distinct`.""" + return UnaryExpression(expr, operator=operators.distinct_op, + type_=expr.type) + + +def _between_impl(expr, op, cleft, cright, **kw): + """See :meth:`.ColumnOperators.between`.""" + return BinaryExpression( + expr, + ClauseList( + _check_literal(expr, operators.and_, cleft), + _check_literal(expr, operators.and_, cright), + operator=operators.and_, + group=False, group_contents=False), + op, + negate=operators.notbetween_op + if op is operators.between_op + else operators.between_op, + modifiers=kw) + + +def _collate_impl(expr, op, other, **kw): + return collate(expr, other) + +# a mapping of operators with the method they use, along with +# their negated operator for comparison operators +operator_lookup = { + "and_": (_conjunction_operate,), + "or_": (_conjunction_operate,), + "inv": (_inv_impl,), + "add": (_binary_operate,), + "mul": (_binary_operate,), + "sub": (_binary_operate,), + "div": (_binary_operate,), + "mod": (_binary_operate,), + "truediv": (_binary_operate,), + "custom_op": (_binary_operate,), + "json_path_getitem_op": (_binary_operate, ), + "json_getitem_op": (_binary_operate, ), + "concat_op": (_binary_operate,), + "lt": (_boolean_compare, operators.ge), + "le": (_boolean_compare, operators.gt), + "ne": (_boolean_compare, operators.eq), + "gt": (_boolean_compare, operators.le), + "ge": (_boolean_compare, operators.lt), + "eq": (_boolean_compare, operators.ne), + "is_distinct_from": (_boolean_compare, operators.isnot_distinct_from), + "isnot_distinct_from": (_boolean_compare, operators.is_distinct_from), + "like_op": (_boolean_compare, operators.notlike_op), + "ilike_op": (_boolean_compare, operators.notilike_op), + "notlike_op": (_boolean_compare, operators.like_op), + "notilike_op": (_boolean_compare, operators.ilike_op), + "contains_op": (_boolean_compare, operators.notcontains_op), + "startswith_op": (_boolean_compare, operators.notstartswith_op), + "endswith_op": (_boolean_compare, operators.notendswith_op), + "desc_op": 
(_scalar, UnaryExpression._create_desc), + "asc_op": (_scalar, UnaryExpression._create_asc), + "nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst), + "nullslast_op": (_scalar, UnaryExpression._create_nullslast), + "in_op": (_in_impl, operators.notin_op), + "notin_op": (_in_impl, operators.in_op), + "is_": (_boolean_compare, operators.is_), + "isnot": (_boolean_compare, operators.isnot), + "collate": (_collate_impl,), + "match_op": (_match_impl,), + "notmatch_op": (_match_impl,), + "distinct_op": (_distinct_impl,), + "between_op": (_between_impl, ), + "notbetween_op": (_between_impl, ), + "neg": (_neg_impl,), + "getitem": (_getitem_impl,), + "lshift": (_unsupported_impl,), + "rshift": (_unsupported_impl,), + "contains": (_unsupported_impl,), +} + + +def _check_literal(expr, operator, other, bindparam_type=None): + if isinstance(other, (ColumnElement, TextClause)): + if isinstance(other, BindParameter) and \ + other.type._isnull: + other = other._clone() + other.type = expr.type + return other + elif hasattr(other, '__clause_element__'): + other = other.__clause_element__() + elif isinstance(other, type_api.TypeEngine.Comparator): + other = other.expr + + if isinstance(other, (SelectBase, Alias)): + return other.as_scalar() + elif not isinstance(other, Visitable): + return expr._bind_param(operator, other, type_=bindparam_type) + else: + return other + diff --git a/app/lib/sqlalchemy/sql/dml.py b/app/lib/sqlalchemy/sql/dml.py new file mode 100644 index 0000000..767e913 --- /dev/null +++ b/app/lib/sqlalchemy/sql/dml.py @@ -0,0 +1,851 @@ +# sql/dml.py +# Copyright (C) 2009-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +""" +Provide :class:`.Insert`, :class:`.Update` and :class:`.Delete`. + +""" + +from .base import Executable, _generative, _from_objects, DialectKWArgs, \ + ColumnCollection +from .elements import ClauseElement, _literal_as_text, Null, and_, _clone, \ + _column_as_key +from .selectable import _interpret_as_from, _interpret_as_select, \ + HasPrefixes, HasCTE +from .. import util +from .. import exc + + +class UpdateBase( + HasCTE, DialectKWArgs, HasPrefixes, Executable, ClauseElement): + """Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements. 
+ + """ + + __visit_name__ = 'update_base' + + _execution_options = \ + Executable._execution_options.union({'autocommit': True}) + _hints = util.immutabledict() + _parameter_ordering = None + _prefixes = () + named_with_column = False + + def _process_colparams(self, parameters): + def process_single(p): + if isinstance(p, (list, tuple)): + return dict( + (c.key, pval) + for c, pval in zip(self.table.c, p) + ) + else: + return p + + if self._preserve_parameter_order and parameters is not None: + if not isinstance(parameters, list) or \ + (parameters and not isinstance(parameters[0], tuple)): + raise ValueError( + "When preserve_parameter_order is True, " + "values() only accepts a list of 2-tuples") + self._parameter_ordering = [key for key, value in parameters] + + return dict(parameters), False + + if (isinstance(parameters, (list, tuple)) and parameters and + isinstance(parameters[0], (list, tuple, dict))): + + if not self._supports_multi_parameters: + raise exc.InvalidRequestError( + "This construct does not support " + "multiple parameter sets.") + + return [process_single(p) for p in parameters], True + else: + return process_single(parameters), False + + def params(self, *arg, **kw): + """Set the parameters for the statement. + + This method raises ``NotImplementedError`` on the base class, + and is overridden by :class:`.ValuesBase` to provide the + SET/VALUES clause of UPDATE and INSERT. + + """ + raise NotImplementedError( + "params() is not supported for INSERT/UPDATE/DELETE statements." + " To set the values for an INSERT or UPDATE statement, use" + " stmt.values(**parameters).") + + def bind(self): + """Return a 'bind' linked to this :class:`.UpdateBase` + or a :class:`.Table` associated with it. + + """ + return self._bind or self.table.bind + + def _set_bind(self, bind): + self._bind = bind + bind = property(bind, _set_bind) + + @_generative + def returning(self, *cols): + r"""Add a :term:`RETURNING` or equivalent clause to this statement. + + e.g.:: + + stmt = table.update().\ + where(table.c.data == 'value').\ + values(status='X').\ + returning(table.c.server_flag, + table.c.updated_timestamp) + + for server_flag, updated_timestamp in connection.execute(stmt): + print(server_flag, updated_timestamp) + + The given collection of column expressions should be derived from + the table that is + the target of the INSERT, UPDATE, or DELETE. While :class:`.Column` + objects are typical, the elements can also be expressions:: + + stmt = table.insert().returning( + (table.c.first_name + " " + table.c.last_name). + label('fullname')) + + Upon compilation, a RETURNING clause, or database equivalent, + will be rendered within the statement. For INSERT and UPDATE, + the values are the newly inserted/updated values. For DELETE, + the values are those of the rows which were deleted. + + Upon execution, the values of the columns to be returned are made + available via the result set and can be iterated using + :meth:`.ResultProxy.fetchone` and similar. For DBAPIs which do not + natively support returning values (i.e. cx_oracle), SQLAlchemy will + approximate this behavior at the result level so that a reasonable + amount of behavioral neutrality is provided. + + Note that not all databases/DBAPIs + support RETURNING. For those backends with no support, + an exception is raised upon compilation and/or execution. + For those who do support it, the functionality across backends + varies greatly, including restrictions on executemany() + and other statements which return multiple rows. 
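+
+        As an illustrative sketch (not part of the upstream documentation;
+        assumes a RETURNING-capable backend such as PostgreSQL)::
+
+            stmt = table.delete().\
+                where(table.c.status == 'obsolete').\
+                returning(table.c.id)
+
+            removed_ids = [row[0] for row in connection.execute(stmt)]
+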
Please + read the documentation notes for the database in use in + order to determine the availability of RETURNING. + + .. seealso:: + + :meth:`.ValuesBase.return_defaults` - an alternative method tailored + towards efficient fetching of server-side defaults and triggers + for single-row INSERTs or UPDATEs. + + + """ + self._returning = cols + + @_generative + def with_hint(self, text, selectable=None, dialect_name="*"): + """Add a table hint for a single table to this + INSERT/UPDATE/DELETE statement. + + .. note:: + + :meth:`.UpdateBase.with_hint` currently applies only to + Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use + :meth:`.UpdateBase.prefix_with`. + + The text of the hint is rendered in the appropriate + location for the database backend in use, relative + to the :class:`.Table` that is the subject of this + statement, or optionally to that of the given + :class:`.Table` passed as the ``selectable`` argument. + + The ``dialect_name`` option will limit the rendering of a particular + hint to a particular backend. Such as, to add a hint + that only takes effect for SQL Server:: + + mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql") + + .. versionadded:: 0.7.6 + + :param text: Text of the hint. + :param selectable: optional :class:`.Table` that specifies + an element of the FROM clause within an UPDATE or DELETE + to be the subject of the hint - applies only to certain backends. + :param dialect_name: defaults to ``*``, if specified as the name + of a particular dialect, will apply these hints only when + that dialect is in use. + """ + if selectable is None: + selectable = self.table + + self._hints = self._hints.union( + {(selectable, dialect_name): text}) + + +class ValuesBase(UpdateBase): + """Supplies support for :meth:`.ValuesBase.values` to + INSERT and UPDATE constructs.""" + + __visit_name__ = 'values_base' + + _supports_multi_parameters = False + _has_multi_parameters = False + _preserve_parameter_order = False + select = None + _post_values_clause = None + + def __init__(self, table, values, prefixes): + self.table = _interpret_as_from(table) + self.parameters, self._has_multi_parameters = \ + self._process_colparams(values) + if prefixes: + self._setup_prefixes(prefixes) + + @_generative + def values(self, *args, **kwargs): + r"""specify a fixed VALUES clause for an INSERT statement, or the SET + clause for an UPDATE. + + Note that the :class:`.Insert` and :class:`.Update` constructs support + per-execution time formatting of the VALUES and/or SET clauses, + based on the arguments passed to :meth:`.Connection.execute`. + However, the :meth:`.ValuesBase.values` method can be used to "fix" a + particular set of parameters into the statement. + + Multiple calls to :meth:`.ValuesBase.values` will produce a new + construct, each one with the parameter list modified to include + the new parameters sent. In the typical case of a single + dictionary of parameters, the newly passed keys will replace + the same keys in the previous construct. In the case of a list-based + "multiple values" construct, each new list of values is extended + onto the existing list of values. 
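+
+        For example, a brief hypothetical sketch (``users`` assumed)::
+
+            stmt = users.insert().values(name="some name")
+
+            # a later call replaces the previously established "name" key
+            stmt = stmt.values(name="some other name")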
+ + :param \**kwargs: key value pairs representing the string key + of a :class:`.Column` mapped to the value to be rendered into the + VALUES or SET clause:: + + users.insert().values(name="some name") + + users.update().where(users.c.id==5).values(name="some name") + + :param \*args: As an alternative to passing key/value parameters, + a dictionary, tuple, or list of dictionaries or tuples can be passed + as a single positional argument in order to form the VALUES or + SET clause of the statement. The forms that are accepted vary + based on whether this is an :class:`.Insert` or an :class:`.Update` + construct. + + For either an :class:`.Insert` or :class:`.Update` construct, a + single dictionary can be passed, which works the same as that of + the kwargs form:: + + users.insert().values({"name": "some name"}) + + users.update().values({"name": "some new name"}) + + Also for either form but more typically for the :class:`.Insert` + construct, a tuple that contains an entry for every column in the + table is also accepted:: + + users.insert().values((5, "some name")) + + The :class:`.Insert` construct also supports being passed a list + of dictionaries or full-table-tuples, which on the server will + render the less common SQL syntax of "multiple values" - this + syntax is supported on backends such as SQLite, PostgreSQL, MySQL, + but not necessarily others:: + + users.insert().values([ + {"name": "some name"}, + {"name": "some other name"}, + {"name": "yet another name"}, + ]) + + The above form would render a multiple VALUES statement similar to:: + + INSERT INTO users (name) VALUES + (:name_1), + (:name_2), + (:name_3) + + It is essential to note that **passing multiple values is + NOT the same as using traditional executemany() form**. The above + syntax is a **special** syntax not typically used. To emit an + INSERT statement against multiple rows, the normal method is + to pass a multiple values list to the :meth:`.Connection.execute` + method, which is supported by all database backends and is generally + more efficient for a very large number of parameters. + + .. seealso:: + + :ref:`execute_multiple` - an introduction to + the traditional Core method of multiple parameter set + invocation for INSERTs and other statements. + + .. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES + clause, even a list of length one, + implies that the :paramref:`.Insert.inline` flag is set to + True, indicating that the statement will not attempt to fetch + the "last inserted primary key" or other defaults. The + statement deals with an arbitrary number of rows, so the + :attr:`.ResultProxy.inserted_primary_key` accessor does not + apply. + + .. versionchanged:: 1.0.0 A multiple-VALUES INSERT now supports + columns with Python side default values and callables in the + same way as that of an "executemany" style of invocation; the + callable is invoked for each row. See :ref:`bug_3288` + for other details. + + The :class:`.Update` construct supports a special form which is a + list of 2-tuples, which when provided must be passed in conjunction + with the + :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` + parameter. + This form causes the UPDATE statement to render the SET clauses + using the order of parameters given to :meth:`.Update.values`, rather + than the ordering of columns given in the :class:`.Table`. + + .. 
versionadded:: 1.0.10 - added support for parameter-ordered + UPDATE statements via the + :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` + flag. + + .. seealso:: + + :ref:`updates_order_parameters` - full example of the + :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` + flag + + .. seealso:: + + :ref:`inserts_and_updates` - SQL Expression + Language Tutorial + + :func:`~.expression.insert` - produce an ``INSERT`` statement + + :func:`~.expression.update` - produce an ``UPDATE`` statement + + """ + if self.select is not None: + raise exc.InvalidRequestError( + "This construct already inserts from a SELECT") + if self._has_multi_parameters and kwargs: + raise exc.InvalidRequestError( + "This construct already has multiple parameter sets.") + + if args: + if len(args) > 1: + raise exc.ArgumentError( + "Only a single dictionary/tuple or list of " + "dictionaries/tuples is accepted positionally.") + v = args[0] + else: + v = {} + + if self.parameters is None: + self.parameters, self._has_multi_parameters = \ + self._process_colparams(v) + else: + if self._has_multi_parameters: + self.parameters = list(self.parameters) + p, self._has_multi_parameters = self._process_colparams(v) + if not self._has_multi_parameters: + raise exc.ArgumentError( + "Can't mix single-values and multiple values " + "formats in one statement") + + self.parameters.extend(p) + else: + self.parameters = self.parameters.copy() + p, self._has_multi_parameters = self._process_colparams(v) + if self._has_multi_parameters: + raise exc.ArgumentError( + "Can't mix single-values and multiple values " + "formats in one statement") + self.parameters.update(p) + + if kwargs: + if self._has_multi_parameters: + raise exc.ArgumentError( + "Can't pass kwargs and multiple parameter sets " + "simultaneously") + else: + self.parameters.update(kwargs) + + @_generative + def return_defaults(self, *cols): + """Make use of a :term:`RETURNING` clause for the purpose + of fetching server-side expressions and defaults. + + E.g.:: + + stmt = table.insert().values(data='newdata').return_defaults() + + result = connection.execute(stmt) + + server_created_at = result.returned_defaults['created_at'] + + When used against a backend that supports RETURNING, all column + values generated by SQL expression or server-side-default will be + added to any existing RETURNING clause, provided that + :meth:`.UpdateBase.returning` is not used simultaneously. The column + values will then be available on the result using the + :attr:`.ResultProxy.returned_defaults` accessor as a dictionary, + referring to values keyed to the :class:`.Column` object as well as + its ``.key``. + + This method differs from :meth:`.UpdateBase.returning` in these ways: + + 1. :meth:`.ValuesBase.return_defaults` is only intended for use with + an INSERT or an UPDATE statement that matches exactly one row. + While the RETURNING construct in the general sense supports + multiple rows for a multi-row UPDATE or DELETE statement, or for + special cases of INSERT that return multiple rows (e.g. INSERT from + SELECT, multi-valued VALUES clause), + :meth:`.ValuesBase.return_defaults` is intended only for an + "ORM-style" single-row INSERT/UPDATE statement. The row returned + by the statement is also consumed implicitly when + :meth:`.ValuesBase.return_defaults` is used. By contrast, + :meth:`.UpdateBase.returning` leaves the RETURNING result-set + intact with a collection of any number of rows. + + 2. 
It is compatible with the existing logic to fetch auto-generated + primary key values, also known as "implicit returning". Backends + that support RETURNING will automatically make use of RETURNING in + order to fetch the value of newly generated primary keys; while the + :meth:`.UpdateBase.returning` method circumvents this behavior, + :meth:`.ValuesBase.return_defaults` leaves it intact. + + 3. It can be called against any backend. Backends that don't support + RETURNING will skip the usage of the feature, rather than raising + an exception. The return value of + :attr:`.ResultProxy.returned_defaults` will be ``None`` + + :meth:`.ValuesBase.return_defaults` is used by the ORM to provide + an efficient implementation for the ``eager_defaults`` feature of + :func:`.mapper`. + + :param cols: optional list of column key names or :class:`.Column` + objects. If omitted, all column expressions evaluated on the server + are added to the returning list. + + .. versionadded:: 0.9.0 + + .. seealso:: + + :meth:`.UpdateBase.returning` + + :attr:`.ResultProxy.returned_defaults` + + """ + self._return_defaults = cols or True + + +class Insert(ValuesBase): + """Represent an INSERT construct. + + The :class:`.Insert` object is created using the + :func:`~.expression.insert()` function. + + .. seealso:: + + :ref:`coretutorial_insert_expressions` + + """ + __visit_name__ = 'insert' + + _supports_multi_parameters = True + + def __init__(self, + table, + values=None, + inline=False, + bind=None, + prefixes=None, + returning=None, + return_defaults=False, + **dialect_kw): + """Construct an :class:`.Insert` object. + + Similar functionality is available via the + :meth:`~.TableClause.insert` method on + :class:`~.schema.Table`. + + :param table: :class:`.TableClause` which is the subject of the + insert. + + :param values: collection of values to be inserted; see + :meth:`.Insert.values` for a description of allowed formats here. + Can be omitted entirely; a :class:`.Insert` construct will also + dynamically render the VALUES clause at execution time based on + the parameters passed to :meth:`.Connection.execute`. + + :param inline: if True, no attempt will be made to retrieve the + SQL-generated default values to be provided within the statement; + in particular, + this allows SQL expressions to be rendered 'inline' within the + statement without the need to pre-execute them beforehand; for + backends that support "returning", this turns off the "implicit + returning" feature for the statement. + + If both `values` and compile-time bind parameters are present, the + compile-time bind parameters override the information specified + within `values` on a per-key basis. + + The keys within `values` can be either + :class:`~sqlalchemy.schema.Column` objects or their string + identifiers. Each key may reference one of: + + * a literal data value (i.e. string, number, etc.); + * a Column object; + * a SELECT statement. + + If a ``SELECT`` statement is specified which references this + ``INSERT`` statement's table, the statement will be correlated + against the ``INSERT`` statement. + + .. 
seealso:: + + :ref:`coretutorial_insert_expressions` - SQL Expression Tutorial + + :ref:`inserts_and_updates` - SQL Expression Tutorial + + """ + ValuesBase.__init__(self, table, values, prefixes) + self._bind = bind + self.select = self.select_names = None + self.include_insert_from_select_defaults = False + self.inline = inline + self._returning = returning + self._validate_dialect_kwargs(dialect_kw) + self._return_defaults = return_defaults + + def get_children(self, **kwargs): + if self.select is not None: + return self.select, + else: + return () + + @_generative + def from_select(self, names, select, include_defaults=True): + """Return a new :class:`.Insert` construct which represents + an ``INSERT...FROM SELECT`` statement. + + e.g.:: + + sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5) + ins = table2.insert().from_select(['a', 'b'], sel) + + :param names: a sequence of string column names or :class:`.Column` + objects representing the target columns. + :param select: a :func:`.select` construct, :class:`.FromClause` + or other construct which resolves into a :class:`.FromClause`, + such as an ORM :class:`.Query` object, etc. The order of + columns returned from this FROM clause should correspond to the + order of columns sent as the ``names`` parameter; while this + is not checked before passing along to the database, the database + would normally raise an exception if these column lists don't + correspond. + :param include_defaults: if True, non-server default values and + SQL expressions as specified on :class:`.Column` objects + (as documented in :ref:`metadata_defaults_toplevel`) not + otherwise specified in the list of names will be rendered + into the INSERT and SELECT statements, so that these values are also + included in the data to be inserted. + + .. note:: A Python-side default that uses a Python callable function + will only be invoked **once** for the whole statement, and **not + per row**. + + .. versionadded:: 1.0.0 - :meth:`.Insert.from_select` now renders + Python-side and SQL expression column defaults into the + SELECT statement for columns otherwise not included in the + list of column names. + + .. versionchanged:: 1.0.0 an INSERT that uses FROM SELECT + implies that the :paramref:`.insert.inline` flag is set to + True, indicating that the statement will not attempt to fetch + the "last inserted primary key" or other defaults. The statement + deals with an arbitrary number of rows, so the + :attr:`.ResultProxy.inserted_primary_key` accessor does not apply. + + .. versionadded:: 0.8.3 + + """ + if self.parameters: + raise exc.InvalidRequestError( + "This construct already inserts value expressions") + + self.parameters, self._has_multi_parameters = \ + self._process_colparams( + dict((_column_as_key(n), Null()) for n in names)) + + self.select_names = names + self.inline = True + self.include_insert_from_select_defaults = include_defaults + self.select = _interpret_as_select(select) + + def _copy_internals(self, clone=_clone, **kw): + # TODO: coverage + self.parameters = self.parameters.copy() + if self.select is not None: + self.select = _clone(self.select) + + +class Update(ValuesBase): + """Represent an Update construct. + + The :class:`.Update` object is created using the :func:`update()` + function. 
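+
+    A brief usage sketch (illustrative only; assumes a ``users`` table and
+    ``bindparam`` imported from ``sqlalchemy``)::
+
+        stmt = users.update().where(users.c.id == bindparam('uid'))
+
+        # remaining keys form the SET clause at execution time
+        connection.execute(stmt, uid=5, name='user #5')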
+ + """ + __visit_name__ = 'update' + + def __init__(self, + table, + whereclause=None, + values=None, + inline=False, + bind=None, + prefixes=None, + returning=None, + return_defaults=False, + preserve_parameter_order=False, + **dialect_kw): + r"""Construct an :class:`.Update` object. + + E.g.:: + + from sqlalchemy import update + + stmt = update(users).where(users.c.id==5).\ + values(name='user #5') + + Similar functionality is available via the + :meth:`~.TableClause.update` method on + :class:`.Table`:: + + stmt = users.update().\ + where(users.c.id==5).\ + values(name='user #5') + + :param table: A :class:`.Table` object representing the database + table to be updated. + + :param whereclause: Optional SQL expression describing the ``WHERE`` + condition of the ``UPDATE`` statement. Modern applications + may prefer to use the generative :meth:`~Update.where()` + method to specify the ``WHERE`` clause. + + The WHERE clause can refer to multiple tables. + For databases which support this, an ``UPDATE FROM`` clause will + be generated, or on MySQL, a multi-table update. The statement + will fail on databases that don't have support for multi-table + update statements. A SQL-standard method of referring to + additional tables in the WHERE clause is to use a correlated + subquery:: + + users.update().values(name='ed').where( + users.c.name==select([addresses.c.email_address]).\ + where(addresses.c.user_id==users.c.id).\ + as_scalar() + ) + + .. versionchanged:: 0.7.4 + The WHERE clause can refer to multiple tables. + + :param values: + Optional dictionary which specifies the ``SET`` conditions of the + ``UPDATE``. If left as ``None``, the ``SET`` + conditions are determined from those parameters passed to the + statement during the execution and/or compilation of the + statement. When compiled standalone without any parameters, + the ``SET`` clause generates for all columns. + + Modern applications may prefer to use the generative + :meth:`.Update.values` method to set the values of the + UPDATE statement. + + :param inline: + if True, SQL defaults present on :class:`.Column` objects via + the ``default`` keyword will be compiled 'inline' into the statement + and not pre-executed. This means that their values will not + be available in the dictionary returned from + :meth:`.ResultProxy.last_updated_params`. + + :param preserve_parameter_order: if True, the update statement is + expected to receive parameters **only** via the :meth:`.Update.values` + method, and they must be passed as a Python ``list`` of 2-tuples. + The rendered UPDATE statement will emit the SET clause for each + referenced column maintaining this order. + + .. versionadded:: 1.0.10 + + .. seealso:: + + :ref:`updates_order_parameters` - full example of the + :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag + + If both ``values`` and compile-time bind parameters are present, the + compile-time bind parameters override the information specified + within ``values`` on a per-key basis. + + The keys within ``values`` can be either :class:`.Column` + objects or their string identifiers (specifically the "key" of the + :class:`.Column`, normally but not necessarily equivalent to + its "name"). Normally, the + :class:`.Column` objects used here are expected to be + part of the target :class:`.Table` that is the table + to be updated. However when using MySQL, a multiple-table + UPDATE statement can refer to columns from any of + the tables referred to in the WHERE clause. 
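+
+        For instance, a hypothetical multi-table form on MySQL (table
+        names assumed for illustration)::
+
+            users.update().\
+                values(name='ed wood').\
+                where(users.c.id == addresses.c.user_id).\
+                where(addresses.c.email_address.like('%@wood.com'))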
+ + The values referred to in ``values`` are typically: + + * a literal data value (i.e. string, number, etc.) + * a SQL expression, such as a related :class:`.Column`, + a scalar-returning :func:`.select` construct, + etc. + + When combining :func:`.select` constructs within the values + clause of an :func:`.update` construct, + the subquery represented by the :func:`.select` should be + *correlated* to the parent table, that is, providing criterion + which links the table inside the subquery to the outer table + being updated:: + + users.update().values( + name=select([addresses.c.email_address]).\ + where(addresses.c.user_id==users.c.id).\ + as_scalar() + ) + + .. seealso:: + + :ref:`inserts_and_updates` - SQL Expression + Language Tutorial + + + """ + self._preserve_parameter_order = preserve_parameter_order + ValuesBase.__init__(self, table, values, prefixes) + self._bind = bind + self._returning = returning + if whereclause is not None: + self._whereclause = _literal_as_text(whereclause) + else: + self._whereclause = None + self.inline = inline + self._validate_dialect_kwargs(dialect_kw) + self._return_defaults = return_defaults + + def get_children(self, **kwargs): + if self._whereclause is not None: + return self._whereclause, + else: + return () + + def _copy_internals(self, clone=_clone, **kw): + # TODO: coverage + self._whereclause = clone(self._whereclause, **kw) + self.parameters = self.parameters.copy() + + @_generative + def where(self, whereclause): + """return a new update() construct with the given expression added to + its WHERE clause, joined to the existing clause via AND, if any. + + """ + if self._whereclause is not None: + self._whereclause = and_(self._whereclause, + _literal_as_text(whereclause)) + else: + self._whereclause = _literal_as_text(whereclause) + + @property + def _extra_froms(self): + # TODO: this could be made memoized + # if the memoization is reset on each generative call. + froms = [] + seen = set([self.table]) + + if self._whereclause is not None: + for item in _from_objects(self._whereclause): + if not seen.intersection(item._cloned_set): + froms.append(item) + seen.update(item._cloned_set) + + return froms + + +class Delete(UpdateBase): + """Represent a DELETE construct. + + The :class:`.Delete` object is created using the :func:`delete()` + function. + + """ + + __visit_name__ = 'delete' + + def __init__(self, + table, + whereclause=None, + bind=None, + returning=None, + prefixes=None, + **dialect_kw): + """Construct :class:`.Delete` object. + + Similar functionality is available via the + :meth:`~.TableClause.delete` method on + :class:`~.schema.Table`. + + :param table: The table to delete rows from. + + :param whereclause: A :class:`.ClauseElement` describing the ``WHERE`` + condition of the ``DELETE`` statement. Note that the + :meth:`~Delete.where()` generative method may be used instead. + + .. 
seealso::
+
+            :ref:`deletes` - SQL Expression Tutorial
+
+        """
+        self._bind = bind
+        self.table = _interpret_as_from(table)
+        self._returning = returning
+
+        if prefixes:
+            self._setup_prefixes(prefixes)
+
+        if whereclause is not None:
+            self._whereclause = _literal_as_text(whereclause)
+        else:
+            self._whereclause = None
+
+        self._validate_dialect_kwargs(dialect_kw)
+
+    def get_children(self, **kwargs):
+        if self._whereclause is not None:
+            return self._whereclause,
+        else:
+            return ()
+
+    @_generative
+    def where(self, whereclause):
+        """Add the given WHERE clause to a newly returned delete construct."""
+
+        if self._whereclause is not None:
+            self._whereclause = and_(self._whereclause,
+                                     _literal_as_text(whereclause))
+        else:
+            self._whereclause = _literal_as_text(whereclause)
+
+    def _copy_internals(self, clone=_clone, **kw):
+        # TODO: coverage
+        self._whereclause = clone(self._whereclause, **kw)
diff --git a/app/lib/sqlalchemy/sql/elements.py b/app/lib/sqlalchemy/sql/elements.py
new file mode 100644
index 0000000..a450efa
--- /dev/null
+++ b/app/lib/sqlalchemy/sql/elements.py
@@ -0,0 +1,4403 @@
+# sql/elements.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""Core SQL expression elements, including :class:`.ClauseElement`,
+:class:`.ColumnElement`, and derived classes.
+
+"""
+
+from __future__ import unicode_literals
+
+from .. import util, exc, inspection
+from . import type_api
+from . import operators
+from .visitors import Visitable, cloned_traverse, traverse
+from .annotation import Annotated
+import itertools
+from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG
+from .base import _generative
+import numbers
+
+import re
+import operator
+
+
+def _clone(element, **kw):
+    return element._clone()
+
+
+def collate(expression, collation):
+    """Return the clause ``expression COLLATE collation``.
+
+    e.g.::
+
+        collate(mycolumn, 'utf8_bin')
+
+    produces::
+
+        mycolumn COLLATE utf8_bin
+
+    """
+
+    expr = _literal_as_binds(expression)
+    return BinaryExpression(
+        expr,
+        _literal_as_text(collation),
+        operators.collate, type_=expr.type)
+
+
+def between(expr, lower_bound, upper_bound, symmetric=False):
+    """Produce a ``BETWEEN`` predicate clause.
+
+    E.g.::
+
+        from sqlalchemy import between
+        stmt = select([users_table]).where(between(users_table.c.id, 5, 7))
+
+    Would produce SQL resembling::
+
+        SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2
+
+    The :func:`.between` function is a standalone version of the
+    :meth:`.ColumnElement.between` method available on all
+    SQL expressions, as in::
+
+        stmt = select([users_table]).where(users_table.c.id.between(5, 7))
+
+    All arguments passed to :func:`.between`, including the left side
+    column expression, are coerced from Python scalar values if
+    the value is not a :class:`.ColumnElement` subclass.  For example,
+    three fixed values can be compared as in::
+
+        print(between(5, 3, 7))
+
+    Which would produce::
+
+        :param_1 BETWEEN :param_2 AND :param_3
+
+    :param expr: a column expression, typically a :class:`.ColumnElement`
+     instance or alternatively a Python scalar expression to be coerced
+     into a column expression, serving as the left side of the ``BETWEEN``
+     expression.
+
+    :param lower_bound: a column or Python scalar expression serving as the
+     lower bound of the right side of the ``BETWEEN`` expression.
+
+    :param upper_bound: a column or Python scalar expression serving as the
+     upper bound of the right side of the ``BETWEEN`` expression.
+
+    :param symmetric: if True, will render " BETWEEN SYMMETRIC ". Note
+     that not all databases support this syntax.
+
+     .. versionadded:: 0.9.5
+
+    .. seealso::
+
+        :meth:`.ColumnElement.between`
+
+    """
+    expr = _literal_as_binds(expr)
+    return expr.between(lower_bound, upper_bound, symmetric=symmetric)
+
+
+def literal(value, type_=None):
+    r"""Return a literal clause, bound to a bind parameter.
+
+    Literal clauses are created automatically when non-
+    :class:`.ClauseElement` objects (such as strings, ints, dates, etc.) are
+    used in a comparison operation with a :class:`.ColumnElement` subclass,
+    such as a :class:`~sqlalchemy.schema.Column` object.  Use this function
+    to force the generation of a literal clause, which will be created as a
+    :class:`BindParameter` with a bound value.
+
+    :param value: the value to be bound. Can be any Python object supported by
+     the underlying DB-API, or is translatable via the given type argument.
+
+    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
+     will provide bind-parameter translation for this literal.
+
+    """
+    return BindParameter(None, value, type_=type_, unique=True)
+
+
+def outparam(key, type_=None):
+    """Create an 'OUT' parameter for usage in functions (stored procedures),
+    for databases which support them.
+
+    The ``outparam`` can be used like a regular function parameter.
+    The "output" value will be available from the
+    :class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
+    attribute, which returns a dictionary containing the values.
+
+    """
+    return BindParameter(
+        key, None, type_=type_, unique=False, isoutparam=True)
+
+
+def not_(clause):
+    """Return a negation of the given clause, i.e. ``NOT(clause)``.
+
+    The ``~`` operator is also overloaded on all
+    :class:`.ColumnElement` subclasses to produce the
+    same result.
+
+    """
+    return operators.inv(_literal_as_binds(clause))
+
+
+@inspection._self_inspects
+class ClauseElement(Visitable):
+    """Base class for elements of a programmatically constructed SQL
+    expression.
+
+    """
+    __visit_name__ = 'clause'
+
+    _annotations = {}
+    supports_execution = False
+    _from_objects = []
+    bind = None
+    _is_clone_of = None
+    is_selectable = False
+    is_clause_element = True
+
+    description = None
+    _order_by_label_element = None
+    _is_from_container = False
+
+    def _clone(self):
+        """Create a shallow copy of this ClauseElement.
+
+        This method may be used by a generative API.  It's also used as
+        part of the "deep" copy afforded by a traversal that combines
+        the _copy_internals() method.
+
+        """
+        c = self.__class__.__new__(self.__class__)
+        c.__dict__ = self.__dict__.copy()
+        ClauseElement._cloned_set._reset(c)
+        ColumnElement.comparator._reset(c)
+
+        # this is a marker that helps to "equate" clauses to each other
+        # when a Select returns its list of FROM clauses.  the cloning
+        # process leaves around a lot of remnants of the previous clause
+        # typically in the form of column expressions still attached to the
+        # old table.
+        c._is_clone_of = self
+
+        return c
+
+    @property
+    def _constructor(self):
+        """return the 'constructor' for this ClauseElement.
+
+        This is for the purpose of creating a new object of
+        this type.  Usually, it's just the element's __class__.
+        However, the "Annotated" version of the object overrides
+        to return the class of its proxied element.
+
+        """
+        return self.__class__
+
+    @util.memoized_property
+    def _cloned_set(self):
+        """Return the set consisting of all cloned ancestors of this
+        ClauseElement.
+
+        Includes this ClauseElement.  This accessor tends to be used for
+        FromClause objects to identify 'equivalent' FROM clauses, regardless
+        of transformative operations.
+
+        """
+        s = util.column_set()
+        f = self
+        while f is not None:
+            s.add(f)
+            f = f._is_clone_of
+        return s
+
+    def __getstate__(self):
+        d = self.__dict__.copy()
+        d.pop('_is_clone_of', None)
+        return d
+
+    def _annotate(self, values):
+        """return a copy of this ClauseElement with annotations
+        updated by the given dictionary.
+
+        """
+        return Annotated(self, values)
+
+    def _with_annotations(self, values):
+        """return a copy of this ClauseElement with annotations
+        replaced by the given dictionary.
+
+        """
+        return Annotated(self, values)
+
+    def _deannotate(self, values=None, clone=False):
+        """return a copy of this :class:`.ClauseElement` with annotations
+        removed.
+
+        :param values: optional tuple of individual values
+         to remove.
+
+        """
+        if clone:
+            # clone is used when we are also copying
+            # the expression for a deep deannotation
+            return self._clone()
+        else:
+            # if no clone, since we have no annotations we return
+            # self
+            return self
+
+    def _execute_on_connection(self, connection, multiparams, params):
+        if self.supports_execution:
+            return connection._execute_clauseelement(self, multiparams, params)
+        else:
+            raise exc.ObjectNotExecutableError(self)
+
+    def unique_params(self, *optionaldict, **kwargs):
+        """Return a copy with :func:`bindparam()` elements replaced.
+
+        Same functionality as ``params()``, except adds `unique=True`
+        to affected bind parameters so that multiple statements can be
+        used.
+
+        """
+        return self._params(True, optionaldict, kwargs)
+
+    def params(self, *optionaldict, **kwargs):
+        """Return a copy with :func:`bindparam()` elements replaced.
+
+        Returns a copy of this ClauseElement with :func:`bindparam()`
+        elements replaced with values taken from the given dictionary::
+
+          >>> clause = column('x') + bindparam('foo')
+          >>> print clause.compile().params
+          {'foo':None}
+          >>> print clause.params({'foo':7}).compile().params
+          {'foo':7}
+
+        """
+        return self._params(False, optionaldict, kwargs)
+
+    def _params(self, unique, optionaldict, kwargs):
+        if len(optionaldict) == 1:
+            kwargs.update(optionaldict[0])
+        elif len(optionaldict) > 1:
+            raise exc.ArgumentError(
+                "params() takes zero or one positional dictionary argument")
+
+        def visit_bindparam(bind):
+            if bind.key in kwargs:
+                bind.value = kwargs[bind.key]
+                bind.required = False
+            if unique:
+                bind._convert_to_unique()
+        return cloned_traverse(self, {}, {'bindparam': visit_bindparam})
+
+    def compare(self, other, **kw):
+        r"""Compare this ClauseElement to the given ClauseElement.
+
+        Subclasses should override the default behavior, which is a
+        straight identity comparison.
+
+        \**kw are arguments consumed by subclass compare() methods and
+        may be used to modify the criteria for comparison.
+        (see :class:`.ColumnElement`)
+
+        """
+        return self is other
+
+    def _copy_internals(self, clone=_clone, **kw):
+        """Reassign internal elements to be clones of themselves.
+
+        Called during a copy-and-traverse operation on newly
+        shallow-copied elements to create a deep copy.
+
+        The given clone function should be used, which may be applying
+        additional transformations to the element (i.e. replacement
+        traversal, cloned traversal, annotations).
+ + """ + pass + + def get_children(self, **kwargs): + r"""Return immediate child elements of this :class:`.ClauseElement`. + + This is used for visit traversal. + + \**kwargs may contain flags that change the collection that is + returned, for example to return a subset of items in order to + cut down on larger traversals, or to return child items from a + different context (such as schema-level collections instead of + clause-level). + + """ + return [] + + def self_group(self, against=None): + """Apply a 'grouping' to this :class:`.ClauseElement`. + + This method is overridden by subclasses to return a + "grouping" construct, i.e. parenthesis. In particular + it's used by "binary" expressions to provide a grouping + around themselves when placed into a larger expression, + as well as by :func:`.select` constructs when placed into + the FROM clause of another :func:`.select`. (Note that + subqueries should be normally created using the + :meth:`.Select.alias` method, as many platforms require + nested SELECT statements to be named). + + As expressions are composed together, the application of + :meth:`self_group` is automatic - end-user code should never + need to use this method directly. Note that SQLAlchemy's + clause constructs take operator precedence into account - + so parenthesis might not be needed, for example, in + an expression like ``x OR (y AND z)`` - AND takes precedence + over OR. + + The base :meth:`self_group` method of :class:`.ClauseElement` + just returns self. + """ + return self + + @util.dependencies("sqlalchemy.engine.default") + def compile(self, default, bind=None, dialect=None, **kw): + """Compile this SQL expression. + + The return value is a :class:`~.Compiled` object. + Calling ``str()`` or ``unicode()`` on the returned value will yield a + string representation of the result. The + :class:`~.Compiled` object also can return a + dictionary of bind parameter names and values + using the ``params`` accessor. + + :param bind: An ``Engine`` or ``Connection`` from which a + ``Compiled`` will be acquired. This argument takes precedence over + this :class:`.ClauseElement`'s bound engine, if any. + + :param column_keys: Used for INSERT and UPDATE statements, a list of + column names which should be present in the VALUES clause of the + compiled statement. If ``None``, all columns from the target table + object are rendered. + + :param dialect: A ``Dialect`` instance from which a ``Compiled`` + will be acquired. This argument takes precedence over the `bind` + argument as well as this :class:`.ClauseElement`'s bound engine, + if any. + + :param inline: Used for INSERT statements, for a dialect which does + not support inline retrieval of newly generated primary key + columns, will force the expression used to create the new primary + key value to be rendered inline within the INSERT statement's + VALUES clause. This typically refers to Sequence execution but may + also refer to any server-side default generation function + associated with a primary key `Column`. + + :param compile_kwargs: optional dictionary of additional parameters + that will be passed through to the compiler within all "visit" + methods. This allows any custom flag to be passed through to + a custom compilation construct, for example. 
It is also used + for the case of passing the ``literal_binds`` flag through:: + + from sqlalchemy.sql import table, column, select + + t = table('t', column('x')) + + s = select([t]).where(t.c.x == 5) + + print s.compile(compile_kwargs={"literal_binds": True}) + + .. versionadded:: 0.9.0 + + .. seealso:: + + :ref:`faq_sql_expression_string` + + """ + + if not dialect: + if bind: + dialect = bind.dialect + elif self.bind: + dialect = self.bind.dialect + bind = self.bind + else: + dialect = default.StrCompileDialect() + return self._compiler(dialect, bind=bind, **kw) + + def _compiler(self, dialect, **kw): + """Return a compiler appropriate for this ClauseElement, given a + Dialect.""" + + return dialect.statement_compiler(dialect, self, **kw) + + def __str__(self): + if util.py3k: + return str(self.compile()) + else: + return unicode(self.compile()).encode('ascii', 'backslashreplace') + + def __and__(self, other): + """'and' at the ClauseElement level. + + .. deprecated:: 0.9.5 - conjunctions are intended to be + at the :class:`.ColumnElement`. level + + """ + return and_(self, other) + + def __or__(self, other): + """'or' at the ClauseElement level. + + .. deprecated:: 0.9.5 - conjunctions are intended to be + at the :class:`.ColumnElement`. level + + """ + return or_(self, other) + + def __invert__(self): + if hasattr(self, 'negation_clause'): + return self.negation_clause + else: + return self._negate() + + def _negate(self): + return UnaryExpression( + self.self_group(against=operators.inv), + operator=operators.inv, + negate=None) + + def __bool__(self): + raise TypeError("Boolean value of this clause is not defined") + + __nonzero__ = __bool__ + + def __repr__(self): + friendly = self.description + if friendly is None: + return object.__repr__(self) + else: + return '<%s.%s at 0x%x; %s>' % ( + self.__module__, self.__class__.__name__, id(self), friendly) + + +class ColumnElement(operators.ColumnOperators, ClauseElement): + """Represent a column-oriented SQL expression suitable for usage in the + "columns" clause, WHERE clause etc. of a statement. + + While the most familiar kind of :class:`.ColumnElement` is the + :class:`.Column` object, :class:`.ColumnElement` serves as the basis + for any unit that may be present in a SQL expression, including + the expressions themselves, SQL functions, bound parameters, + literal expressions, keywords such as ``NULL``, etc. + :class:`.ColumnElement` is the ultimate base class for all such elements. + + A wide variety of SQLAlchemy Core functions work at the SQL expression + level, and are intended to accept instances of :class:`.ColumnElement` as + arguments. These functions will typically document that they accept a + "SQL expression" as an argument. What this means in terms of SQLAlchemy + usually refers to an input which is either already in the form of a + :class:`.ColumnElement` object, or a value which can be **coerced** into + one. The coercion rules followed by most, but not all, SQLAlchemy Core + functions with regards to SQL expressions are as follows: + + * a literal Python value, such as a string, integer or floating + point value, boolean, datetime, ``Decimal`` object, or virtually + any other Python object, will be coerced into a "literal bound + value". This generally means that a :func:`.bindparam` will be + produced featuring the given value embedded into the construct; the + resulting :class:`.BindParameter` object is an instance of + :class:`.ColumnElement`. 
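+
+      For example (a minimal sketch, using :func:`.expression.column`)::
+
+          from sqlalchemy.sql import column
+          expr = column('x') + 5
+          print(expr)   # renders "x + :x_1"; the 5 became a BindParameter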
The Python value will ultimately be sent
+      to the DBAPI at execution time as a parameterized argument to the
+      ``execute()`` or ``executemany()`` methods, after SQLAlchemy
+      type-specific converters (e.g. those provided by any associated
+      :class:`.TypeEngine` objects) are applied to the value.
+
+    * any special object value, typically ORM-level constructs, which
+      feature a method called ``__clause_element__()``.  The Core
+      expression system looks for this method when an object of otherwise
+      unknown type is passed to a function that is looking to coerce the
+      argument into a :class:`.ColumnElement` expression.  The
+      ``__clause_element__()`` method, if present, should return a
+      :class:`.ColumnElement` instance.  The primary use of
+      ``__clause_element__()`` within SQLAlchemy is that of class-bound
+      attributes on ORM-mapped classes; a ``User`` class which contains a
+      mapped attribute named ``.name`` will have a method
+      ``User.name.__clause_element__()`` which when invoked returns the
+      :class:`.Column` called ``name`` associated with the mapped table.
+
+    * The Python ``None`` value is typically interpreted as ``NULL``,
+      which in SQLAlchemy Core produces an instance of :func:`.null`.
+
+    A :class:`.ColumnElement` provides the ability to generate new
+    :class:`.ColumnElement`
+    objects using Python expressions.  This means that Python operators
+    such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
+    and allow the instantiation of further :class:`.ColumnElement` instances
+    which are composed from other, more fundamental :class:`.ColumnElement`
+    objects.  For example, two :class:`.ColumnClause` objects can be added
+    together with the addition operator ``+`` to produce
+    a :class:`.BinaryExpression`.
+    Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
+    of :class:`.ColumnElement`::
+
+        >>> from sqlalchemy.sql import column
+        >>> column('a') + column('b')
+        <sqlalchemy.sql.elements.BinaryExpression object at 0x...>
+
+        >>> print column('a') + column('b')
+        a + b
+
+    .. seealso::
+
+        :class:`.Column`
+
+        :func:`.expression.column`
+
+    """
+
+    __visit_name__ = 'column'
+    primary_key = False
+    foreign_keys = []
+
+    _label = None
+    """The named label that can be used to target
+    this column in a result set.
+
+    This label is almost always the label used when
+    rendering ``<expr> AS <label>`` in a SELECT statement; typically
+    columns that don't have any parent table are named the same as what
+    the label would be in any case.
+
+    """
+
+    _resolve_label = None
+    """The name that should be used to identify this ColumnElement in a
+    select() object when "label resolution" logic is used; this refers
+    to using a string name in an expression like order_by() or group_by()
+    that wishes to target a labeled expression in the columns clause.
+
+    The name is distinct from that of .name or ._label to account for the case
+    where anonymizing logic may be used to change the name that's actually
+    rendered at compile time; this attribute should hold onto the original
+    name that was user-assigned when producing a .label() construct.
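+
+    For example (a minimal sketch; ``users`` is a hypothetical table)::
+
+        stmt = select([users.c.name.label('n')]).order_by('n')
+        # the string 'n' is resolved to the labeled expression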
+ + """ + + _allow_label_resolve = True + """A flag that can be flipped to prevent a column from being resolvable + by string label name.""" + + _alt_names = () + + def self_group(self, against=None): + if (against in (operators.and_, operators.or_, operators._asbool) and + self.type._type_affinity + is type_api.BOOLEANTYPE._type_affinity): + return AsBoolean(self, operators.istrue, operators.isfalse) + elif (against in (operators.any_op, operators.all_op)): + return Grouping(self) + else: + return self + + def _negate(self): + if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity: + # TODO: see the note in AsBoolean that it seems to assume + # the element is the True_() / False_() constant, so this + # is too broad + return AsBoolean(self, operators.isfalse, operators.istrue) + else: + return super(ColumnElement, self)._negate() + + @util.memoized_property + def type(self): + return type_api.NULLTYPE + + @util.memoized_property + def comparator(self): + try: + comparator_factory = self.type.comparator_factory + except AttributeError: + raise TypeError( + "Object %r associated with '.type' attribute " + "is not a TypeEngine class or object" % self.type) + else: + return comparator_factory(self) + + def __getattr__(self, key): + try: + return getattr(self.comparator, key) + except AttributeError: + raise AttributeError( + 'Neither %r object nor %r object has an attribute %r' % ( + type(self).__name__, + type(self.comparator).__name__, + key) + ) + + def operate(self, op, *other, **kwargs): + return op(self.comparator, *other, **kwargs) + + def reverse_operate(self, op, other, **kwargs): + return op(other, self.comparator, **kwargs) + + def _bind_param(self, operator, obj, type_=None): + return BindParameter(None, obj, + _compared_to_operator=operator, + type_=type_, + _compared_to_type=self.type, unique=True) + + @property + def expression(self): + """Return a column expression. + + Part of the inspection interface; returns self. + + """ + return self + + @property + def _select_iterable(self): + return (self, ) + + @util.memoized_property + def base_columns(self): + return util.column_set(c for c in self.proxy_set + if not hasattr(c, '_proxies')) + + @util.memoized_property + def proxy_set(self): + s = util.column_set([self]) + if hasattr(self, '_proxies'): + for c in self._proxies: + s.update(c.proxy_set) + return s + + def shares_lineage(self, othercolumn): + """Return True if the given :class:`.ColumnElement` + has a common ancestor to this :class:`.ColumnElement`.""" + + return bool(self.proxy_set.intersection(othercolumn.proxy_set)) + + def _compare_name_for_result(self, other): + """Return True if the given column element compares to this one + when targeting within a result row.""" + + return hasattr(other, 'name') and hasattr(self, 'name') and \ + other.name == self.name + + def _make_proxy( + self, selectable, name=None, name_is_truncatable=False, **kw): + """Create a new :class:`.ColumnElement` representing this + :class:`.ColumnElement` as it appears in the select list of a + descending selectable. 
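+
+        For example (a minimal sketch; ``t`` is a hypothetical table),
+        the columns of an alias are proxies produced this way::
+
+            a = select([t.c.x]).alias('a')
+            assert a.c.x.shares_lineage(t.c.x)   # a.c.x proxies t.c.x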
+
+        """
+        if name is None:
+            name = self.anon_label
+            if self.key:
+                key = self.key
+            else:
+                try:
+                    key = str(self)
+                except exc.UnsupportedCompilationError:
+                    key = self.anon_label
+
+        else:
+            key = name
+        co = ColumnClause(
+            _as_truncated(name) if name_is_truncatable else name,
+            type_=getattr(self, 'type', None),
+            _selectable=selectable
+        )
+        co._proxies = [self]
+        if selectable._is_clone_of is not None:
+            co._is_clone_of = \
+                selectable._is_clone_of.columns.get(key)
+        selectable._columns[key] = co
+        return co
+
+    def compare(self, other, use_proxies=False, equivalents=None, **kw):
+        """Compare this ColumnElement to another.
+
+        Special arguments understood:
+
+        :param use_proxies: when True, consider two columns that
+          share a common base column as equivalent (i.e. shares_lineage())
+
+        :param equivalents: a dictionary of columns as keys mapped to sets
+          of columns.  If the given "other" column is present in this
+          dictionary, if any of the columns in the corresponding set() pass
+          the comparison test, the result is True.  This is used to expand the
+          comparison to other columns that may be known to be equivalent to
+          this one via foreign key or other criterion.
+
+        """
+        to_compare = (other, )
+        if equivalents and other in equivalents:
+            to_compare = equivalents[other].union(to_compare)
+
+        for oth in to_compare:
+            if use_proxies and self.shares_lineage(oth):
+                return True
+            elif hash(oth) == hash(self):
+                return True
+        else:
+            return False
+
+    def cast(self, type_):
+        """Produce a type cast, i.e. ``CAST(<expression> AS <type>)``.
+
+        This is a shortcut to the :func:`~.expression.cast` function.
+
+        .. versionadded:: 1.0.7
+
+        """
+        return Cast(self, type_)
+
+    def label(self, name):
+        """Produce a column label, i.e. ``<expr> AS <name>``.
+
+        This is a shortcut to the :func:`~.expression.label` function.
+
+        If 'name' is None, an anonymous label name will be generated.
+
+        """
+        return Label(name, self, self.type)
+
+    @util.memoized_property
+    def anon_label(self):
+        """provides a constant 'anonymous label' for this ColumnElement.
+
+        This is a label() expression which will be named at compile time.
+        The same label() is returned each time anon_label is called so
+        that expressions can reference anon_label multiple times, producing
+        the same label name at compile time.
+
+        the compiler uses this function automatically at compile time
+        for expressions that are known to be 'unnamed' like binary
+        expressions and function calls.
+
+        """
+        while self._is_clone_of is not None:
+            self = self._is_clone_of
+
+        return _anonymous_label(
+            '%%(%d %s)s' % (id(self), getattr(self, 'name', 'anon'))
+        )
+
+
+class BindParameter(ColumnElement):
+    r"""Represent a "bound expression".
+
+    :class:`.BindParameter` is invoked explicitly using the
+    :func:`.bindparam` function, as in::
+
+        from sqlalchemy import bindparam
+
+        stmt = select([users_table]).\
+            where(users_table.c.name == bindparam('username'))
+
+    Detailed discussion of how :class:`.BindParameter` is used is
+    at :func:`.bindparam`.
+
+    .. seealso::
+
+        :func:`.bindparam`
+
+    """
+
+    __visit_name__ = 'bindparam'
+
+    _is_crud = False
+
+    def __init__(self, key, value=NO_ARG, type_=None,
+                 unique=False, required=NO_ARG,
+                 quote=None, callable_=None,
+                 isoutparam=False,
+                 _compared_to_operator=None,
+                 _compared_to_type=None):
+        r"""Produce a "bound expression".
+
+        The return value is an instance of :class:`.BindParameter`; this
+        is a :class:`.ColumnElement` subclass which represents a so-called
+        "placeholder" value in a SQL expression, the value of which is
+        supplied at the point at which the statement is executed against a
+        database connection.
+
+        In SQLAlchemy, the :func:`.bindparam` construct has
+        the ability to carry along the actual value that will be ultimately
+        used at expression time.  In this way, it serves not just as
+        a "placeholder" for eventual population, but also as a means of
+        representing so-called "unsafe" values which should not be rendered
+        directly in a SQL statement, but rather should be passed along
+        to the :term:`DBAPI` as values which need to be correctly escaped
+        and potentially handled for type-safety.
+
+        When using :func:`.bindparam` explicitly, the use case is typically
+        one of traditional deferment of parameters; the :func:`.bindparam`
+        construct accepts a name which can then be referred to at execution
+        time::
+
+            from sqlalchemy import bindparam
+
+            stmt = select([users_table]).\
+                where(users_table.c.name == bindparam('username'))
+
+        The above statement, when rendered, will produce SQL similar to::
+
+            SELECT id, name FROM user WHERE name = :username
+
+        In order to populate the value of ``:username`` above, the value
+        would typically be applied at execution time to a method
+        like :meth:`.Connection.execute`::
+
+            result = connection.execute(stmt, username='wendy')
+
+        Explicit use of :func:`.bindparam` is also common when producing
+        UPDATE or DELETE statements that are to be invoked multiple times,
+        where the WHERE criterion of the statement is to change on each
+        invocation, such as::
+
+            stmt = (users_table.update().
+                    where(users_table.c.name == bindparam('username')).
+                    values(fullname=bindparam('fullname'))
+                    )
+
+            connection.execute(
+                stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
+                       {"username": "jack", "fullname": "Jack Jones"},
+                       ]
+            )
+
+        SQLAlchemy's Core expression system makes wide use of
+        :func:`.bindparam` in an implicit sense.  It is typical that Python
+        literal values passed to virtually all SQL expression functions are
+        coerced into fixed :func:`.bindparam` constructs.  For example, given
+        a comparison operation such as::
+
+            expr = users_table.c.name == 'Wendy'
+
+        The above expression will produce a :class:`.BinaryExpression`
+        construct, where the left side is the :class:`.Column` object
+        representing the ``name`` column, and the right side is a
+        :class:`.BindParameter` representing the literal value::
+
+            print(repr(expr.right))
+            BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
+
+        The expression above will render SQL such as::
+
+            user.name = :name_1
+
+        Where the ``:name_1`` parameter name is an anonymous name.  The
+        actual string ``Wendy`` is not in the rendered string, but is carried
+        along where it is later used within statement execution.  If we
+        invoke a statement like the following::
+
+            stmt = select([users_table]).where(users_table.c.name == 'Wendy')
+            result = connection.execute(stmt)
+
+        We would see SQL logging output as::
+
+            SELECT "user".id, "user".name
+            FROM "user"
+            WHERE "user".name = %(name_1)s
+            {'name_1': 'Wendy'}
+
+        Above, we see that ``Wendy`` is passed as a parameter to the database,
+        while the placeholder ``:name_1`` is rendered in the appropriate form
+        for the target database, in this case the PostgreSQL database.
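+
+        The same bound value can be inspected without executing the
+        statement, via the compiled form (a minimal sketch)::
+
+            compiled = stmt.compile()
+            print(compiled.params)   # {'name_1': 'Wendy'}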
+
+        Similarly, :func:`.bindparam` is invoked automatically
+        when working with :term:`CRUD` statements as far as the "VALUES"
+        portion is concerned.  The :func:`.insert` construct produces an
+        ``INSERT`` expression which will, at statement execution time,
+        generate bound placeholders based on the arguments passed, as in::
+
+            stmt = users_table.insert()
+            result = connection.execute(stmt, name='Wendy')
+
+        The above will produce SQL output as::
+
+            INSERT INTO "user" (name) VALUES (%(name)s)
+            {'name': 'Wendy'}
+
+        The :class:`.Insert` construct, at compilation/execution time,
+        rendered a single :func:`.bindparam` mirroring the column
+        name ``name`` as a result of the single ``name`` parameter
+        we passed to the :meth:`.Connection.execute` method.
+
+        :param key:
+          the key (e.g. the name) for this bind param.
+          Will be used in the generated
+          SQL statement for dialects that use named parameters.  This
+          value may be modified when part of a compilation operation,
+          if other :class:`BindParameter` objects exist with the same
+          key, or if its length is too long and truncation is
+          required.
+
+        :param value:
+          Initial value for this bind param.  Will be used at statement
+          execution time as the value for this parameter passed to the
+          DBAPI, if no other value is indicated to the statement execution
+          method for this particular parameter name.  Defaults to ``None``.
+
+        :param callable\_:
+          A callable function that takes the place of "value".  The function
+          will be called at statement execution time to determine the
+          ultimate value.  Used for scenarios where the actual bind
+          value cannot be determined at the point at which the clause
+          construct is created, but embedded bind values are still desirable.
+
+        :param type\_:
+          A :class:`.TypeEngine` class or instance representing an optional
+          datatype for this :func:`.bindparam`.  If not passed, a type
+          may be determined automatically for the bind, based on the given
+          value; for example, trivial Python types such as ``str``,
+          ``int``, ``bool``
+          may result in the :class:`.String`, :class:`.Integer` or
+          :class:`.Boolean` types being automatically selected.
+
+          The type of a :func:`.bindparam` is significant especially in that
+          the type will apply pre-processing to the value before it is
+          passed to the database.  For example, a :func:`.bindparam` which
+          refers to a datetime value, and is specified as holding the
+          :class:`.DateTime` type, may apply conversion needed to the
+          value (such as stringification on SQLite) before passing the value
+          to the database.
+
+        :param unique:
+          if True, the key name of this :class:`.BindParameter` will be
+          modified if another :class:`.BindParameter` of the same name
+          already has been located within the containing
+          expression.  This flag is used generally by the internals
+          when producing so-called "anonymous" bound expressions; it
+          isn't generally applicable to explicitly-named :func:`.bindparam`
+          constructs.
+
+        :param required:
+          If ``True``, a value is required at execution time.  If not passed,
+          it defaults to ``True`` if neither :paramref:`.bindparam.value`
+          nor :paramref:`.bindparam.callable` were passed.  If either of these
+          parameters are present, then :paramref:`.bindparam.required`
+          defaults to ``False``.
+
+          .. versionchanged:: 0.8 If the ``required`` flag is not specified,
+             it will be set automatically to ``True`` or ``False`` depending
+             on whether or not the ``value`` or ``callable`` parameters
+             were specified.
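+
+          For example (a minimal sketch)::
+
+              bindparam('uname')              # required=True; no value or callable
+              bindparam('uname', value=None)  # required=False; value is None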
+ + :param quote: + True if this parameter name requires quoting and is not + currently known as a SQLAlchemy reserved word; this currently + only applies to the Oracle backend, where bound names must + sometimes be quoted. + + :param isoutparam: + if True, the parameter should be treated like a stored procedure + "OUT" parameter. This applies to backends such as Oracle which + support OUT parameters. + + .. seealso:: + + :ref:`coretutorial_bind_param` + + :ref:`coretutorial_insert_expressions` + + :func:`.outparam` + + """ + if isinstance(key, ColumnClause): + type_ = key.type + key = key.key + if required is NO_ARG: + required = (value is NO_ARG and callable_ is None) + if value is NO_ARG: + value = None + + if quote is not None: + key = quoted_name(key, quote) + + if unique: + self.key = _anonymous_label('%%(%d %s)s' % (id(self), key + or 'param')) + else: + self.key = key or _anonymous_label('%%(%d param)s' + % id(self)) + + # identifying key that won't change across + # clones, used to identify the bind's logical + # identity + self._identifying_key = self.key + + # key that was passed in the first place, used to + # generate new keys + self._orig_key = key or 'param' + + self.unique = unique + self.value = value + self.callable = callable_ + self.isoutparam = isoutparam + self.required = required + if type_ is None: + if _compared_to_type is not None: + self.type = \ + _compared_to_type.coerce_compared_value( + _compared_to_operator, value) + else: + self.type = type_api._resolve_value_to_type(value) + elif isinstance(type_, type): + self.type = type_() + else: + self.type = type_ + + def _with_value(self, value): + """Return a copy of this :class:`.BindParameter` with the given value + set. + """ + cloned = self._clone() + cloned.value = value + cloned.callable = None + cloned.required = False + if cloned.type is type_api.NULLTYPE: + cloned.type = type_api._resolve_value_to_type(value) + return cloned + + @property + def effective_value(self): + """Return the value of this bound parameter, + taking into account if the ``callable`` parameter + was set. + + The ``callable`` value will be evaluated + and returned if present, else ``value``. + + """ + if self.callable: + return self.callable() + else: + return self.value + + def _clone(self): + c = ClauseElement._clone(self) + if self.unique: + c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key + or 'param')) + return c + + def _convert_to_unique(self): + if not self.unique: + self.unique = True + self.key = _anonymous_label( + '%%(%d %s)s' % (id(self), self._orig_key or 'param')) + + def compare(self, other, **kw): + """Compare this :class:`BindParameter` to the given + clause.""" + + return isinstance(other, BindParameter) \ + and self.type._compare_type_affinity(other.type) \ + and self.value == other.value \ + and self.callable == other.callable + + def __getstate__(self): + """execute a deferred value for serialization purposes.""" + + d = self.__dict__.copy() + v = self.value + if self.callable: + v = self.callable() + d['callable'] = None + d['value'] = v + return d + + def __repr__(self): + return 'BindParameter(%r, %r, type_=%r)' % (self.key, + self.value, self.type) + + +class TypeClause(ClauseElement): + """Handle a type keyword in a SQL statement. + + Used by the ``Case`` statement. + + """ + + __visit_name__ = 'typeclause' + + def __init__(self, type): + self.type = type + + +class TextClause(Executable, ClauseElement): + """Represent a literal SQL text fragment. 
+
+    E.g.::
+
+        from sqlalchemy import text
+
+        t = text("SELECT * FROM users")
+        result = connection.execute(t)
+
+
+    The :class:`.Text` construct is produced using the :func:`.text`
+    function; see that function for full documentation.
+
+    .. seealso::
+
+        :func:`.text`
+
+    """
+
+    __visit_name__ = 'textclause'
+
+    _bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
+    _execution_options = \
+        Executable._execution_options.union(
+            {'autocommit': PARSE_AUTOCOMMIT})
+
+    def __init__(self, text, bind=None):
+        self._bind = bind
+        self._bindparams = {}
+
+        def repl(m):
+            self._bindparams[m.group(1)] = BindParameter(m.group(1))
+            return ':%s' % m.group(1)
+
+        # scan the string and search for bind parameter names, add them
+        # to the list of bindparams
+        self.text = self._bind_params_regex.sub(repl, text)
+
+    @classmethod
+    def _create_text(cls, text, bind=None, bindparams=None,
+                     typemap=None, autocommit=None):
+        """Construct a new :class:`.TextClause` clause, representing
+        a textual SQL string directly.
+
+        E.g.::
+
+            from sqlalchemy import text
+
+            t = text("SELECT * FROM users")
+            result = connection.execute(t)
+
+        Bind parameters are specified by name, using the format ``:name``.
+        E.g.::
+
+            t = text("SELECT * FROM users WHERE id=:user_id")
+            result = connection.execute(t, user_id=12)
+
+        :param text:
+          the text of the SQL statement to be created.  Use ``:<param>``
+          to specify bind parameters; they will be compiled to their
+          engine-specific format.
+
+        :param autocommit:
+          Deprecated.  Use .execution_options(autocommit=<value>)
+          to set the autocommit option.
+
+        :param bind:
+          an optional connection or engine to be used for this text query.
+
+        :param bindparams:
+          Deprecated.  A list of :func:`.bindparam` instances used to
+          provide information about parameters embedded in the statement.
+          This argument now invokes the :meth:`.TextClause.bindparams`
+          method on the construct before returning it.  E.g.::
+
+              stmt = text("SELECT * FROM table WHERE id=:id",
+                          bindparams=[bindparam('id', value=5, type_=Integer)])
+
+          Is equivalent to::
+
+              stmt = text("SELECT * FROM table WHERE id=:id").\
+                  bindparams(bindparam('id', value=5, type_=Integer))
+
+          .. deprecated:: 0.9.0 the :meth:`.TextClause.bindparams` method
+             supersedes the ``bindparams`` argument to :func:`.text`.
+
+        :param typemap:
+          Deprecated.  A dictionary mapping the names of columns
+          represented in the columns clause of a ``SELECT`` statement
+          to type objects,
+          which will be used to perform post-processing on columns within
+          the result set.  This parameter now invokes the
+          :meth:`.TextClause.columns` method, which returns a
+          :class:`.TextAsFrom` construct that gains a ``.c`` collection and
+          can be embedded in other expressions.  E.g.::
+
+              stmt = text("SELECT * FROM table",
+                          typemap={'id': Integer, 'name': String},
+                          )
+
+          Is equivalent to::
+
+              stmt = text("SELECT * FROM table").columns(id=Integer,
+                                                         name=String)
+
+          Or alternatively::
+
+              from sqlalchemy.sql import column
+              stmt = text("SELECT * FROM table").columns(
+                  column('id', Integer),
+                  column('name', String)
+              )
+
+          .. deprecated:: 0.9.0 the :meth:`.TextClause.columns` method
+             supersedes the ``typemap`` argument to :func:`.text`.
+
+        .. seealso::
+
+            :ref:`sqlexpression_text` - in the Core tutorial
+
+            :ref:`orm_tutorial_literal_sql` - in the ORM tutorial
+
+        """
+        stmt = TextClause(text, bind=bind)
+        if bindparams:
+            stmt = stmt.bindparams(*bindparams)
+        if typemap:
+            stmt = stmt.columns(**typemap)
+        if autocommit is not None:
+            util.warn_deprecated('autocommit on text() is deprecated.  '
+                                 'Use .execution_options(autocommit=True)')
+            stmt = stmt.execution_options(autocommit=autocommit)
+
+        return stmt
+
+    @_generative
+    def bindparams(self, *binds, **names_to_values):
+        """Establish the values and/or types of bound parameters within
+        this :class:`.TextClause` construct.
+
+        Given a text construct such as::
+
+            from sqlalchemy import text
+            stmt = text("SELECT id, name FROM user WHERE name=:name "
+                        "AND timestamp=:timestamp")
+
+        the :meth:`.TextClause.bindparams` method can be used to establish
+        the initial value of ``:name`` and ``:timestamp``,
+        using simple keyword arguments::
+
+            stmt = stmt.bindparams(name='jack',
+                                   timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
+
+        Where above, new :class:`.BindParameter` objects
+        will be generated with the names ``name`` and ``timestamp``, and
+        values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``,
+        respectively.
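+
+        The assembled values can be checked on the compiled form
+        (a minimal sketch)::
+
+            print(stmt.compile().params)
+            # {'name': 'jack', 'timestamp': datetime.datetime(2012, 10, 8, 15, 12, 5)}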
The types will be + inferred from the values given, in this case :class:`.String` and + :class:`.DateTime`. + + When specific typing behavior is needed, the positional ``*binds`` + argument can be used in which to specify :func:`.bindparam` constructs + directly. These constructs must include at least the ``key`` + argument, then an optional value and type:: + + from sqlalchemy import bindparam + stmt = stmt.bindparams( + bindparam('name', value='jack', type_=String), + bindparam('timestamp', type_=DateTime) + ) + + Above, we specified the type of :class:`.DateTime` for the + ``timestamp`` bind, and the type of :class:`.String` for the ``name`` + bind. In the case of ``name`` we also set the default value of + ``"jack"``. + + Additional bound parameters can be supplied at statement execution + time, e.g.:: + + result = connection.execute(stmt, + timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)) + + The :meth:`.TextClause.bindparams` method can be called repeatedly, + where it will re-use existing :class:`.BindParameter` objects to add + new information. For example, we can call + :meth:`.TextClause.bindparams` first with typing information, and a + second time with value information, and it will be combined:: + + stmt = text("SELECT id, name FROM user WHERE name=:name " + "AND timestamp=:timestamp") + stmt = stmt.bindparams( + bindparam('name', type_=String), + bindparam('timestamp', type_=DateTime) + ) + stmt = stmt.bindparams( + name='jack', + timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5) + ) + + + .. versionadded:: 0.9.0 The :meth:`.TextClause.bindparams` method + supersedes the argument ``bindparams`` passed to + :func:`~.expression.text`. + + + """ + self._bindparams = new_params = self._bindparams.copy() + + for bind in binds: + try: + existing = new_params[bind.key] + except KeyError: + raise exc.ArgumentError( + "This text() construct doesn't define a " + "bound parameter named %r" % bind.key) + else: + new_params[existing.key] = bind + + for key, value in names_to_values.items(): + try: + existing = new_params[key] + except KeyError: + raise exc.ArgumentError( + "This text() construct doesn't define a " + "bound parameter named %r" % key) + else: + new_params[key] = existing._with_value(value) + + @util.dependencies('sqlalchemy.sql.selectable') + def columns(self, selectable, *cols, **types): + """Turn this :class:`.TextClause` object into a :class:`.TextAsFrom` + object that can be embedded into another statement. + + This function essentially bridges the gap between an entirely + textual SELECT statement and the SQL expression language concept + of a "selectable":: + + from sqlalchemy.sql import column, text + + stmt = text("SELECT id, name FROM some_table") + stmt = stmt.columns(column('id'), column('name')).alias('st') + + stmt = select([mytable]).\ + select_from( + mytable.join(stmt, mytable.c.name == stmt.c.name) + ).where(stmt.c.id > 5) + + Above, we pass a series of :func:`.column` elements to the + :meth:`.TextClause.columns` method positionally. These :func:`.column` + elements now become first class elements upon the :attr:`.TextAsFrom.c` + column collection, just like any other selectable. + + The column expressions we pass to :meth:`.TextClause.columns` may + also be typed; when we do so, these :class:`.TypeEngine` objects become + the effective return type of the column, so that SQLAlchemy's + result-set-processing systems may be used on the return values. 
+ This is often needed for types such as date or boolean types, as well + as for unicode processing on some dialect configurations:: + + stmt = text("SELECT id, name, timestamp FROM some_table") + stmt = stmt.columns( + column('id', Integer), + column('name', Unicode), + column('timestamp', DateTime) + ) + + for id, name, timestamp in connection.execute(stmt): + print(id, name, timestamp) + + As a shortcut to the above syntax, keyword arguments referring to + types alone may be used, if only type conversion is needed:: + + stmt = text("SELECT id, name, timestamp FROM some_table") + stmt = stmt.columns( + id=Integer, + name=Unicode, + timestamp=DateTime + ) + + for id, name, timestamp in connection.execute(stmt): + print(id, name, timestamp) + + The positional form of :meth:`.TextClause.columns` also provides + the unique feature of **positional column targeting**, which is + particularly useful when using the ORM with complex textual queries. + If we specify the columns from our model to :meth:`.TextClause.columns`, + the result set will match to those columns positionally, meaning the + name or origin of the column in the textual SQL doesn't matter:: + + stmt = text("SELECT users.id, addresses.id, users.id, " + "users.name, addresses.email_address AS email " + "FROM users JOIN addresses ON users.id=addresses.user_id " + "WHERE users.id = 1").columns( + User.id, + Address.id, + Address.user_id, + User.name, + Address.email_address + ) + + query = session.query(User).from_statement(stmt).options( + contains_eager(User.addresses)) + + .. versionadded:: 1.1 the :meth:`.TextClause.columns` method now + offers positional column targeting in the result set when + the column expressions are passed purely positionally. + + The :meth:`.TextClause.columns` method provides a direct + route to calling :meth:`.FromClause.alias` as well as + :meth:`.SelectBase.cte` against a textual SELECT statement:: + + stmt = stmt.columns(id=Integer, name=String).cte('st') + + stmt = select([sometable]).where(sometable.c.id == stmt.c.id) + + .. versionadded:: 0.9.0 :func:`.text` can now be converted into a + fully featured "selectable" construct using the + :meth:`.TextClause.columns` method. This method supersedes the + ``typemap`` argument to :func:`.text`. + + + """ + + positional_input_cols = [ + ColumnClause(col.key, types.pop(col.key)) + if col.key in types + else col + for col in cols + ] + keyed_input_cols = [ + ColumnClause(key, type_) for key, type_ in types.items()] + + return selectable.TextAsFrom( + self, + positional_input_cols + keyed_input_cols, + positional=bool(positional_input_cols) and not keyed_input_cols) + + @property + def type(self): + return type_api.NULLTYPE + + @property + def comparator(self): + return self.type.comparator_factory(self) + + def self_group(self, against=None): + if against is operators.in_op: + return Grouping(self) + else: + return self + + def _copy_internals(self, clone=_clone, **kw): + self._bindparams = dict((b.key, clone(b, **kw)) + for b in self._bindparams.values()) + + def get_children(self, **kwargs): + return list(self._bindparams.values()) + + def compare(self, other): + return isinstance(other, TextClause) and other.text == self.text + + +class Null(ColumnElement): + """Represent the NULL keyword in a SQL statement. + + :class:`.Null` is accessed as a constant via the + :func:`.null` function. 
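+
+    For example (a minimal sketch; ``t`` is a hypothetical table)::
+
+        from sqlalchemy import null
+        stmt = select([t]).where(t.c.parent_id == null())
+        # renders: SELECT ... FROM t WHERE t.parent_id IS NULL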
+ + """ + + __visit_name__ = 'null' + + @util.memoized_property + def type(self): + return type_api.NULLTYPE + + @classmethod + def _instance(cls): + """Return a constant :class:`.Null` construct.""" + + return Null() + + def compare(self, other): + return isinstance(other, Null) + + +class False_(ColumnElement): + """Represent the ``false`` keyword, or equivalent, in a SQL statement. + + :class:`.False_` is accessed as a constant via the + :func:`.false` function. + + """ + + __visit_name__ = 'false' + + @util.memoized_property + def type(self): + return type_api.BOOLEANTYPE + + def _negate(self): + return True_() + + @classmethod + def _instance(cls): + """Return a :class:`.False_` construct. + + E.g.:: + + >>> from sqlalchemy import false + >>> print select([t.c.x]).where(false()) + SELECT x FROM t WHERE false + + A backend which does not support true/false constants will render as + an expression against 1 or 0:: + + >>> print select([t.c.x]).where(false()) + SELECT x FROM t WHERE 0 = 1 + + The :func:`.true` and :func:`.false` constants also feature + "short circuit" operation within an :func:`.and_` or :func:`.or_` + conjunction:: + + >>> print select([t.c.x]).where(or_(t.c.x > 5, true())) + SELECT x FROM t WHERE true + + >>> print select([t.c.x]).where(and_(t.c.x > 5, false())) + SELECT x FROM t WHERE false + + .. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature + better integrated behavior within conjunctions and on dialects + that don't support true/false constants. + + .. seealso:: + + :func:`.true` + + """ + + return False_() + + def compare(self, other): + return isinstance(other, False_) + + +class True_(ColumnElement): + """Represent the ``true`` keyword, or equivalent, in a SQL statement. + + :class:`.True_` is accessed as a constant via the + :func:`.true` function. + + """ + + __visit_name__ = 'true' + + @util.memoized_property + def type(self): + return type_api.BOOLEANTYPE + + def _negate(self): + return False_() + + @classmethod + def _ifnone(cls, other): + if other is None: + return cls._instance() + else: + return other + + @classmethod + def _instance(cls): + """Return a constant :class:`.True_` construct. + + E.g.:: + + >>> from sqlalchemy import true + >>> print select([t.c.x]).where(true()) + SELECT x FROM t WHERE true + + A backend which does not support true/false constants will render as + an expression against 1 or 0:: + + >>> print select([t.c.x]).where(true()) + SELECT x FROM t WHERE 1 = 1 + + The :func:`.true` and :func:`.false` constants also feature + "short circuit" operation within an :func:`.and_` or :func:`.or_` + conjunction:: + + >>> print select([t.c.x]).where(or_(t.c.x > 5, true())) + SELECT x FROM t WHERE true + + >>> print select([t.c.x]).where(and_(t.c.x > 5, false())) + SELECT x FROM t WHERE false + + .. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature + better integrated behavior within conjunctions and on dialects + that don't support true/false constants. + + .. seealso:: + + :func:`.false` + + """ + + return True_() + + def compare(self, other): + return isinstance(other, True_) + + +class ClauseList(ClauseElement): + """Describe a list of clauses, separated by an operator. + + By default, is comma-separated, such as a column listing. 
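+
+    For example (a minimal sketch, using this internal construct
+    directly)::
+
+        from sqlalchemy.sql import column
+        print(ClauseList(column('a'), column('b')))   # renders "a, b"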
+ + """ + __visit_name__ = 'clauselist' + + def __init__(self, *clauses, **kwargs): + self.operator = kwargs.pop('operator', operators.comma_op) + self.group = kwargs.pop('group', True) + self.group_contents = kwargs.pop('group_contents', True) + text_converter = kwargs.pop( + '_literal_as_text', + _expression_literal_as_text) + if self.group_contents: + self.clauses = [ + text_converter(clause).self_group(against=self.operator) + for clause in clauses] + else: + self.clauses = [ + text_converter(clause) + for clause in clauses] + + def __iter__(self): + return iter(self.clauses) + + def __len__(self): + return len(self.clauses) + + @property + def _select_iterable(self): + return iter(self) + + def append(self, clause): + if self.group_contents: + self.clauses.append(_literal_as_text(clause). + self_group(against=self.operator)) + else: + self.clauses.append(_literal_as_text(clause)) + + def _copy_internals(self, clone=_clone, **kw): + self.clauses = [clone(clause, **kw) for clause in self.clauses] + + def get_children(self, **kwargs): + return self.clauses + + @property + def _from_objects(self): + return list(itertools.chain(*[c._from_objects for c in self.clauses])) + + def self_group(self, against=None): + if self.group and operators.is_precedent(self.operator, against): + return Grouping(self) + else: + return self + + def compare(self, other, **kw): + """Compare this :class:`.ClauseList` to the given :class:`.ClauseList`, + including a comparison of all the clause items. + + """ + if not isinstance(other, ClauseList) and len(self.clauses) == 1: + return self.clauses[0].compare(other, **kw) + elif isinstance(other, ClauseList) and \ + len(self.clauses) == len(other.clauses) and \ + self.operator is other.operator: + + if self.operator in (operators.and_, operators.or_): + completed = set() + for clause in self.clauses: + for other_clause in set(other.clauses).difference(completed): + if clause.compare(other_clause, **kw): + completed.add(other_clause) + break + return len(completed) == len(other.clauses) + else: + for i in range(0, len(self.clauses)): + if not self.clauses[i].compare(other.clauses[i], **kw): + return False + else: + return True + else: + return False + + +class BooleanClauseList(ClauseList, ColumnElement): + __visit_name__ = 'clauselist' + + def __init__(self, *arg, **kw): + raise NotImplementedError( + "BooleanClauseList has a private constructor") + + @classmethod + def _construct(cls, operator, continue_on, skip_on, *clauses, **kw): + convert_clauses = [] + + clauses = [ + _expression_literal_as_text(clause) + for clause in + util.coerce_generator_arg(clauses) + ] + for clause in clauses: + + if isinstance(clause, continue_on): + continue + elif isinstance(clause, skip_on): + return clause.self_group(against=operators._asbool) + + convert_clauses.append(clause) + + if len(convert_clauses) == 1: + return convert_clauses[0].self_group(against=operators._asbool) + elif not convert_clauses and clauses: + return clauses[0].self_group(against=operators._asbool) + + convert_clauses = [c.self_group(against=operator) + for c in convert_clauses] + + self = cls.__new__(cls) + self.clauses = convert_clauses + self.group = True + self.operator = operator + self.group_contents = True + self.type = type_api.BOOLEANTYPE + return self + + @classmethod + def and_(cls, *clauses): + """Produce a conjunction of expressions joined by ``AND``. 
+ + E.g.:: + + from sqlalchemy import and_ + + stmt = select([users_table]).where( + and_( + users_table.c.name == 'wendy', + users_table.c.enrolled == True + ) + ) + + The :func:`.and_` conjunction is also available using the + Python ``&`` operator (though note that compound expressions + need to be parenthesized in order to function with Python + operator precedence behavior):: + + stmt = select([users_table]).where( + (users_table.c.name == 'wendy') & + (users_table.c.enrolled == True) + ) + + The :func:`.and_` operation is also implicit in some cases; + the :meth:`.Select.where` method for example can be invoked multiple + times against a statement, which will have the effect of each + clause being combined using :func:`.and_`:: + + stmt = select([users_table]).\ + where(users_table.c.name == 'wendy').\ + where(users_table.c.enrolled == True) + + .. seealso:: + + :func:`.or_` + + """ + return cls._construct(operators.and_, True_, False_, *clauses) + + @classmethod + def or_(cls, *clauses): + """Produce a conjunction of expressions joined by ``OR``. + + E.g.:: + + from sqlalchemy import or_ + + stmt = select([users_table]).where( + or_( + users_table.c.name == 'wendy', + users_table.c.name == 'jack' + ) + ) + + The :func:`.or_` conjunction is also available using the + Python ``|`` operator (though note that compound expressions + need to be parenthesized in order to function with Python + operator precedence behavior):: + + stmt = select([users_table]).where( + (users_table.c.name == 'wendy') | + (users_table.c.name == 'jack') + ) + + .. seealso:: + + :func:`.and_` + + """ + return cls._construct(operators.or_, False_, True_, *clauses) + + @property + def _select_iterable(self): + return (self, ) + + def self_group(self, against=None): + if not self.clauses: + return self + else: + return super(BooleanClauseList, self).self_group(against=against) + + def _negate(self): + return ClauseList._negate(self) + + +and_ = BooleanClauseList.and_ +or_ = BooleanClauseList.or_ + + +class Tuple(ClauseList, ColumnElement): + """Represent a SQL tuple.""" + + def __init__(self, *clauses, **kw): + """Return a :class:`.Tuple`. + + Main usage is to produce a composite IN construct:: + + from sqlalchemy import tuple_ + + tuple_(table.c.col1, table.c.col2).in_( + [(1, 2), (5, 12), (10, 19)] + ) + + .. warning:: + + The composite IN construct is not supported by all backends, + and is currently known to work on PostgreSQL and MySQL, + but not SQLite. Unsupported backends will raise + a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such + an expression is invoked. + + """ + + clauses = [_literal_as_binds(c) for c in clauses] + self._type_tuple = [arg.type for arg in clauses] + self.type = kw.pop('type_', self._type_tuple[0] + if self._type_tuple else type_api.NULLTYPE) + + super(Tuple, self).__init__(*clauses, **kw) + + @property + def _select_iterable(self): + return (self, ) + + def _bind_param(self, operator, obj, type_=None): + return Tuple(*[ + BindParameter(None, o, _compared_to_operator=operator, + _compared_to_type=compared_to_type, unique=True, + type_=type_) + for o, compared_to_type in zip(obj, self._type_tuple) + ]).self_group() + + +class Case(ColumnElement): + """Represent a ``CASE`` expression. 
+ + :class:`.Case` is produced using the :func:`.case` factory function, + as in:: + + from sqlalchemy import case + + stmt = select([users_table]).\ + where( + case( + [ + (users_table.c.name == 'wendy', 'W'), + (users_table.c.name == 'jack', 'J') + ], + else_='E' + ) + ) + + Details on :class:`.Case` usage is at :func:`.case`. + + .. seealso:: + + :func:`.case` + + """ + + __visit_name__ = 'case' + + def __init__(self, whens, value=None, else_=None): + r"""Produce a ``CASE`` expression. + + The ``CASE`` construct in SQL is a conditional object that + acts somewhat analogously to an "if/then" construct in other + languages. It returns an instance of :class:`.Case`. + + :func:`.case` in its usual form is passed a list of "when" + constructs, that is, a list of conditions and results as tuples:: + + from sqlalchemy import case + + stmt = select([users_table]).\ + where( + case( + [ + (users_table.c.name == 'wendy', 'W'), + (users_table.c.name == 'jack', 'J') + ], + else_='E' + ) + ) + + The above statement will produce SQL resembling:: + + SELECT id, name FROM user + WHERE CASE + WHEN (name = :name_1) THEN :param_1 + WHEN (name = :name_2) THEN :param_2 + ELSE :param_3 + END + + When simple equality expressions of several values against a single + parent column are needed, :func:`.case` also has a "shorthand" format + used via the + :paramref:`.case.value` parameter, which is passed a column + expression to be compared. In this form, the :paramref:`.case.whens` + parameter is passed as a dictionary containing expressions to be + compared against keyed to result expressions. The statement below is + equivalent to the preceding statement:: + + stmt = select([users_table]).\ + where( + case( + {"wendy": "W", "jack": "J"}, + value=users_table.c.name, + else_='E' + ) + ) + + The values which are accepted as result values in + :paramref:`.case.whens` as well as with :paramref:`.case.else_` are + coerced from Python literals into :func:`.bindparam` constructs. + SQL expressions, e.g. :class:`.ColumnElement` constructs, are accepted + as well. To coerce a literal string expression into a constant + expression rendered inline, use the :func:`.literal_column` construct, + as in:: + + from sqlalchemy import case, literal_column + + case( + [ + ( + orderline.c.qty > 100, + literal_column("'greaterthan100'") + ), + ( + orderline.c.qty > 10, + literal_column("'greaterthan10'") + ) + ], + else_=literal_column("'lessthan10'") + ) + + The above will render the given constants without using bound + parameters for the result values (but still for the comparison + values), as in:: + + CASE + WHEN (orderline.qty > :qty_1) THEN 'greaterthan100' + WHEN (orderline.qty > :qty_2) THEN 'greaterthan10' + ELSE 'lessthan10' + END + + :param whens: The criteria to be compared against, + :paramref:`.case.whens` accepts two different forms, based on + whether or not :paramref:`.case.value` is used. 
+
+        In the first form, it accepts a list of 2-tuples; each 2-tuple
+        consists of ``(<sql expression>, <value>)``, where the SQL
+        expression is a boolean expression and "value" is a resulting value,
+        e.g.::
+
+            case([
+                (users_table.c.name == 'wendy', 'W'),
+                (users_table.c.name == 'jack', 'J')
+            ])
+
+        In the second form, it accepts a Python dictionary of comparison
+        values mapped to a resulting value; this form requires
+        :paramref:`.case.value` to be present, and values will be compared
+        using the ``==`` operator, e.g.::
+
+            case(
+                {"wendy": "W", "jack": "J"},
+                value=users_table.c.name
+            )
+
+        :param value: An optional SQL expression which will be used as a
+          fixed "comparison point" for candidate values within a dictionary
+          passed to :paramref:`.case.whens`.
+
+        :param else\_: An optional SQL expression which will be the evaluated
+          result of the ``CASE`` construct if all expressions within
+          :paramref:`.case.whens` evaluate to false.  When omitted, most
+          databases will produce a result of NULL if none of the "when"
+          expressions evaluate to true.
+
+
+        """
+
+        try:
+            whens = util.dictlike_iteritems(whens)
+        except TypeError:
+            pass
+
+        if value is not None:
+            whenlist = [
+                (_literal_as_binds(c).self_group(),
+                 _literal_as_binds(r)) for (c, r) in whens
+            ]
+        else:
+            whenlist = [
+                (_no_literals(c).self_group(),
+                 _literal_as_binds(r)) for (c, r) in whens
+            ]
+
+        if whenlist:
+            type_ = list(whenlist[-1])[-1].type
+        else:
+            type_ = None
+
+        if value is None:
+            self.value = None
+        else:
+            self.value = _literal_as_binds(value)
+
+        self.type = type_
+        self.whens = whenlist
+        if else_ is not None:
+            self.else_ = _literal_as_binds(else_)
+        else:
+            self.else_ = None
+
+    def _copy_internals(self, clone=_clone, **kw):
+        if self.value is not None:
+            self.value = clone(self.value, **kw)
+        self.whens = [(clone(x, **kw), clone(y, **kw))
+                      for x, y in self.whens]
+        if self.else_ is not None:
+            self.else_ = clone(self.else_, **kw)
+
+    def get_children(self, **kwargs):
+        if self.value is not None:
+            yield self.value
+        for x, y in self.whens:
+            yield x
+            yield y
+        if self.else_ is not None:
+            yield self.else_
+
+    @property
+    def _from_objects(self):
+        return list(itertools.chain(*[x._from_objects for x in
+                                      self.get_children()]))
+
+
+def literal_column(text, type_=None):
+    r"""Produce a :class:`.ColumnClause` object that has the
+    :paramref:`.column.is_literal` flag set to True.
+
+    :func:`.literal_column` is similar to :func:`.column`, except that
+    it is more often used as a "standalone" column expression that renders
+    exactly as stated; while :func:`.column` stores a string name that
+    will be assumed to be part of a table and may be quoted as such,
+    :func:`.literal_column` can be that, or any other arbitrary column-oriented
+    expression.
+
+    :param text: the text of the expression; can be any SQL expression.
+      Quoting rules will not be applied. To specify a column-name expression
+      which should be subject to quoting rules, use the :func:`column`
+      function.
+
+    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
+      object which will
+      provide result-set translation and additional expression semantics for
+      this column. If left as None the type will be NullType.
+
+    .. seealso::
+
+        :func:`.column`
+
+        :func:`.text`
+
+        :ref:`sqlexpression_literal_column`
+
+    """
+    return ColumnClause(text, type_=type_, is_literal=True)
+
+
+class Cast(ColumnElement):
+    """Represent a ``CAST`` expression.
+ + :class:`.Cast` is produced using the :func:`.cast` factory function, + as in:: + + from sqlalchemy import cast, Numeric + + stmt = select([ + cast(product_table.c.unit_price, Numeric(10, 4)) + ]) + + Details on :class:`.Cast` usage is at :func:`.cast`. + + .. seealso:: + + :func:`.cast` + + """ + + __visit_name__ = 'cast' + + def __init__(self, expression, type_): + """Produce a ``CAST`` expression. + + :func:`.cast` returns an instance of :class:`.Cast`. + + E.g.:: + + from sqlalchemy import cast, Numeric + + stmt = select([ + cast(product_table.c.unit_price, Numeric(10, 4)) + ]) + + The above statement will produce SQL resembling:: + + SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product + + The :func:`.cast` function performs two distinct functions when + used. The first is that it renders the ``CAST`` expression within + the resulting SQL string. The second is that it associates the given + type (e.g. :class:`.TypeEngine` class or instance) with the column + expression on the Python side, which means the expression will take + on the expression operator behavior associated with that type, + as well as the bound-value handling and result-row-handling behavior + of the type. + + .. versionchanged:: 0.9.0 :func:`.cast` now applies the given type + to the expression such that it takes effect on the bound-value, + e.g. the Python-to-database direction, in addition to the + result handling, e.g. database-to-Python, direction. + + An alternative to :func:`.cast` is the :func:`.type_coerce` function. + This function performs the second task of associating an expression + with a specific type, but does not render the ``CAST`` expression + in SQL. + + :param expression: A SQL expression, such as a :class:`.ColumnElement` + expression or a Python string which will be coerced into a bound + literal value. + + :param type_: A :class:`.TypeEngine` class or instance indicating + the type to which the ``CAST`` should apply. + + .. seealso:: + + :func:`.type_coerce` - Python-side type coercion without emitting + CAST. + + """ + self.type = type_api.to_instance(type_) + self.clause = _literal_as_binds(expression, type_=self.type) + self.typeclause = TypeClause(self.type) + + def _copy_internals(self, clone=_clone, **kw): + self.clause = clone(self.clause, **kw) + self.typeclause = clone(self.typeclause, **kw) + + def get_children(self, **kwargs): + return self.clause, self.typeclause + + @property + def _from_objects(self): + return self.clause._from_objects + + +class TypeCoerce(ColumnElement): + """Represent a Python-side type-coercion wrapper. + + :class:`.TypeCoerce` supplies the :func:`.expression.type_coerce` + function; see that function for usage details. + + .. versionchanged:: 1.1 The :func:`.type_coerce` function now produces + a persistent :class:`.TypeCoerce` wrapper object rather than + translating the given object in place. + + .. seealso:: + + :func:`.expression.type_coerce` + + """ + + __visit_name__ = 'type_coerce' + + def __init__(self, expression, type_): + """Associate a SQL expression with a particular type, without rendering + ``CAST``. 
+ + E.g.:: + + from sqlalchemy import type_coerce + + stmt = select([ + type_coerce(log_table.date_string, StringDateTime()) + ]) + + The above construct will produce a :class:`.TypeCoerce` object, which + renders SQL that labels the expression, but otherwise does not + modify its value on the SQL side:: + + SELECT date_string AS anon_1 FROM log + + When result rows are fetched, the ``StringDateTime`` type + will be applied to result rows on behalf of the ``date_string`` column. + The rationale for the "anon_1" label is so that the type-coerced + column remains separate in the list of result columns vs. other + type-coerced or direct values of the target column. In order to + provide a named label for the expression, use + :meth:`.ColumnElement.label`:: + + stmt = select([ + type_coerce( + log_table.date_string, StringDateTime()).label('date') + ]) + + + A type that features bound-value handling will also have that behavior + take effect when literal values or :func:`.bindparam` constructs are + passed to :func:`.type_coerce` as targets. + For example, if a type implements the + :meth:`.TypeEngine.bind_expression` + method or :meth:`.TypeEngine.bind_processor` method or equivalent, + these functions will take effect at statement compilation/execution + time when a literal value is passed, as in:: + + # bound-value handling of MyStringType will be applied to the + # literal value "some string" + stmt = select([type_coerce("some string", MyStringType)]) + + :func:`.type_coerce` is similar to the :func:`.cast` function, + except that it does not render the ``CAST`` expression in the resulting + statement. + + :param expression: A SQL expression, such as a :class:`.ColumnElement` + expression or a Python string which will be coerced into a bound + literal value. + + :param type_: A :class:`.TypeEngine` class or instance indicating + the type to which the expression is coerced. + + .. seealso:: + + :func:`.cast` + + """ + self.type = type_api.to_instance(type_) + self.clause = _literal_as_binds(expression, type_=self.type) + + def _copy_internals(self, clone=_clone, **kw): + self.clause = clone(self.clause, **kw) + self.__dict__.pop('typed_expression', None) + + def get_children(self, **kwargs): + return self.clause, + + @property + def _from_objects(self): + return self.clause._from_objects + + @util.memoized_property + def typed_expression(self): + if isinstance(self.clause, BindParameter): + bp = self.clause._clone() + bp.type = self.type + return bp + else: + return self.clause + + +class Extract(ColumnElement): + """Represent a SQL EXTRACT clause, ``extract(field FROM expr)``.""" + + __visit_name__ = 'extract' + + def __init__(self, field, expr, **kwargs): + """Return a :class:`.Extract` construct. + + This is typically available as :func:`.extract` + as well as ``func.extract`` from the + :data:`.func` namespace. + + """ + self.type = type_api.INTEGERTYPE + self.field = field + self.expr = _literal_as_binds(expr, None) + + def _copy_internals(self, clone=_clone, **kw): + self.expr = clone(self.expr, **kw) + + def get_children(self, **kwargs): + return self.expr, + + @property + def _from_objects(self): + return self.expr._from_objects + + +class _label_reference(ColumnElement): + """Wrap a column expression as it appears in a 'reference' context. + + This expression is any that inclues an _order_by_label_element, + which is a Label, or a DESC / ASC construct wrapping a Label. 
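+
+    For example (a minimal sketch; ``users`` is a hypothetical table)::
+
+        lbl = users.c.name.label('n')
+        stmt = select([lbl]).order_by(lbl)   # ORDER BY targets the label "n"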
+ + The production of _label_reference() should occur when an expression + is added to this context; this includes the ORDER BY or GROUP BY of a + SELECT statement, as well as a few other places, such as the ORDER BY + within an OVER clause. + + """ + __visit_name__ = 'label_reference' + + def __init__(self, element): + self.element = element + + def _copy_internals(self, clone=_clone, **kw): + self.element = clone(self.element, **kw) + + @property + def _from_objects(self): + return () + + +class _textual_label_reference(ColumnElement): + __visit_name__ = 'textual_label_reference' + + def __init__(self, element): + self.element = element + + @util.memoized_property + def _text_clause(self): + return TextClause._create_text(self.element) + + +class UnaryExpression(ColumnElement): + """Define a 'unary' expression. + + A unary expression has a single column expression + and an operator. The operator can be placed on the left + (where it is called the 'operator') or right (where it is called the + 'modifier') of the column expression. + + :class:`.UnaryExpression` is the basis for several unary operators + including those used by :func:`.desc`, :func:`.asc`, :func:`.distinct`, + :func:`.nullsfirst` and :func:`.nullslast`. + + """ + __visit_name__ = 'unary' + + def __init__(self, element, operator=None, modifier=None, + type_=None, negate=None, wraps_column_expression=False): + self.operator = operator + self.modifier = modifier + self.element = element.self_group( + against=self.operator or self.modifier) + self.type = type_api.to_instance(type_) + self.negate = negate + self.wraps_column_expression = wraps_column_expression + + @classmethod + def _create_nullsfirst(cls, column): + """Produce the ``NULLS FIRST`` modifier for an ``ORDER BY`` expression. + + :func:`.nullsfirst` is intended to modify the expression produced + by :func:`.asc` or :func:`.desc`, and indicates how NULL values + should be handled when they are encountered during ordering:: + + + from sqlalchemy import desc, nullsfirst + + stmt = select([users_table]).\ + order_by(nullsfirst(desc(users_table.c.name))) + + The SQL expression from the above would resemble:: + + SELECT id, name FROM user ORDER BY name DESC NULLS FIRST + + Like :func:`.asc` and :func:`.desc`, :func:`.nullsfirst` is typically + invoked from the column expression itself using + :meth:`.ColumnElement.nullsfirst`, rather than as its standalone + function version, as in:: + + stmt = (select([users_table]). + order_by(users_table.c.name.desc().nullsfirst()) + ) + + .. seealso:: + + :func:`.asc` + + :func:`.desc` + + :func:`.nullslast` + + :meth:`.Select.order_by` + + """ + return UnaryExpression( + _literal_as_label_reference(column), + modifier=operators.nullsfirst_op, + wraps_column_expression=False) + + @classmethod + def _create_nullslast(cls, column): + """Produce the ``NULLS LAST`` modifier for an ``ORDER BY`` expression. 
+
+        :func:`.nullslast` is intended to modify the expression produced
+        by :func:`.asc` or :func:`.desc`, and indicates how NULL values
+        should be handled when they are encountered during ordering::
+
+
+            from sqlalchemy import desc, nullslast
+
+            stmt = select([users_table]).\
+                order_by(nullslast(desc(users_table.c.name)))
+
+        The SQL expression from the above would resemble::
+
+            SELECT id, name FROM user ORDER BY name DESC NULLS LAST
+
+        Like :func:`.asc` and :func:`.desc`, :func:`.nullslast` is typically
+        invoked from the column expression itself using
+        :meth:`.ColumnElement.nullslast`, rather than as its standalone
+        function version, as in::
+
+            stmt = select([users_table]).\
+                order_by(users_table.c.name.desc().nullslast())
+
+        .. seealso::
+
+            :func:`.asc`
+
+            :func:`.desc`
+
+            :func:`.nullsfirst`
+
+            :meth:`.Select.order_by`
+
+        """
+        return UnaryExpression(
+            _literal_as_label_reference(column),
+            modifier=operators.nullslast_op,
+            wraps_column_expression=False)
+
+    @classmethod
+    def _create_desc(cls, column):
+        """Produce a descending ``ORDER BY`` clause element.
+
+        e.g.::
+
+            from sqlalchemy import desc
+
+            stmt = select([users_table]).order_by(desc(users_table.c.name))
+
+        will produce SQL as::
+
+            SELECT id, name FROM user ORDER BY name DESC
+
+        The :func:`.desc` function is a standalone version of the
+        :meth:`.ColumnElement.desc` method available on all SQL expressions,
+        e.g.::
+
+
+            stmt = select([users_table]).order_by(users_table.c.name.desc())
+
+        :param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
+         with which to apply the :func:`.desc` operation.
+
+        .. seealso::
+
+            :func:`.asc`
+
+            :func:`.nullsfirst`
+
+            :func:`.nullslast`
+
+            :meth:`.Select.order_by`
+
+        """
+        return UnaryExpression(
+            _literal_as_label_reference(column),
+            modifier=operators.desc_op,
+            wraps_column_expression=False)
+
+    @classmethod
+    def _create_asc(cls, column):
+        """Produce an ascending ``ORDER BY`` clause element.
+
+        e.g.::
+
+            from sqlalchemy import asc
+            stmt = select([users_table]).order_by(asc(users_table.c.name))
+
+        will produce SQL as::
+
+            SELECT id, name FROM user ORDER BY name ASC
+
+        The :func:`.asc` function is a standalone version of the
+        :meth:`.ColumnElement.asc` method available on all SQL expressions,
+        e.g.::
+
+
+            stmt = select([users_table]).order_by(users_table.c.name.asc())
+
+        :param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
+         with which to apply the :func:`.asc` operation.
+
+        .. seealso::
+
+            :func:`.desc`
+
+            :func:`.nullsfirst`
+
+            :func:`.nullslast`
+
+            :meth:`.Select.order_by`
+
+        """
+        return UnaryExpression(
+            _literal_as_label_reference(column),
+            modifier=operators.asc_op,
+            wraps_column_expression=False)
+
+    @classmethod
+    def _create_distinct(cls, expr):
+        """Produce a column-expression-level unary ``DISTINCT`` clause.
+
+        This applies the ``DISTINCT`` keyword to an individual column
+        expression, and is typically contained within an aggregate function,
+        as in::
+
+            from sqlalchemy import distinct, func
+            stmt = select([func.count(distinct(users_table.c.name))])
+
+        The above would produce an expression resembling::
+
+            SELECT COUNT(DISTINCT name) FROM user
+
+        The :func:`.distinct` function is also available as a column-level
+        method, e.g.
:meth:`.ColumnElement.distinct`, as in:: + + stmt = select([func.count(users_table.c.name.distinct())]) + + The :func:`.distinct` operator is different from the + :meth:`.Select.distinct` method of :class:`.Select`, + which produces a ``SELECT`` statement + with ``DISTINCT`` applied to the result set as a whole, + e.g. a ``SELECT DISTINCT`` expression. See that method for further + information. + + .. seealso:: + + :meth:`.ColumnElement.distinct` + + :meth:`.Select.distinct` + + :data:`.func` + + """ + expr = _literal_as_binds(expr) + return UnaryExpression( + expr, operator=operators.distinct_op, + type_=expr.type, wraps_column_expression=False) + + @property + def _order_by_label_element(self): + if self.modifier in (operators.desc_op, operators.asc_op): + return self.element._order_by_label_element + else: + return None + + @property + def _from_objects(self): + return self.element._from_objects + + def _copy_internals(self, clone=_clone, **kw): + self.element = clone(self.element, **kw) + + def get_children(self, **kwargs): + return self.element, + + def compare(self, other, **kw): + """Compare this :class:`UnaryExpression` against the given + :class:`.ClauseElement`.""" + + return ( + isinstance(other, UnaryExpression) and + self.operator == other.operator and + self.modifier == other.modifier and + self.element.compare(other.element, **kw) + ) + + def _negate(self): + if self.negate is not None: + return UnaryExpression( + self.element, + operator=self.negate, + negate=self.operator, + modifier=self.modifier, + type_=self.type, + wraps_column_expression=self.wraps_column_expression) + elif self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity: + return UnaryExpression( + self.self_group(against=operators.inv), + operator=operators.inv, + type_=type_api.BOOLEANTYPE, + wraps_column_expression=self.wraps_column_expression, + negate=None) + else: + return ClauseElement._negate(self) + + def self_group(self, against=None): + if self.operator and operators.is_precedent(self.operator, against): + return Grouping(self) + else: + return self + + +class CollectionAggregate(UnaryExpression): + """Forms the basis for right-hand collection operator modifiers + ANY and ALL. + + The ANY and ALL keywords are available in different ways on different + backends. On PostgreSQL, they only work for an ARRAY type. On + MySQL, they only work for subqueries. + + """ + @classmethod + def _create_any(cls, expr): + """Produce an ANY expression. + + This may apply to an array type for some dialects (e.g. postgresql), + or to a subquery for others (e.g. mysql). e.g.:: + + # postgresql '5 = ANY (somearray)' + expr = 5 == any_(mytable.c.somearray) + + # mysql '5 = ANY (SELECT value FROM table)' + expr = 5 == any_(select([table.c.value])) + + .. versionadded:: 1.1 + + .. seealso:: + + :func:`.expression.all_` + + """ + + expr = _literal_as_binds(expr) + + if expr.is_selectable and hasattr(expr, 'as_scalar'): + expr = expr.as_scalar() + expr = expr.self_group() + return CollectionAggregate( + expr, operator=operators.any_op, + type_=type_api.NULLTYPE, wraps_column_expression=False) + + @classmethod + def _create_all(cls, expr): + """Produce an ALL expression. + + This may apply to an array type for some dialects (e.g. postgresql), + or to a subquery for others (e.g. mysql). e.g.:: + + # postgresql '5 = ALL (somearray)' + expr = 5 == all_(mytable.c.somearray) + + # mysql '5 = ALL (SELECT value FROM table)' + expr = 5 == all_(select([table.c.value])) + + .. versionadded:: 1.1 + + .. 
seealso::
+
+            :func:`.expression.any_`
+
+        """
+
+        expr = _literal_as_binds(expr)
+        if expr.is_selectable and hasattr(expr, 'as_scalar'):
+            expr = expr.as_scalar()
+        expr = expr.self_group()
+        return CollectionAggregate(
+            expr, operator=operators.all_op,
+            type_=type_api.NULLTYPE, wraps_column_expression=False)
+
+    # operate and reverse_operate are hardwired to
+    # dispatch onto the type comparator directly, so that we can
+    # ensure "reversed" behavior.
+    def operate(self, op, *other, **kwargs):
+        if not operators.is_comparison(op):
+            raise exc.ArgumentError(
+                "Only comparison operators may be used with ANY/ALL")
+        kwargs['reverse'] = True
+        return self.comparator.operate(operators.mirror(op), *other, **kwargs)
+
+    def reverse_operate(self, op, other, **kwargs):
+        # comparison operators should never call reverse_operate
+        assert not operators.is_comparison(op)
+        raise exc.ArgumentError(
+            "Only comparison operators may be used with ANY/ALL")
+
+
+class AsBoolean(UnaryExpression):
+
+    def __init__(self, element, operator, negate):
+        self.element = element
+        self.type = type_api.BOOLEANTYPE
+        self.operator = operator
+        self.negate = negate
+        self.modifier = None
+        self.wraps_column_expression = True
+
+    def self_group(self, against=None):
+        return self
+
+    def _negate(self):
+        # TODO: this assumes the element is the True_() or False_()
+        # object, but this assumption isn't enforced and
+        # ColumnElement._negate() can send any number of expressions here
+        return self.element._negate()
+
+
+class BinaryExpression(ColumnElement):
+    """Represent an expression that is ``LEFT <operator> RIGHT``.
+
+    A :class:`.BinaryExpression` is generated automatically
+    whenever two column expressions are used in a Python binary expression::
+
+        >>> from sqlalchemy.sql import column
+        >>> column('a') + column('b')
+        <sqlalchemy.sql.expression.BinaryExpression object at 0x...>
+        >>> print column('a') + column('b')
+        a + b
+
+    """
+
+    __visit_name__ = 'binary'
+
+    def __init__(self, left, right, operator, type_=None,
+                 negate=None, modifiers=None):
+        # allow compatibility with libraries that
+        # refer to BinaryExpression directly and pass strings
+        if isinstance(operator, util.string_types):
+            operator = operators.custom_op(operator)
+        self._orig = (left, right)
+        self.left = left.self_group(against=operator)
+        self.right = right.self_group(against=operator)
+        self.operator = operator
+        self.type = type_api.to_instance(type_)
+        self.negate = negate
+
+        if modifiers is None:
+            self.modifiers = {}
+        else:
+            self.modifiers = modifiers
+
+    def __bool__(self):
+        if self.operator in (operator.eq, operator.ne):
+            return self.operator(hash(self._orig[0]), hash(self._orig[1]))
+        else:
+            raise TypeError("Boolean value of this clause is not defined")
+
+    __nonzero__ = __bool__
+
+    @property
+    def is_comparison(self):
+        return operators.is_comparison(self.operator)
+
+    @property
+    def _from_objects(self):
+        return self.left._from_objects + self.right._from_objects
+
+    def _copy_internals(self, clone=_clone, **kw):
+        self.left = clone(self.left, **kw)
+        self.right = clone(self.right, **kw)
+
+    def get_children(self, **kwargs):
+        return self.left, self.right
+
+    def compare(self, other, **kw):
+        """Compare this :class:`BinaryExpression` against the
+        given :class:`BinaryExpression`."""
+
+        return (
+            isinstance(other, BinaryExpression) and
+            self.operator == other.operator and
+            (
+                self.left.compare(other.left, **kw) and
+                self.right.compare(other.right, **kw) or
+                (
+                    operators.is_commutative(self.operator) and
+                    self.left.compare(other.right, **kw) and
+                    self.right.compare(other.left, **kw)
+ ) + ) + ) + + def self_group(self, against=None): + if operators.is_precedent(self.operator, against): + return Grouping(self) + else: + return self + + def _negate(self): + if self.negate is not None: + return BinaryExpression( + self.left, + self.right, + self.negate, + negate=self.operator, + type_=self.type, + modifiers=self.modifiers) + else: + return super(BinaryExpression, self)._negate() + + +class Slice(ColumnElement): + """Represent SQL for a Python array-slice object. + + This is not a specific SQL construct at this level, but + may be interpreted by specific dialects, e.g. PostgreSQL. + + """ + __visit_name__ = 'slice' + + def __init__(self, start, stop, step): + self.start = start + self.stop = stop + self.step = step + self.type = type_api.NULLTYPE + + def self_group(self, against=None): + assert against is operator.getitem + return self + + +class IndexExpression(BinaryExpression): + """Represent the class of expressions that are like an "index" operation. + """ + pass + + +class Grouping(ColumnElement): + """Represent a grouping within a column expression""" + + __visit_name__ = 'grouping' + + def __init__(self, element): + self.element = element + self.type = getattr(element, 'type', type_api.NULLTYPE) + + def self_group(self, against=None): + return self + + @property + def _key_label(self): + return self._label + + @property + def _label(self): + return getattr(self.element, '_label', None) or self.anon_label + + def _copy_internals(self, clone=_clone, **kw): + self.element = clone(self.element, **kw) + + def get_children(self, **kwargs): + return self.element, + + @property + def _from_objects(self): + return self.element._from_objects + + def __getattr__(self, attr): + return getattr(self.element, attr) + + def __getstate__(self): + return {'element': self.element, 'type': self.type} + + def __setstate__(self, state): + self.element = state['element'] + self.type = state['type'] + + def compare(self, other, **kw): + return isinstance(other, Grouping) and \ + self.element.compare(other.element) + + +RANGE_UNBOUNDED = util.symbol("RANGE_UNBOUNDED") +RANGE_CURRENT = util.symbol("RANGE_CURRENT") + + +class Over(ColumnElement): + """Represent an OVER clause. + + This is a special operator against a so-called + "window" function, as well as any aggregate function, + which produces results relative to the result set + itself. It's supported only by certain database + backends. + + """ + __visit_name__ = 'over' + + order_by = None + partition_by = None + + def __init__( + self, element, partition_by=None, + order_by=None, range_=None, rows=None): + """Produce an :class:`.Over` object against a function. + + Used against aggregate or so-called "window" functions, + for database backends that support window functions. + + :func:`~.expression.over` is usually called using + the :meth:`.FunctionElement.over` method, e.g.:: + + func.row_number().over(order_by=mytable.c.some_column) + + Would produce:: + + ROW_NUMBER() OVER(ORDER BY some_column) + + Ranges are also possible using the :paramref:`.expression.over.range_` + and :paramref:`.expression.over.rows` parameters. 
These + mutually-exclusive parameters each accept a 2-tuple, which contains + a combination of integers and None:: + + func.row_number().over(order_by=my_table.c.some_column, range_=(None, 0)) + + The above would produce:: + + ROW_NUMBER() OVER(ORDER BY some_column RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) + + A value of None indicates "unbounded", a + value of zero indicates "current row", and negative / positive + integers indicate "preceding" and "following": + + * RANGE BETWEEN 5 PRECEDING AND 10 FOLLOWING:: + + func.row_number().over(order_by='x', range_=(-5, 10)) + + * ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW:: + + func.row_number().over(order_by='x', rows=(None, 0)) + + * RANGE BETWEEN 2 PRECEDING AND UNBOUNDED FOLLOWING:: + + func.row_number().over(order_by='x', range_=(-2, None)) + + .. versionadded:: 1.1 support for RANGE / ROWS within a window + + + :param element: a :class:`.FunctionElement`, :class:`.WithinGroup`, + or other compatible construct. + :param partition_by: a column element or string, or a list + of such, that will be used as the PARTITION BY clause + of the OVER construct. + :param order_by: a column element or string, or a list + of such, that will be used as the ORDER BY clause + of the OVER construct. + :param range_: optional range clause for the window. This is a + tuple value which can contain integer values or None, and will + render a RANGE BETWEEN PRECEDING / FOLLOWING clause + + .. versionadded:: 1.1 + + :param rows: optional rows clause for the window. This is a tuple + value which can contain integer values or None, and will render + a ROWS BETWEEN PRECEDING / FOLLOWING clause. + + .. versionadded:: 1.1 + + This function is also available from the :data:`~.expression.func` + construct itself via the :meth:`.FunctionElement.over` method. + + .. 
seealso::
+
+            :data:`.expression.func`
+
+            :func:`.expression.within_group`
+
+        """
+        self.element = element
+        if order_by is not None:
+            self.order_by = ClauseList(
+                *util.to_list(order_by),
+                _literal_as_text=_literal_as_label_reference)
+        if partition_by is not None:
+            self.partition_by = ClauseList(
+                *util.to_list(partition_by),
+                _literal_as_text=_literal_as_label_reference)
+
+        if range_:
+            self.range_ = self._interpret_range(range_)
+            if rows:
+                raise exc.ArgumentError(
+                    "'range_' and 'rows' are mutually exclusive")
+            else:
+                self.rows = None
+        elif rows:
+            self.rows = self._interpret_range(rows)
+            self.range_ = None
+        else:
+            self.rows = self.range_ = None
+
+    def _interpret_range(self, range_):
+        if not isinstance(range_, tuple) or len(range_) != 2:
+            raise exc.ArgumentError("2-tuple expected for range/rows")
+
+        if range_[0] is None:
+            preceding = RANGE_UNBOUNDED
+        else:
+            try:
+                preceding = int(range_[0])
+            except ValueError:
+                raise exc.ArgumentError(
+                    "Integer or None expected for preceding value")
+            else:
+                if preceding > 0:
+                    raise exc.ArgumentError(
+                        "Preceding value must be a "
+                        "negative integer, zero, or None")
+                elif preceding < 0:
+                    preceding = literal(abs(preceding))
+                else:
+                    preceding = RANGE_CURRENT
+
+        if range_[1] is None:
+            following = RANGE_UNBOUNDED
+        else:
+            try:
+                following = int(range_[1])
+            except ValueError:
+                raise exc.ArgumentError(
+                    "Integer or None expected for following value")
+            else:
+                if following < 0:
+                    raise exc.ArgumentError(
+                        "Following value must be a positive "
+                        "integer, zero, or None")
+                elif following > 0:
+                    following = literal(following)
+                else:
+                    following = RANGE_CURRENT
+
+        return preceding, following
+
+    @property
+    def func(self):
+        """the element referred to by this :class:`.Over`
+        clause.
+
+        .. deprecated:: 1.1 the ``func`` element has been renamed to
+           ``.element``.  The two attributes are synonymous though
+           ``.func`` is read-only.
+
+        """
+        return self.element
+
+    @util.memoized_property
+    def type(self):
+        return self.element.type
+
+    def get_children(self, **kwargs):
+        return [c for c in
+                (self.element, self.partition_by, self.order_by)
+                if c is not None]
+
+    def _copy_internals(self, clone=_clone, **kw):
+        self.element = clone(self.element, **kw)
+        if self.partition_by is not None:
+            self.partition_by = clone(self.partition_by, **kw)
+        if self.order_by is not None:
+            self.order_by = clone(self.order_by, **kw)
+
+    @property
+    def _from_objects(self):
+        return list(itertools.chain(
+            *[c._from_objects for c in
+                (self.element, self.partition_by, self.order_by)
+                if c is not None]
+        ))
+
+
+class WithinGroup(ColumnElement):
+    """Represent a WITHIN GROUP (ORDER BY) clause.
+
+    This is a special operator against so-called "ordered set aggregate"
+    and "hypothetical set aggregate" functions, including
+    ``percentile_cont()``, ``rank()``, ``dense_rank()``, etc.
+
+    It's supported only by certain database backends, such as PostgreSQL,
+    Oracle and MS SQL Server.
+
+    The :class:`.WithinGroup` construct extracts its type from the
+    method :meth:`.FunctionElement.within_group_type`.  If this returns
+    ``None``, the function's ``.type`` is used.
+
+    """
+    __visit_name__ = 'withingroup'
+
+    order_by = None
+
+    def __init__(self, element, *order_by):
+        r"""Produce a :class:`.WithinGroup` object against a function.
+
+        Used against so-called "ordered set aggregate" and "hypothetical
+        set aggregate" functions, including :class:`.percentile_cont`,
+        :class:`.rank`, :class:`.dense_rank`, etc.
+ + :func:`~.expression.within_group` is usually called using + the :meth:`.FunctionElement.within_group` method, e.g.:: + + from sqlalchemy import within_group + stmt = select([ + department.c.id, + func.percentile_cont(0.5).within_group( + department.c.salary.desc() + ) + ]) + + The above statement would produce SQL similar to + ``SELECT department.id, percentile_cont(0.5) + WITHIN GROUP (ORDER BY department.salary DESC)``. + + :param element: a :class:`.FunctionElement` construct, typically + generated by :data:`~.expression.func`. + :param \*order_by: one or more column elements that will be used + as the ORDER BY clause of the WITHIN GROUP construct. + + .. versionadded:: 1.1 + + .. seealso:: + + :data:`.expression.func` + + :func:`.expression.over` + + """ + self.element = element + if order_by is not None: + self.order_by = ClauseList( + *util.to_list(order_by), + _literal_as_text=_literal_as_label_reference) + + def over(self, partition_by=None, order_by=None): + """Produce an OVER clause against this :class:`.WithinGroup` + construct. + + This function has the same signature as that of + :meth:`.FunctionElement.over`. + + """ + return Over(self, partition_by=partition_by, order_by=order_by) + + @util.memoized_property + def type(self): + wgt = self.element.within_group_type(self) + if wgt is not None: + return wgt + else: + return self.element.type + + def get_children(self, **kwargs): + return [c for c in + (self.func, self.order_by) + if c is not None] + + def _copy_internals(self, clone=_clone, **kw): + self.element = clone(self.element, **kw) + if self.order_by is not None: + self.order_by = clone(self.order_by, **kw) + + @property + def _from_objects(self): + return list(itertools.chain( + *[c._from_objects for c in + (self.element, self.order_by) + if c is not None] + )) + + +class FunctionFilter(ColumnElement): + """Represent a function FILTER clause. + + This is a special operator against aggregate and window functions, + which controls which rows are passed to it. + It's supported only by certain database backends. + + Invocation of :class:`.FunctionFilter` is via + :meth:`.FunctionElement.filter`:: + + func.count(1).filter(True) + + .. versionadded:: 1.0.0 + + .. seealso:: + + :meth:`.FunctionElement.filter` + + """ + __visit_name__ = 'funcfilter' + + criterion = None + + def __init__(self, func, *criterion): + """Produce a :class:`.FunctionFilter` object against a function. + + Used against aggregate and window functions, + for database backends that support the "FILTER" clause. + + E.g.:: + + from sqlalchemy import funcfilter + funcfilter(func.count(1), MyClass.name == 'some name') + + Would produce "COUNT(1) FILTER (WHERE myclass.name = 'some name')". + + This function is also available from the :data:`~.expression.func` + construct itself via the :meth:`.FunctionElement.filter` method. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :meth:`.FunctionElement.filter` + + + """ + self.func = func + self.filter(*criterion) + + def filter(self, *criterion): + """Produce an additional FILTER against the function. + + This method adds additional criteria to the initial criteria + set up by :meth:`.FunctionElement.filter`. + + Multiple criteria are joined together at SQL render time + via ``AND``. 
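+
+        For example (an illustrative sketch; ``MyClass`` stands in for any
+        mapped class or table construct)::
+
+            func.count(1).filter(MyClass.x > 5).filter(MyClass.y < 10)
+
+        would produce SQL resembling
+        ``COUNT(1) FILTER (WHERE myclass.x > 5 AND myclass.y < 10)``.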
+ + + """ + + for criterion in list(criterion): + criterion = _expression_literal_as_text(criterion) + + if self.criterion is not None: + self.criterion = self.criterion & criterion + else: + self.criterion = criterion + + return self + + def over(self, partition_by=None, order_by=None): + """Produce an OVER clause against this filtered function. + + Used against aggregate or so-called "window" functions, + for database backends that support window functions. + + The expression:: + + func.rank().filter(MyClass.y > 5).over(order_by='x') + + is shorthand for:: + + from sqlalchemy import over, funcfilter + over(funcfilter(func.rank(), MyClass.y > 5), order_by='x') + + See :func:`~.expression.over` for a full description. + + """ + return Over(self, partition_by=partition_by, order_by=order_by) + + @util.memoized_property + def type(self): + return self.func.type + + def get_children(self, **kwargs): + return [c for c in + (self.func, self.criterion) + if c is not None] + + def _copy_internals(self, clone=_clone, **kw): + self.func = clone(self.func, **kw) + if self.criterion is not None: + self.criterion = clone(self.criterion, **kw) + + @property + def _from_objects(self): + return list(itertools.chain( + *[c._from_objects for c in (self.func, self.criterion) + if c is not None] + )) + + +class Label(ColumnElement): + """Represents a column label (AS). + + Represent a label, as typically applied to any column-level + element using the ``AS`` sql keyword. + + """ + + __visit_name__ = 'label' + + def __init__(self, name, element, type_=None): + """Return a :class:`Label` object for the + given :class:`.ColumnElement`. + + A label changes the name of an element in the columns clause of a + ``SELECT`` statement, typically via the ``AS`` SQL keyword. + + This functionality is more conveniently available via the + :meth:`.ColumnElement.label` method on :class:`.ColumnElement`. + + :param name: label name + + :param obj: a :class:`.ColumnElement`. 
+ + """ + + if isinstance(element, Label): + self._resolve_label = element._label + + while isinstance(element, Label): + element = element.element + + if name: + self.name = name + self._resolve_label = self.name + else: + self.name = _anonymous_label( + '%%(%d %s)s' % (id(self), getattr(element, 'name', 'anon')) + ) + + self.key = self._label = self._key_label = self.name + self._element = element + self._type = type_ + self._proxies = [element] + + def __reduce__(self): + return self.__class__, (self.name, self._element, self._type) + + @util.memoized_property + def _allow_label_resolve(self): + return self.element._allow_label_resolve + + @property + def _order_by_label_element(self): + return self + + @util.memoized_property + def type(self): + return type_api.to_instance( + self._type or getattr(self._element, 'type', None) + ) + + @util.memoized_property + def element(self): + return self._element.self_group(against=operators.as_) + + def self_group(self, against=None): + sub_element = self._element.self_group(against=against) + if sub_element is not self._element: + return Label(self.name, + sub_element, + type_=self._type) + else: + return self + + @property + def primary_key(self): + return self.element.primary_key + + @property + def foreign_keys(self): + return self.element.foreign_keys + + def get_children(self, **kwargs): + return self.element, + + def _copy_internals(self, clone=_clone, anonymize_labels=False, **kw): + self._element = clone(self._element, **kw) + self.__dict__.pop('element', None) + self.__dict__.pop('_allow_label_resolve', None) + if anonymize_labels: + self.name = self._resolve_label = _anonymous_label( + '%%(%d %s)s' % ( + id(self), getattr(self.element, 'name', 'anon')) + ) + self.key = self._label = self._key_label = self.name + + @property + def _from_objects(self): + return self.element._from_objects + + def _make_proxy(self, selectable, name=None, **kw): + e = self.element._make_proxy(selectable, + name=name if name else self.name) + e._proxies.append(self) + if self._type is not None: + e.type = self._type + return e + + +class ColumnClause(Immutable, ColumnElement): + """Represents a column expression from any textual string. + + The :class:`.ColumnClause`, a lightweight analogue to the + :class:`.Column` class, is typically invoked using the + :func:`.column` function, as in:: + + from sqlalchemy import column + + id, name = column("id"), column("name") + stmt = select([id, name]).select_from("user") + + The above statement would produce SQL like:: + + SELECT id, name FROM user + + :class:`.ColumnClause` is the immediate superclass of the schema-specific + :class:`.Column` object. While the :class:`.Column` class has all the + same capabilities as :class:`.ColumnClause`, the :class:`.ColumnClause` + class is usable by itself in those cases where behavioral requirements + are limited to simple SQL expression generation. The object has none of + the associations with schema-level metadata or with execution-time + behavior that :class:`.Column` does, so in that sense is a "lightweight" + version of :class:`.Column`. + + Full details on :class:`.ColumnClause` usage is at :func:`.column`. + + .. seealso:: + + :func:`.column` + + :class:`.Column` + + """ + __visit_name__ = 'column' + + onupdate = default = server_default = server_onupdate = None + + _memoized_property = util.group_expirable_memoized_property() + + def __init__(self, text, type_=None, is_literal=False, _selectable=None): + """Produce a :class:`.ColumnClause` object. 
+
+        The :class:`.ColumnClause` is a lightweight analogue to the
+        :class:`.Column` class.  The :func:`.column` function can
+        be invoked with just a name alone, as in::
+
+            from sqlalchemy import column
+
+            id, name = column("id"), column("name")
+            stmt = select([id, name]).select_from("user")
+
+        The above statement would produce SQL like::
+
+            SELECT id, name FROM user
+
+        Once constructed, :func:`.column` may be used like any other SQL
+        expression element such as within :func:`.select` constructs::
+
+            from sqlalchemy.sql import column
+
+            id, name = column("id"), column("name")
+            stmt = select([id, name]).select_from("user")
+
+        The text handled by :func:`.column` is treated like the name of a
+        database column; if the string contains mixed case,
+        special characters, or matches a known reserved word on the target
+        backend, the column expression will render using the quoting
+        behavior determined by the backend.  To produce a textual SQL
+        expression that is rendered exactly without any quoting,
+        use :func:`.literal_column` instead, or pass ``True`` as the
+        value of :paramref:`.column.is_literal`.  Additionally, full SQL
+        statements are best handled using the :func:`.text` construct.
+
+        :func:`.column` can be used in a table-like
+        fashion by combining it with the :func:`.table` function
+        (which is the lightweight analogue to :class:`.Table`) to produce
+        a working table construct with minimal boilerplate::
+
+            from sqlalchemy import table, column, select
+
+            user = table("user",
+                         column("id"),
+                         column("name"),
+                         column("description"),
+                         )
+
+            stmt = select([user.c.description]).where(user.c.name == 'wendy')
+
+        A :func:`.column` / :func:`.table` construct like that illustrated
+        above can be created in an
+        ad-hoc fashion and is not associated with any
+        :class:`.schema.MetaData`, DDL, or events, unlike its
+        :class:`.Table` counterpart.
+
+        .. versionchanged:: 1.0.0 :func:`.expression.column` can now
+           be imported from the plain ``sqlalchemy`` namespace like any
+           other SQL element.
+
+        :param text: the text of the element.
+
+        :param type: :class:`.types.TypeEngine` object which can associate
+         this :class:`.ColumnClause` with a type.
+
+        :param is_literal: if True, the :class:`.ColumnClause` is assumed to
+         be an exact expression that will be delivered to the output with no
+         quoting rules applied regardless of case-sensitive settings.  The
+         :func:`.literal_column()` function essentially invokes
+         :func:`.column` while passing ``is_literal=True``.
+
+        ..
seealso:: + + :class:`.Column` + + :func:`.literal_column` + + :func:`.table` + + :func:`.text` + + :ref:`sqlexpression_literal_column` + + """ + + self.key = self.name = text + self.table = _selectable + self.type = type_api.to_instance(type_) + self.is_literal = is_literal + + def _compare_name_for_result(self, other): + if self.is_literal or \ + self.table is None or self.table._textual or \ + not hasattr(other, 'proxy_set') or ( + isinstance(other, ColumnClause) and + (other.is_literal or + other.table is None or + other.table._textual) + ): + return (hasattr(other, 'name') and self.name == other.name) or \ + (hasattr(other, '_label') and self._label == other._label) + else: + return other.proxy_set.intersection(self.proxy_set) + + def _get_table(self): + return self.__dict__['table'] + + def _set_table(self, table): + self._memoized_property.expire_instance(self) + self.__dict__['table'] = table + table = property(_get_table, _set_table) + + @_memoized_property + def _from_objects(self): + t = self.table + if t is not None: + return [t] + else: + return [] + + @util.memoized_property + def description(self): + if util.py3k: + return self.name + else: + return self.name.encode('ascii', 'backslashreplace') + + @_memoized_property + def _key_label(self): + if self.key != self.name: + return self._gen_label(self.key) + else: + return self._label + + @_memoized_property + def _label(self): + return self._gen_label(self.name) + + @_memoized_property + def _render_label_in_columns_clause(self): + return self.table is not None + + def _gen_label(self, name): + t = self.table + + if self.is_literal: + return None + + elif t is not None and t.named_with_column: + if getattr(t, 'schema', None): + label = t.schema.replace('.', '_') + "_" + \ + t.name + "_" + name + else: + label = t.name + "_" + name + + # propagate name quoting rules for labels. 
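+            # e.g. a column name created as quoted_name("someName", quote=True)
+            # should carry its explicit quote flag onto the generated label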
+            if getattr(name, "quote", None) is not None:
+                if isinstance(label, quoted_name):
+                    label.quote = name.quote
+                else:
+                    label = quoted_name(label, name.quote)
+            elif getattr(t.name, "quote", None) is not None:
+                # can't get this situation to occur, so let's
+                # assert false on it for now
+                assert not isinstance(label, quoted_name)
+                label = quoted_name(label, t.name.quote)
+
+            # ensure the label name doesn't conflict with that
+            # of an existing column
+            if label in t.c:
+                _label = label
+                counter = 1
+                while _label in t.c:
+                    _label = label + "_" + str(counter)
+                    counter += 1
+                label = _label
+
+            return _as_truncated(label)
+
+        else:
+            return name
+
+    def _bind_param(self, operator, obj, type_=None):
+        return BindParameter(self.key, obj,
+                             _compared_to_operator=operator,
+                             _compared_to_type=self.type,
+                             type_=type_,
+                             unique=True)
+
+    def _make_proxy(self, selectable, name=None, attach=True,
+                    name_is_truncatable=False, **kw):
+        # propagate the "is_literal" flag only if we are keeping our name,
+        # otherwise it's considered to be a label
+        is_literal = self.is_literal and (name is None or name == self.name)
+        c = self._constructor(
+            _as_truncated(name or self.name) if
+            name_is_truncatable else
+            (name or self.name),
+            type_=self.type,
+            _selectable=selectable,
+            is_literal=is_literal
+        )
+        if name is None:
+            c.key = self.key
+        c._proxies = [self]
+        if selectable._is_clone_of is not None:
+            c._is_clone_of = \
+                selectable._is_clone_of.columns.get(c.key)
+
+        if attach:
+            selectable._columns[c.key] = c
+        return c
+
+
+class _IdentifiedClause(Executable, ClauseElement):
+
+    __visit_name__ = 'identified'
+    _execution_options = \
+        Executable._execution_options.union({'autocommit': False})
+
+    def __init__(self, ident):
+        self.ident = ident
+
+
+class SavepointClause(_IdentifiedClause):
+    __visit_name__ = 'savepoint'
+
+
+class RollbackToSavepointClause(_IdentifiedClause):
+    __visit_name__ = 'rollback_to_savepoint'
+
+
+class ReleaseSavepointClause(_IdentifiedClause):
+    __visit_name__ = 'release_savepoint'
+
+
+class quoted_name(util.MemoizedSlots, util.text_type):
+    """Represent a SQL identifier combined with quoting preferences.
+
+    :class:`.quoted_name` is a Python unicode/str subclass which
+    represents a particular identifier name along with a
+    ``quote`` flag.  This ``quote`` flag, when set to
+    ``True`` or ``False``, overrides automatic quoting behavior
+    for this identifier in order to either unconditionally quote
+    or to not quote the name.  If left at its default of ``None``,
+    quoting behavior is applied to the identifier on a per-backend basis
+    based on an examination of the token itself.
+
+    A :class:`.quoted_name` object with ``quote=True`` is also
+    prevented from being modified in the case of a so-called
+    "name normalize" option.  Certain database backends, such as
+    Oracle, Firebird, and DB2 "normalize" case-insensitive names
+    as uppercase.  The SQLAlchemy dialects for these backends
+    convert from SQLAlchemy's lower-case-means-insensitive convention
+    to the upper-case-means-insensitive conventions of those backends.
+    The ``quote=True`` flag here will prevent this conversion from occurring
+    to support an identifier that's quoted as all lower case against
+    such a backend.
+
+    The :class:`.quoted_name` object is normally created automatically
+    when specifying the name for key schema constructs such as
+    :class:`.Table`, :class:`.Column`, and others.  The class can also be
+    passed explicitly as the name to any function that receives a name which
+    can be quoted,
such as when using the :meth:`.Engine.has_table` method with
+    an unconditionally quoted name::
+
+        from sqlalchemy import create_engine
+        from sqlalchemy.sql.elements import quoted_name
+
+        engine = create_engine("oracle+cx_oracle://some_dsn")
+        engine.has_table(quoted_name("some_table", True))
+
+    The above logic will run the "has table" logic against the Oracle backend,
+    passing the name exactly as ``"some_table"`` without converting to
+    upper case.
+
+    .. versionadded:: 0.9.0
+
+    """
+
+    __slots__ = 'quote', 'lower', 'upper'
+
+    def __new__(cls, value, quote):
+        if value is None:
+            return None
+        # experimental - don't bother with quoted_name
+        # if quote flag is None.  doesn't seem to make any dent
+        # in performance however
+        # elif not sprcls and quote is None:
+        #    return value
+        elif isinstance(value, cls) and (
+            quote is None or value.quote == quote
+        ):
+            return value
+        self = super(quoted_name, cls).__new__(cls, value)
+        self.quote = quote
+        return self
+
+    def __reduce__(self):
+        return quoted_name, (util.text_type(self), self.quote)
+
+    def _memoized_method_lower(self):
+        if self.quote:
+            return self
+        else:
+            return util.text_type(self).lower()
+
+    def _memoized_method_upper(self):
+        if self.quote:
+            return self
+        else:
+            return util.text_type(self).upper()
+
+    def __repr__(self):
+        backslashed = self.encode('ascii', 'backslashreplace')
+        if not util.py2k:
+            backslashed = backslashed.decode('ascii')
+        return "'%s'" % backslashed
+
+
+class _truncated_label(quoted_name):
+    """A unicode subclass used to identify symbolic names
+    that may require truncation."""
+
+    __slots__ = ()
+
+    def __new__(cls, value, quote=None):
+        quote = getattr(value, "quote", quote)
+        # return super(_truncated_label, cls).__new__(cls, value, quote, True)
+        return super(_truncated_label, cls).__new__(cls, value, quote)
+
+    def __reduce__(self):
+        return self.__class__, (util.text_type(self), self.quote)
+
+    def apply_map(self, map_):
+        return self
+
+
+class conv(_truncated_label):
+    """Mark a string indicating that a name has already been converted
+    by a naming convention.
+
+    This is a string subclass that indicates a name that should not be
+    subject to any further naming conventions.
+
+    E.g. when we create a :class:`.Constraint` using a naming convention
+    as follows::
+
+        m = MetaData(naming_convention={
+            "ck": "ck_%(table_name)s_%(constraint_name)s"
+        })
+        t = Table('t', m, Column('x', Integer),
+                  CheckConstraint('x > 5', name='x5'))
+
+    The name of the above constraint will be rendered as ``"ck_t_x5"``.
+    That is, the existing name ``x5`` is used in the naming convention as the
+    ``constraint_name`` token.
+
+    In some situations, such as in migration scripts, we may be rendering
+    the above :class:`.CheckConstraint` with a name that's already been
+    converted.  In order to make sure the name isn't double-modified, the
+    new name is applied using the :func:`.schema.conv` marker.  We can
+    use this explicitly as follows::
+
+
+        m = MetaData(naming_convention={
+            "ck": "ck_%(table_name)s_%(constraint_name)s"
+        })
+        t = Table('t', m, Column('x', Integer),
+                  CheckConstraint('x > 5', name=conv('ck_t_x5')))
+
+    Where above, the :func:`.schema.conv` marker indicates that the constraint
+    name here is final, and the name will render as ``"ck_t_x5"`` and not
+    ``"ck_t_ck_t_x5"``.
+
+    .. versionadded:: 0.9.4
+
+    ..
seealso:: + + :ref:`constraint_naming_conventions` + + """ + __slots__ = () + + +class _defer_name(_truncated_label): + """mark a name as 'deferred' for the purposes of automated name + generation. + + """ + __slots__ = () + + def __new__(cls, value): + if value is None: + return _NONE_NAME + elif isinstance(value, conv): + return value + else: + return super(_defer_name, cls).__new__(cls, value) + + def __reduce__(self): + return self.__class__, (util.text_type(self), ) + + +class _defer_none_name(_defer_name): + """indicate a 'deferred' name that was ultimately the value None.""" + __slots__ = () + +_NONE_NAME = _defer_none_name("_unnamed_") + +# for backwards compatibility in case +# someone is re-implementing the +# _truncated_identifier() sequence in a custom +# compiler +_generated_label = _truncated_label + + +class _anonymous_label(_truncated_label): + """A unicode subclass used to identify anonymously + generated names.""" + + __slots__ = () + + def __add__(self, other): + return _anonymous_label( + quoted_name( + util.text_type.__add__(self, util.text_type(other)), + self.quote) + ) + + def __radd__(self, other): + return _anonymous_label( + quoted_name( + util.text_type.__add__(util.text_type(other), self), + self.quote) + ) + + def apply_map(self, map_): + if self.quote is not None: + # preserve quoting only if necessary + return quoted_name(self % map_, self.quote) + else: + # else skip the constructor call + return self % map_ + + +def _as_truncated(value): + """coerce the given value to :class:`._truncated_label`. + + Existing :class:`._truncated_label` and + :class:`._anonymous_label` objects are passed + unchanged. + """ + + if isinstance(value, _truncated_label): + return value + else: + return _truncated_label(value) + + +def _string_or_unprintable(element): + if isinstance(element, util.string_types): + return element + else: + try: + return str(element) + except Exception: + return "unprintable element %r" % element + + +def _expand_cloned(elements): + """expand the given set of ClauseElements to be the set of all 'cloned' + predecessors. + + """ + return itertools.chain(*[x._cloned_set for x in elements]) + + +def _select_iterables(elements): + """expand tables into individual columns in the + given list of column expressions. + + """ + return itertools.chain(*[c._select_iterable for c in elements]) + + +def _cloned_intersection(a, b): + """return the intersection of sets a and b, counting + any overlap between 'cloned' predecessors. + + The returned set is in terms of the entities present within 'a'. + + """ + all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b)) + return set(elem for elem in a + if all_overlap.intersection(elem._cloned_set)) + + +def _cloned_difference(a, b): + all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b)) + return set(elem for elem in a + if not all_overlap.intersection(elem._cloned_set)) + + +@util.dependencies("sqlalchemy.sql.functions") +def _labeled(functions, element): + if not hasattr(element, 'name') or \ + isinstance(element, functions.FunctionElement): + return element.label(None) + else: + return element + + +def _is_column(col): + """True if ``col`` is an instance of :class:`.ColumnElement`.""" + + return isinstance(col, ColumnElement) + + +def _find_columns(clause): + """locate Column objects within the given expression.""" + + cols = util.column_set() + traverse(clause, {}, {'column': cols.add}) + return cols + + +# there is some inconsistency here between the usage of +# inspect() vs. 
checking for Visitable and __clause_element__. +# Ideally all functions here would derive from inspect(), +# however the inspect() versions add significant callcount +# overhead for critical functions like _interpret_as_column_or_from(). +# Generally, the column-based functions are more performance critical +# and are fine just checking for __clause_element__(). It is only +# _interpret_as_from() where we'd like to be able to receive ORM entities +# that have no defined namespace, hence inspect() is needed there. + + +def _column_as_key(element): + if isinstance(element, util.string_types): + return element + if hasattr(element, '__clause_element__'): + element = element.__clause_element__() + try: + return element.key + except AttributeError: + return None + + +def _clause_element_as_expr(element): + if hasattr(element, '__clause_element__'): + return element.__clause_element__() + else: + return element + + +def _literal_as_label_reference(element): + if isinstance(element, util.string_types): + return _textual_label_reference(element) + + elif hasattr(element, '__clause_element__'): + element = element.__clause_element__() + + return _literal_as_text(element) + + +def _literal_and_labels_as_label_reference(element): + if isinstance(element, util.string_types): + return _textual_label_reference(element) + + elif hasattr(element, '__clause_element__'): + element = element.__clause_element__() + + if isinstance(element, ColumnElement) and \ + element._order_by_label_element is not None: + return _label_reference(element) + else: + return _literal_as_text(element) + + +def _expression_literal_as_text(element): + return _literal_as_text(element, warn=True) + + +def _literal_as_text(element, warn=False): + if isinstance(element, Visitable): + return element + elif hasattr(element, '__clause_element__'): + return element.__clause_element__() + elif isinstance(element, util.string_types): + if warn: + util.warn_limited( + "Textual SQL expression %(expr)r should be " + "explicitly declared as text(%(expr)r)", + {"expr": util.ellipses_string(element)}) + + return TextClause(util.text_type(element)) + elif isinstance(element, (util.NoneType, bool)): + return _const_expr(element) + else: + raise exc.ArgumentError( + "SQL expression object or string expected, got object of type %r " + "instead" % type(element) + ) + + +def _no_literals(element): + if hasattr(element, '__clause_element__'): + return element.__clause_element__() + elif not isinstance(element, Visitable): + raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' " + "function to indicate a SQL expression " + "literal, or 'literal()' to indicate a " + "bound value." 
% element) + else: + return element + + +def _is_literal(element): + return not isinstance(element, Visitable) and \ + not hasattr(element, '__clause_element__') + + +def _only_column_elements_or_none(element, name): + if element is None: + return None + else: + return _only_column_elements(element, name) + + +def _only_column_elements(element, name): + if hasattr(element, '__clause_element__'): + element = element.__clause_element__() + if not isinstance(element, ColumnElement): + raise exc.ArgumentError( + "Column-based expression object expected for argument " + "'%s'; got: '%s', type %s" % (name, element, type(element))) + return element + + +def _literal_as_binds(element, name=None, type_=None): + if hasattr(element, '__clause_element__'): + return element.__clause_element__() + elif not isinstance(element, Visitable): + if element is None: + return Null() + else: + return BindParameter(name, element, type_=type_, unique=True) + else: + return element + +_guess_straight_column = re.compile(r'^\w\S*$', re.I) + + +def _interpret_as_column_or_from(element): + if isinstance(element, Visitable): + return element + elif hasattr(element, '__clause_element__'): + return element.__clause_element__() + + insp = inspection.inspect(element, raiseerr=False) + if insp is None: + if isinstance(element, (util.NoneType, bool)): + return _const_expr(element) + elif hasattr(insp, "selectable"): + return insp.selectable + + # be forgiving as this is an extremely common + # and known expression + if element == "*": + guess_is_literal = True + elif isinstance(element, (numbers.Number)): + return ColumnClause(str(element), is_literal=True) + else: + element = str(element) + # give into temptation, as this fact we are guessing about + # is not one we've previously ever needed our users tell us; + # but let them know we are not happy about it + guess_is_literal = not _guess_straight_column.match(element) + util.warn_limited( + "Textual column expression %(column)r should be " + "explicitly declared with text(%(column)r), " + "or use %(literal_column)s(%(column)r) " + "for more specificity", + { + "column": util.ellipses_string(element), + "literal_column": "literal_column" + if guess_is_literal else "column" + }) + return ColumnClause( + element, + is_literal=guess_is_literal) + + +def _const_expr(element): + if isinstance(element, (Null, False_, True_)): + return element + elif element is None: + return Null() + elif element is False: + return False_() + elif element is True: + return True_() + else: + raise exc.ArgumentError( + "Expected None, False, or True" + ) + + +def _type_from_args(args): + for a in args: + if not a.type._isnull: + return a.type + else: + return type_api.NULLTYPE + + +def _corresponding_column_or_error(fromclause, column, + require_embedded=False): + c = fromclause.corresponding_column(column, + require_embedded=require_embedded) + if c is None: + raise exc.InvalidRequestError( + "Given column '%s', attached to table '%s', " + "failed to locate a corresponding column from table '%s'" + % + (column, + getattr(column, 'table', None), + fromclause.description) + ) + return c + + +class AnnotatedColumnElement(Annotated): + def __init__(self, element, values): + Annotated.__init__(self, element, values) + ColumnElement.comparator._reset(self) + for attr in ('name', 'key', 'table'): + if self.__dict__.get(attr, False) is None: + self.__dict__.pop(attr) + + def _with_annotations(self, values): + clone = super(AnnotatedColumnElement, self)._with_annotations(values) + 
ColumnElement.comparator._reset(clone) + return clone + + @util.memoized_property + def name(self): + """pull 'name' from parent, if not present""" + return self._Annotated__element.name + + @util.memoized_property + def table(self): + """pull 'table' from parent, if not present""" + return self._Annotated__element.table + + @util.memoized_property + def key(self): + """pull 'key' from parent, if not present""" + return self._Annotated__element.key + + @util.memoized_property + def info(self): + return self._Annotated__element.info + + @util.memoized_property + def anon_label(self): + return self._Annotated__element.anon_label diff --git a/app/lib/sqlalchemy/sql/expression.py b/app/lib/sqlalchemy/sql/expression.py new file mode 100644 index 0000000..172bf4b --- /dev/null +++ b/app/lib/sqlalchemy/sql/expression.py @@ -0,0 +1,144 @@ +# sql/expression.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Defines the public namespace for SQL expression constructs. + +Prior to version 0.9, this module contained all of "elements", "dml", +"default_comparator" and "selectable". The module was broken up +and most "factory" functions were moved to be grouped with their associated +class. + +""" + +__all__ = [ + 'Alias', 'any_', 'all_', 'ClauseElement', 'ColumnCollection', 'ColumnElement', + 'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Lateral', + 'Select', + 'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between', + 'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct', + 'except_', 'except_all', 'exists', 'extract', 'func', 'modifier', + 'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label', + 'lateral', 'literal', 'literal_column', 'not_', 'null', 'nullsfirst', + 'nullslast', + 'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery', + 'table', 'text', + 'tuple_', 'type_coerce', 'union', 'union_all', 'update', 'within_group', + 'TableSample', 'tablesample'] + + +from .visitors import Visitable +from .functions import func, modifier, FunctionElement, Function +from ..util.langhelpers import public_factory +from .elements import ClauseElement, ColumnElement,\ + BindParameter, CollectionAggregate, UnaryExpression, BooleanClauseList, \ + Label, Cast, Case, ColumnClause, TextClause, Over, Null, \ + True_, False_, BinaryExpression, Tuple, TypeClause, Extract, \ + Grouping, WithinGroup, not_, \ + collate, literal_column, between,\ + literal, outparam, TypeCoerce, ClauseList, FunctionFilter + +from .elements import SavepointClause, RollbackToSavepointClause, \ + ReleaseSavepointClause + +from .base import ColumnCollection, Generative, Executable, \ + PARSE_AUTOCOMMIT + +from .selectable import Alias, Join, Select, Selectable, TableClause, \ + CompoundSelect, CTE, FromClause, FromGrouping, Lateral, SelectBase, \ + alias, GenerativeSelect, subquery, HasCTE, HasPrefixes, HasSuffixes, \ + lateral, Exists, ScalarSelect, TextAsFrom, TableSample, tablesample + + +from .dml import Insert, Update, Delete, UpdateBase, ValuesBase + +# factory functions - these pull class-bound constructors and classmethods +# from SQL elements and selectables into public functions. This allows +# the functions to be available in the sqlalchemy.sql.* namespace and +# to be auto-cross-documenting from the function to the class itself. 
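+# e.g. "any_" below exposes the classmethod CollectionAggregate._create_any
+# as the module-level function sqlalchemy.sql.expression.any_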
+ +all_ = public_factory(CollectionAggregate._create_all, ".expression.all_") +any_ = public_factory(CollectionAggregate._create_any, ".expression.any_") +and_ = public_factory(BooleanClauseList.and_, ".expression.and_") +or_ = public_factory(BooleanClauseList.or_, ".expression.or_") +bindparam = public_factory(BindParameter, ".expression.bindparam") +select = public_factory(Select, ".expression.select") +text = public_factory(TextClause._create_text, ".expression.text") +table = public_factory(TableClause, ".expression.table") +column = public_factory(ColumnClause, ".expression.column") +over = public_factory(Over, ".expression.over") +within_group = public_factory(WithinGroup, ".expression.within_group") +label = public_factory(Label, ".expression.label") +case = public_factory(Case, ".expression.case") +cast = public_factory(Cast, ".expression.cast") +extract = public_factory(Extract, ".expression.extract") +tuple_ = public_factory(Tuple, ".expression.tuple_") +except_ = public_factory(CompoundSelect._create_except, ".expression.except_") +except_all = public_factory( + CompoundSelect._create_except_all, ".expression.except_all") +intersect = public_factory( + CompoundSelect._create_intersect, ".expression.intersect") +intersect_all = public_factory( + CompoundSelect._create_intersect_all, ".expression.intersect_all") +union = public_factory(CompoundSelect._create_union, ".expression.union") +union_all = public_factory( + CompoundSelect._create_union_all, ".expression.union_all") +exists = public_factory(Exists, ".expression.exists") +nullsfirst = public_factory( + UnaryExpression._create_nullsfirst, ".expression.nullsfirst") +nullslast = public_factory( + UnaryExpression._create_nullslast, ".expression.nullslast") +asc = public_factory(UnaryExpression._create_asc, ".expression.asc") +desc = public_factory(UnaryExpression._create_desc, ".expression.desc") +distinct = public_factory( + UnaryExpression._create_distinct, ".expression.distinct") +type_coerce = public_factory(TypeCoerce, ".expression.type_coerce") +true = public_factory(True_._instance, ".expression.true") +false = public_factory(False_._instance, ".expression.false") +null = public_factory(Null._instance, ".expression.null") +join = public_factory(Join._create_join, ".expression.join") +outerjoin = public_factory(Join._create_outerjoin, ".expression.outerjoin") +insert = public_factory(Insert, ".expression.insert") +update = public_factory(Update, ".expression.update") +delete = public_factory(Delete, ".expression.delete") +funcfilter = public_factory( + FunctionFilter, ".expression.funcfilter") + + +# internal functions still being called from tests and the ORM, +# these might be better off in some other namespace +from .base import _from_objects +from .elements import _literal_as_text, _clause_element_as_expr,\ + _is_column, _labeled, _only_column_elements, _string_or_unprintable, \ + _truncated_label, _clone, _cloned_difference, _cloned_intersection,\ + _column_as_key, _literal_as_binds, _select_iterables, \ + _corresponding_column_or_error, _literal_as_label_reference, \ + _expression_literal_as_text +from .selectable import _interpret_as_from + + +# old names for compatibility +_Executable = Executable +_BindParamClause = BindParameter +_Label = Label +_SelectBase = SelectBase +_BinaryExpression = BinaryExpression +_Cast = Cast +_Null = Null +_False = False_ +_True = True_ +_TextClause = TextClause +_UnaryExpression = UnaryExpression +_Case = Case +_Tuple = Tuple +_Over = Over +_Generative = Generative +_TypeClause = 
TypeClause +_Extract = Extract +_Exists = Exists +_Grouping = Grouping +_FromGrouping = FromGrouping +_ScalarSelect = ScalarSelect diff --git a/app/lib/sqlalchemy/sql/functions.py b/app/lib/sqlalchemy/sql/functions.py new file mode 100644 index 0000000..08f1d32 --- /dev/null +++ b/app/lib/sqlalchemy/sql/functions.py @@ -0,0 +1,813 @@ +# sql/functions.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""SQL function API, factories, and built-in functions. + +""" +from . import sqltypes, schema +from .base import Executable, ColumnCollection +from .elements import ClauseList, Cast, Extract, _literal_as_binds, \ + literal_column, _type_from_args, ColumnElement, _clone,\ + Over, BindParameter, FunctionFilter, Grouping, WithinGroup +from .selectable import FromClause, Select, Alias +from . import util as sqlutil +from . import operators +from .visitors import VisitableType +from .. import util +from . import annotation + +_registry = util.defaultdict(dict) + + +def register_function(identifier, fn, package="_default"): + """Associate a callable with a particular func. name. + + This is normally called by _GenericMeta, but is also + available by itself so that a non-Function construct + can be associated with the :data:`.func` accessor (i.e. + CAST, EXTRACT). + + """ + reg = _registry[package] + reg[identifier] = fn + + +class FunctionElement(Executable, ColumnElement, FromClause): + """Base for SQL function-oriented constructs. + + .. seealso:: + + :class:`.Function` - named SQL function. + + :data:`.func` - namespace which produces registered or ad-hoc + :class:`.Function` instances. + + :class:`.GenericFunction` - allows creation of registered function + types. + + """ + + packagenames = () + + def __init__(self, *clauses, **kwargs): + """Construct a :class:`.FunctionElement`. + """ + args = [_literal_as_binds(c, self.name) for c in clauses] + self.clause_expr = ClauseList( + operator=operators.comma_op, + group_contents=True, *args).\ + self_group() + + def _execute_on_connection(self, connection, multiparams, params): + return connection._execute_function(self, multiparams, params) + + @property + def columns(self): + """The set of columns exported by this :class:`.FunctionElement`. + + Function objects currently have no result column names built in; + this method returns a single-element column collection with + an anonymously named column. + + An interim approach to providing named columns for a function + as a FROM clause is to build a :func:`.select` with the + desired columns:: + + from sqlalchemy.sql import column + + stmt = select([column('x'), column('y')]).\ + select_from(func.myfunction()) + + + """ + return ColumnCollection(self.label(None)) + + @util.memoized_property + def clauses(self): + """Return the underlying :class:`.ClauseList` which contains + the arguments for this :class:`.FunctionElement`. + + """ + return self.clause_expr.element + + def over(self, partition_by=None, order_by=None, rows=None, range_=None): + """Produce an OVER clause against this function. + + Used against aggregate or so-called "window" functions, + for database backends that support window functions. + + The expression:: + + func.row_number().over(order_by='x') + + is shorthand for:: + + from sqlalchemy import over + over(func.row_number(), order_by='x') + + See :func:`~.expression.over` for a full description. + + .. 
versionadded:: 0.7 + + """ + return Over( + self, + partition_by=partition_by, + order_by=order_by, + rows=rows, + range_=range_ + ) + + def within_group(self, *order_by): + """Produce a WITHIN GROUP (ORDER BY expr) clause against this function. + + Used against so-called "ordered set aggregate" and "hypothetical + set aggregate" functions, including :class:`.percentile_cont`, + :class:`.rank`, :class:`.dense_rank`, etc. + + See :func:`~.expression.within_group` for a full description. + + .. versionadded:: 1.1 + + + """ + return WithinGroup(self, *order_by) + + def filter(self, *criterion): + """Produce a FILTER clause against this function. + + Used against aggregate and window functions, + for database backends that support the "FILTER" clause. + + The expression:: + + func.count(1).filter(True) + + is shorthand for:: + + from sqlalchemy import funcfilter + funcfilter(func.count(1), True) + + .. versionadded:: 1.0.0 + + .. seealso:: + + :class:`.FunctionFilter` + + :func:`.funcfilter` + + + """ + if not criterion: + return self + return FunctionFilter(self, *criterion) + + @property + def _from_objects(self): + return self.clauses._from_objects + + def get_children(self, **kwargs): + return self.clause_expr, + + def _copy_internals(self, clone=_clone, **kw): + self.clause_expr = clone(self.clause_expr, **kw) + self._reset_exported() + FunctionElement.clauses._reset(self) + + def within_group_type(self, within_group): + """For types that define their return type as based on the criteria + within a WITHIN GROUP (ORDER BY) expression, called by the + :class:`.WithinGroup` construct. + + Returns None by default, in which case the function's normal ``.type`` + is used. + + """ + + return None + + def alias(self, name=None, flat=False): + r"""Produce a :class:`.Alias` construct against this + :class:`.FunctionElement`. + + This construct wraps the function in a named alias which + is suitable for the FROM clause, in the style accepted for example + by PostgreSQL. + + e.g.:: + + from sqlalchemy.sql import column + + stmt = select([column('data_view')]).\ + select_from(SomeTable).\ + select_from(func.unnest(SomeTable.data).alias('data_view') + ) + + Would produce: + + .. sourcecode:: sql + + SELECT data_view + FROM sometable, unnest(sometable.data) AS data_view + + .. versionadded:: 0.9.8 The :meth:`.FunctionElement.alias` method + is now supported. Previously, this method's behavior was + undefined and did not behave consistently across versions. + + """ + + return Alias(self, name) + + def select(self): + """Produce a :func:`~.expression.select` construct + against this :class:`.FunctionElement`. + + This is shorthand for:: + + s = select([function_element]) + + """ + s = Select([self]) + if self._execution_options: + s = s.execution_options(**self._execution_options) + return s + + def scalar(self): + """Execute this :class:`.FunctionElement` against an embedded + 'bind' and return a scalar value. + + This first calls :meth:`~.FunctionElement.select` to + produce a SELECT construct. + + Note that :class:`.FunctionElement` can be passed to + the :meth:`.Connectable.scalar` method of :class:`.Connection` + or :class:`.Engine`. + + """ + return self.select().execute().scalar() + + def execute(self): + """Execute this :class:`.FunctionElement` against an embedded + 'bind'. + + This first calls :meth:`~.FunctionElement.select` to + produce a SELECT construct. 
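+
+        E.g., a minimal sketch (``engine`` here is an assumed,
+        pre-configured :class:`.Engine`, shown for illustration only)::
+
+            from sqlalchemy import create_engine, func
+
+            engine = create_engine("sqlite://")
+            # the function carries its own bind, so it can be executed
+            # standalone; equivalent to executing select([func...])
+            result = func.current_timestamp(bind=engine).execute()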
+ + Note that :class:`.FunctionElement` can be passed to + the :meth:`.Connectable.execute` method of :class:`.Connection` + or :class:`.Engine`. + + """ + return self.select().execute() + + def _bind_param(self, operator, obj, type_=None): + return BindParameter(None, obj, _compared_to_operator=operator, + _compared_to_type=self.type, unique=True, + type_=type_) + + def self_group(self, against=None): + # for the moment, we are parenthesizing all array-returning + # expressions against getitem. This may need to be made + # more portable if in the future we support other DBs + # besides postgresql. + if against is operators.getitem and \ + isinstance(self.type, sqltypes.ARRAY): + return Grouping(self) + else: + return super(FunctionElement, self).self_group(against=against) + + +class _FunctionGenerator(object): + """Generate :class:`.Function` objects based on getattr calls.""" + + def __init__(self, **opts): + self.__names = [] + self.opts = opts + + def __getattr__(self, name): + # passthru __ attributes; fixes pydoc + if name.startswith('__'): + try: + return self.__dict__[name] + except KeyError: + raise AttributeError(name) + + elif name.endswith('_'): + name = name[0:-1] + f = _FunctionGenerator(**self.opts) + f.__names = list(self.__names) + [name] + return f + + def __call__(self, *c, **kwargs): + o = self.opts.copy() + o.update(kwargs) + + tokens = len(self.__names) + + if tokens == 2: + package, fname = self.__names + elif tokens == 1: + package, fname = "_default", self.__names[0] + else: + package = None + + if package is not None: + func = _registry[package].get(fname) + if func is not None: + return func(*c, **o) + + return Function(self.__names[-1], + packagenames=self.__names[0:-1], *c, **o) + + +func = _FunctionGenerator() +"""Generate SQL function expressions. + + :data:`.func` is a special object instance which generates SQL + functions based on name-based attributes, e.g.:: + + >>> print(func.count(1)) + count(:param_1) + + The element is a column-oriented SQL element like any other, and is + used in that way:: + + >>> print(select([func.count(table.c.id)])) + SELECT count(sometable.id) FROM sometable + + Any name can be given to :data:`.func`. If the function name is unknown to + SQLAlchemy, it will be rendered exactly as is. For common SQL functions + which SQLAlchemy is aware of, the name may be interpreted as a *generic + function* which will be compiled appropriately to the target database:: + + >>> print(func.current_timestamp()) + CURRENT_TIMESTAMP + + To call functions which are present in dot-separated packages, + specify them in the same manner:: + + >>> print(func.stats.yield_curve(5, 10)) + stats.yield_curve(:yield_curve_1, :yield_curve_2) + + SQLAlchemy can be made aware of the return type of functions to enable + type-specific lexical and result-based behavior. For example, to ensure + that a string-based function returns a Unicode value and is similarly + treated as a string in expressions, specify + :class:`~sqlalchemy.types.Unicode` as the type: + + >>> print(func.my_string(u'hi', type_=Unicode) + ' ' + + ... func.my_string(u'there', type_=Unicode)) + my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3) + + The object returned by a :data:`.func` call is usually an instance of + :class:`.Function`. + This object meets the "column" interface, including comparison and labeling + functions. 
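+
+    For example, a sketch of that column interface (``table`` is assumed
+    to be an existing :class:`.Table`)::
+
+        # comparison produces a boolean clause element
+        expr = func.char_length(table.c.name) > 5
+        # labeling works as for any other column expression
+        total = func.count(table.c.id).label('total')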
The object can also be passed the :meth:`~.Connectable.execute` + method of a :class:`.Connection` or :class:`.Engine`, where it will be + wrapped inside of a SELECT statement first:: + + print(connection.execute(func.current_timestamp()).scalar()) + + In a few exception cases, the :data:`.func` accessor + will redirect a name to a built-in expression such as :func:`.cast` + or :func:`.extract`, as these names have well-known meaning + but are not exactly the same as "functions" from a SQLAlchemy + perspective. + + .. versionadded:: 0.8 :data:`.func` can return non-function expression + constructs for common quasi-functional names like :func:`.cast` + and :func:`.extract`. + + Functions which are interpreted as "generic" functions know how to + calculate their return type automatically. For a listing of known generic + functions, see :ref:`generic_functions`. + + .. note:: + + The :data:`.func` construct has only limited support for calling + standalone "stored procedures", especially those with special + parameterization concerns. + + See the section :ref:`stored_procedures` for details on how to use + the DBAPI-level ``callproc()`` method for fully traditional stored + procedures. + +""" + +modifier = _FunctionGenerator(group=False) + + +class Function(FunctionElement): + """Describe a named SQL function. + + See the superclass :class:`.FunctionElement` for a description + of public methods. + + .. seealso:: + + :data:`.func` - namespace which produces registered or ad-hoc + :class:`.Function` instances. + + :class:`.GenericFunction` - allows creation of registered function + types. + + """ + + __visit_name__ = 'function' + + def __init__(self, name, *clauses, **kw): + """Construct a :class:`.Function`. + + The :data:`.func` construct is normally used to construct + new :class:`.Function` instances. + + """ + self.packagenames = kw.pop('packagenames', None) or [] + self.name = name + self._bind = kw.get('bind', None) + self.type = sqltypes.to_instance(kw.get('type_', None)) + + FunctionElement.__init__(self, *clauses, **kw) + + def _bind_param(self, operator, obj, type_=None): + return BindParameter(self.name, obj, + _compared_to_operator=operator, + _compared_to_type=self.type, + type_=type_, + unique=True) + + +class _GenericMeta(VisitableType): + def __init__(cls, clsname, bases, clsdict): + if annotation.Annotated not in cls.__mro__: + cls.name = name = clsdict.get('name', clsname) + cls.identifier = identifier = clsdict.get('identifier', name) + package = clsdict.pop('package', '_default') + # legacy + if '__return_type__' in clsdict: + cls.type = clsdict['__return_type__'] + register_function(identifier, cls, package) + super(_GenericMeta, cls).__init__(clsname, bases, clsdict) + + +class GenericFunction(util.with_metaclass(_GenericMeta, Function)): + """Define a 'generic' function. + + A generic function is a pre-established :class:`.Function` + class that is instantiated automatically when called + by name from the :data:`.func` attribute. Note that + calling any name from :data:`.func` has the effect that + a new :class:`.Function` instance is created automatically, + given that name. The primary use case for defining + a :class:`.GenericFunction` class is so that a function + of a particular name may be given a fixed return type. + It can also include custom argument parsing schemes as well + as additional methods. + + Subclasses of :class:`.GenericFunction` are automatically + registered under the name of the class. 
For + example, a user-defined function ``as_utc()`` would + be available immediately:: + + from sqlalchemy.sql.functions import GenericFunction + from sqlalchemy.types import DateTime + + class as_utc(GenericFunction): + type = DateTime + + print select([func.as_utc()]) + + User-defined generic functions can be organized into + packages by specifying the "package" attribute when defining + :class:`.GenericFunction`. Third party libraries + containing many functions may want to use this in order + to avoid name conflicts with other systems. For example, + if our ``as_utc()`` function were part of a package + "time":: + + class as_utc(GenericFunction): + type = DateTime + package = "time" + + The above function would be available from :data:`.func` + using the package name ``time``:: + + print select([func.time.as_utc()]) + + A final option is to allow the function to be accessed + from one name in :data:`.func` but to render as a different name. + The ``identifier`` attribute will override the name used to + access the function as loaded from :data:`.func`, but will retain + the usage of ``name`` as the rendered name:: + + class GeoBuffer(GenericFunction): + type = Geometry + package = "geo" + name = "ST_Buffer" + identifier = "buffer" + + The above function will render as follows:: + + >>> print func.geo.buffer() + ST_Buffer() + + .. versionadded:: 0.8 :class:`.GenericFunction` now supports + automatic registration of new functions as well as package + and custom naming support. + + .. versionchanged:: 0.8 The attribute name ``type`` is used + to specify the function's return type at the class level. + Previously, the name ``__return_type__`` was used. This + name is still recognized for backwards-compatibility. + + """ + + coerce_arguments = True + + def __init__(self, *args, **kwargs): + parsed_args = kwargs.pop('_parsed_args', None) + if parsed_args is None: + parsed_args = [_literal_as_binds(c, self.name) for c in args] + self.packagenames = [] + self._bind = kwargs.get('bind', None) + self.clause_expr = ClauseList( + operator=operators.comma_op, + group_contents=True, *parsed_args).self_group() + self.type = sqltypes.to_instance( + kwargs.pop("type_", None) or getattr(self, 'type', None)) + +register_function("cast", Cast) +register_function("extract", Extract) + + +class next_value(GenericFunction): + """Represent the 'next value', given a :class:`.Sequence` + as its single argument. + + Compiles into the appropriate function on each backend, + or will raise NotImplementedError if used on a backend + that does not provide support for sequences. + + """ + type = sqltypes.Integer() + name = "next_value" + + def __init__(self, seq, **kw): + assert isinstance(seq, schema.Sequence), \ + "next_value() accepts a Sequence object as input." 
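+        # Illustrative usage note (names are examples only):
+        # func.next_value(Sequence('user_id_seq')) compiles to the
+        # backend's "next value" expression, e.g. nextval('user_id_seq')
+        # on PostgreSQL.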
+ self._bind = kw.get('bind', None) + self.sequence = seq + + @property + def _from_objects(self): + return [] + + +class AnsiFunction(GenericFunction): + def __init__(self, **kwargs): + GenericFunction.__init__(self, **kwargs) + + +class ReturnTypeFromArgs(GenericFunction): + """Define a function whose return type is the same as its arguments.""" + + def __init__(self, *args, **kwargs): + args = [_literal_as_binds(c, self.name) for c in args] + kwargs.setdefault('type_', _type_from_args(args)) + kwargs['_parsed_args'] = args + super(ReturnTypeFromArgs, self).__init__(*args, **kwargs) + + +class coalesce(ReturnTypeFromArgs): + pass + + +class max(ReturnTypeFromArgs): + pass + + +class min(ReturnTypeFromArgs): + pass + + +class sum(ReturnTypeFromArgs): + pass + + +class now(GenericFunction): + type = sqltypes.DateTime + + +class concat(GenericFunction): + type = sqltypes.String + + +class char_length(GenericFunction): + type = sqltypes.Integer + + def __init__(self, arg, **kwargs): + GenericFunction.__init__(self, arg, **kwargs) + + +class random(GenericFunction): + pass + + +class count(GenericFunction): + r"""The ANSI COUNT aggregate function. With no arguments, + emits COUNT \*. + + """ + type = sqltypes.Integer + + def __init__(self, expression=None, **kwargs): + if expression is None: + expression = literal_column('*') + super(count, self).__init__(expression, **kwargs) + + +class current_date(AnsiFunction): + type = sqltypes.Date + + +class current_time(AnsiFunction): + type = sqltypes.Time + + +class current_timestamp(AnsiFunction): + type = sqltypes.DateTime + + +class current_user(AnsiFunction): + type = sqltypes.String + + +class localtime(AnsiFunction): + type = sqltypes.DateTime + + +class localtimestamp(AnsiFunction): + type = sqltypes.DateTime + + +class session_user(AnsiFunction): + type = sqltypes.String + + +class sysdate(AnsiFunction): + type = sqltypes.DateTime + + +class user(AnsiFunction): + type = sqltypes.String + + +class array_agg(GenericFunction): + """support for the ARRAY_AGG function. + + The ``func.array_agg(expr)`` construct returns an expression of + type :class:`.types.ARRAY`. + + e.g.:: + + stmt = select([func.array_agg(table.c.values)[2:5]]) + + .. versionadded:: 1.1 + + .. seealso:: + + :func:`.postgresql.array_agg` - PostgreSQL-specific version that + returns :class:`.postgresql.ARRAY`, which has PG-specific operators added. + + """ + + type = sqltypes.ARRAY + + def __init__(self, *args, **kwargs): + args = [_literal_as_binds(c) for c in args] + kwargs.setdefault('type_', self.type(_type_from_args(args))) + kwargs['_parsed_args'] = args + super(array_agg, self).__init__(*args, **kwargs) + + +class OrderedSetAgg(GenericFunction): + """Define a function where the return type is based on the sort + expression type as defined by the expression passed to the + :meth:`.FunctionElement.within_group` method.""" + + array_for_multi_clause = False + + def within_group_type(self, within_group): + func_clauses = self.clause_expr.element + order_by = sqlutil.unwrap_order_by(within_group.order_by) + if self.array_for_multi_clause and len(func_clauses.clauses) > 1: + return sqltypes.ARRAY(order_by[0].type) + else: + return order_by[0].type + + +class mode(OrderedSetAgg): + """implement the ``mode`` ordered-set aggregate function. + + This function must be used with the :meth:`.FunctionElement.within_group` + modifier to supply a sort expression to operate upon. + + The return type of this function is the same as the sort expression. + + .. 
versionadded:: 1.1 + + """ + + +class percentile_cont(OrderedSetAgg): + """implement the ``percentile_cont`` ordered-set aggregate function. + + This function must be used with the :meth:`.FunctionElement.within_group` + modifier to supply a sort expression to operate upon. + + The return type of this function is the same as the sort expression, + or if the arguments are an array, an :class:`.types.ARRAY` of the sort + expression's type. + + .. versionadded:: 1.1 + + """ + + array_for_multi_clause = True + + +class percentile_disc(OrderedSetAgg): + """implement the ``percentile_disc`` ordered-set aggregate function. + + This function must be used with the :meth:`.FunctionElement.within_group` + modifier to supply a sort expression to operate upon. + + The return type of this function is the same as the sort expression, + or if the arguments are an array, an :class:`.types.ARRAY` of the sort + expression's type. + + .. versionadded:: 1.1 + + """ + + array_for_multi_clause = True + + +class rank(GenericFunction): + """Implement the ``rank`` hypothetical-set aggregate function. + + This function must be used with the :meth:`.FunctionElement.within_group` + modifier to supply a sort expression to operate upon. + + The return type of this function is :class:`.Integer`. + + .. versionadded:: 1.1 + + """ + type = sqltypes.Integer() + + +class dense_rank(GenericFunction): + """Implement the ``dense_rank`` hypothetical-set aggregate function. + + This function must be used with the :meth:`.FunctionElement.within_group` + modifier to supply a sort expression to operate upon. + + The return type of this function is :class:`.Integer`. + + .. versionadded:: 1.1 + + """ + type = sqltypes.Integer() + + +class percent_rank(GenericFunction): + """Implement the ``percent_rank`` hypothetical-set aggregate function. + + This function must be used with the :meth:`.FunctionElement.within_group` + modifier to supply a sort expression to operate upon. + + The return type of this function is :class:`.Numeric`. + + .. versionadded:: 1.1 + + """ + type = sqltypes.Numeric() + + +class cume_dist(GenericFunction): + """Implement the ``cume_dist`` hypothetical-set aggregate function. + + This function must be used with the :meth:`.FunctionElement.within_group` + modifier to supply a sort expression to operate upon. + + The return type of this function is :class:`.Numeric`. + + .. versionadded:: 1.1 + + """ + type = sqltypes.Numeric() diff --git a/app/lib/sqlalchemy/sql/naming.py b/app/lib/sqlalchemy/sql/naming.py new file mode 100644 index 0000000..d93c916 --- /dev/null +++ b/app/lib/sqlalchemy/sql/naming.py @@ -0,0 +1,146 @@ +# sqlalchemy/naming.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Establish constraint and index naming conventions. + + +""" + +from .schema import Constraint, ForeignKeyConstraint, PrimaryKeyConstraint, \ + UniqueConstraint, CheckConstraint, Index, Table, Column +from .. import event, events +from .. 
import exc +from .elements import _truncated_label, _defer_name, _defer_none_name, conv +import re + + +class ConventionDict(object): + + def __init__(self, const, table, convention): + self.const = const + self._is_fk = isinstance(const, ForeignKeyConstraint) + self.table = table + self.convention = convention + self._const_name = const.name + + def _key_table_name(self): + return self.table.name + + def _column_X(self, idx): + if self._is_fk: + fk = self.const.elements[idx] + return fk.parent + else: + return list(self.const.columns)[idx] + + def _key_constraint_name(self): + if isinstance(self._const_name, (type(None), _defer_none_name)): + raise exc.InvalidRequestError( + "Naming convention including " + "%(constraint_name)s token requires that " + "constraint is explicitly named." + ) + if not isinstance(self._const_name, conv): + self.const.name = None + return self._const_name + + def _key_column_X_name(self, idx): + return self._column_X(idx).name + + def _key_column_X_label(self, idx): + return self._column_X(idx)._label + + def _key_referred_table_name(self): + fk = self.const.elements[0] + refs = fk.target_fullname.split(".") + if len(refs) == 3: + refschema, reftable, refcol = refs + else: + reftable, refcol = refs + return reftable + + def _key_referred_column_X_name(self, idx): + fk = self.const.elements[idx] + refs = fk.target_fullname.split(".") + if len(refs) == 3: + refschema, reftable, refcol = refs + else: + reftable, refcol = refs + return refcol + + def __getitem__(self, key): + if key in self.convention: + return self.convention[key](self.const, self.table) + elif hasattr(self, '_key_%s' % key): + return getattr(self, '_key_%s' % key)() + else: + col_template = re.match(r".*_?column_(\d+)_.+", key) + if col_template: + idx = col_template.group(1) + attr = "_key_" + key.replace(idx, "X") + idx = int(idx) + if hasattr(self, attr): + return getattr(self, attr)(idx) + raise KeyError(key) + +_prefix_dict = { + Index: "ix", + PrimaryKeyConstraint: "pk", + CheckConstraint: "ck", + UniqueConstraint: "uq", + ForeignKeyConstraint: "fk" +} + + +def _get_convention(dict_, key): + + for super_ in key.__mro__: + if super_ in _prefix_dict and _prefix_dict[super_] in dict_: + return dict_[_prefix_dict[super_]] + elif super_ in dict_: + return dict_[super_] + else: + return None + + +def _constraint_name_for_table(const, table): + metadata = table.metadata + convention = _get_convention(metadata.naming_convention, type(const)) + + if isinstance(const.name, conv): + return const.name + elif convention is not None and \ + not isinstance(const.name, conv) and \ + ( + const.name is None or + "constraint_name" in convention or + isinstance(const.name, _defer_name)): + return conv( + convention % ConventionDict(const, table, + metadata.naming_convention) + ) + elif isinstance(convention, _defer_none_name): + return None + + +@event.listens_for(Constraint, "after_parent_attach") +@event.listens_for(Index, "after_parent_attach") +def _constraint_name(const, table): + if isinstance(table, Column): + # for column-attached constraint, set another event + # to link the column attached to the table as this constraint + # associated with the table. 
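+        # Illustrative sketch of the effect: given
+        #     metadata = MetaData(naming_convention={
+        #         "uq": "uq_%(table_name)s_%(column_0_name)s"})
+        # a UniqueConstraint on users.name attached before the column
+        # joins a Table is still named "uq_users_name" once the
+        # column-to-table association is made.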
+        event.listen(table, "after_parent_attach",
+                     lambda col, table: _constraint_name(const, table)
+                     )
+    elif isinstance(table, Table):
+        if isinstance(const.name, (conv, _defer_name)):
+            return
+
+        newname = _constraint_name_for_table(const, table)
+        if newname is not None:
+            const.name = newname
diff --git a/app/lib/sqlalchemy/sql/operators.py b/app/lib/sqlalchemy/sql/operators.py
new file mode 100644
index 0000000..d883392
--- /dev/null
+++ b/app/lib/sqlalchemy/sql/operators.py
@@ -0,0 +1,1014 @@
+# sql/operators.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""Defines operators used in SQL expressions."""
+
+from .. import util
+
+from operator import (
+    and_, or_, inv, add, mul, sub, mod, truediv, lt, le, ne, gt, ge, eq, neg,
+    getitem, lshift, rshift, contains
+)
+
+if util.py2k:
+    from operator import div
+else:
+    div = truediv
+
+
+class Operators(object):
+    """Base of comparison and logical operators.
+
+    Implements base methods
+    :meth:`~sqlalchemy.sql.operators.Operators.operate` and
+    :meth:`~sqlalchemy.sql.operators.Operators.reverse_operate`, as well as
+    :meth:`~sqlalchemy.sql.operators.Operators.__and__`,
+    :meth:`~sqlalchemy.sql.operators.Operators.__or__`,
+    :meth:`~sqlalchemy.sql.operators.Operators.__invert__`.
+
+    It is usually used via its most common subclass
+    :class:`.ColumnOperators`.
+
+    """
+    __slots__ = ()
+
+    def __and__(self, other):
+        """Implement the ``&`` operator.
+
+        When used with SQL expressions, results in an
+        AND operation, equivalent to
+        :func:`~.expression.and_`, that is::
+
+            a & b
+
+        is equivalent to::
+
+            from sqlalchemy import and_
+            and_(a, b)
+
+        Care should be taken when using ``&`` regarding
+        operator precedence; the ``&`` operator has the highest precedence.
+        The operands should be enclosed in parentheses if they contain
+        further sub expressions::
+
+            (a == 2) & (b == 4)
+
+        """
+        return self.operate(and_, other)
+
+    def __or__(self, other):
+        """Implement the ``|`` operator.
+
+        When used with SQL expressions, results in an
+        OR operation, equivalent to
+        :func:`~.expression.or_`, that is::
+
+            a | b
+
+        is equivalent to::
+
+            from sqlalchemy import or_
+            or_(a, b)
+
+        Care should be taken when using ``|`` regarding
+        operator precedence; the ``|`` operator has the highest precedence.
+        The operands should be enclosed in parentheses if they contain
+        further sub expressions::
+
+            (a == 2) | (b == 4)
+
+        """
+        return self.operate(or_, other)
+
+    def __invert__(self):
+        """Implement the ``~`` operator.
+
+        When used with SQL expressions, results in a
+        NOT operation, equivalent to
+        :func:`~.expression.not_`, that is::
+
+            ~a
+
+        is equivalent to::
+
+            from sqlalchemy import not_
+            not_(a)
+
+        """
+        return self.operate(inv)
+
+    def op(self, opstring, precedence=0, is_comparison=False):
+        """Produce a generic operator function.
+
+        e.g.::
+
+            somecolumn.op("*")(5)
+
+        produces::
+
+            somecolumn * 5
+
+        This function can also be used to make bitwise operators explicit. For
+        example::
+
+            somecolumn.op('&')(0xff)
+
+        is a bitwise AND of the value in ``somecolumn``.
+
+        :param operator: a string which will be output as the infix operator
+          between this element and the expression passed to the
+          generated function.
+
+        :param precedence: precedence to apply to the operator, when
+          parenthesizing expressions. A lower number will cause the expression
+          to be parenthesized when applied against another operator with
+          higher precedence. The default value of ``0`` is lower than all
+          operators except for the comma (``,``) and ``AS`` operators.
+          A value of 100 will be higher or equal to all operators, and -100
+          will be lower than or equal to all operators.
+
+          .. versionadded:: 0.8 - added the 'precedence' argument.
+
+        :param is_comparison: if True, the operator will be considered as a
+          "comparison" operator, that is, one which evaluates to a boolean
+          true/false value, like ``==``, ``>``, etc. This flag should be set
+          so that ORM relationships can establish that the operator is a
+          comparison operator when used in a custom join condition.
+
+          .. versionadded:: 0.9.2 - added the
+             :paramref:`.Operators.op.is_comparison` flag.
+
+        .. seealso::
+
+            :ref:`types_operators`
+
+            :ref:`relationship_custom_operator`
+
+        """
+        operator = custom_op(opstring, precedence, is_comparison)
+
+        def against(other):
+            return operator(self, other)
+        return against
+
+    def operate(self, op, *other, **kwargs):
+        r"""Operate on an argument.
+
+        This is the lowest level of operation, raises
+        :class:`NotImplementedError` by default.
+
+        Overriding this on a subclass can allow common
+        behavior to be applied to all operations.
+        For example, overriding :class:`.ColumnOperators`
+        to apply ``func.lower()`` to the left and right
+        side::
+
+            class MyComparator(ColumnOperators):
+                def operate(self, op, other):
+                    return op(func.lower(self), func.lower(other))
+
+        :param op: Operator callable.
+        :param \*other: the 'other' side of the operation. Will
+         be a single scalar for most operations.
+        :param \**kwargs: modifiers. These may be passed by special
+         operators such as :meth:`ColumnOperators.contains`.
+
+        """
+        raise NotImplementedError(str(op))
+
+    def reverse_operate(self, op, other, **kwargs):
+        """Reverse operate on an argument.
+
+        Usage is the same as :meth:`operate`.
+
+        """
+        raise NotImplementedError(str(op))
+
+
+class custom_op(object):
+    """Represent a 'custom' operator.
+
+    :class:`.custom_op` is normally instantiated when the
+    :meth:`.ColumnOperators.op` method is used to create a
+    custom operator callable. The class can also be used directly
+    when programmatically constructing expressions. E.g.
+    to represent the "factorial" operation::
+
+        from sqlalchemy.sql import UnaryExpression
+        from sqlalchemy.sql import operators
+        from sqlalchemy import Numeric
+
+        unary = UnaryExpression(table.c.somecolumn,
+                                modifier=operators.custom_op("!"),
+                                type_=Numeric)
+
+    """
+    __name__ = 'custom_op'
+
+    def __init__(
+            self, opstring, precedence=0, is_comparison=False,
+            natural_self_precedent=False, eager_grouping=False):
+        self.opstring = opstring
+        self.precedence = precedence
+        self.is_comparison = is_comparison
+        self.natural_self_precedent = natural_self_precedent
+        self.eager_grouping = eager_grouping
+
+    def __eq__(self, other):
+        return isinstance(other, custom_op) and \
+            other.opstring == self.opstring
+
+    def __hash__(self):
+        return id(self)
+
+    def __call__(self, left, right, **kw):
+        return left.operate(self, right, **kw)
+
+
+class ColumnOperators(Operators):
+    """Defines boolean, comparison, and other operators for
+    :class:`.ColumnElement` expressions.
+ + By default, all methods call down to + :meth:`.operate` or :meth:`.reverse_operate`, + passing in the appropriate operator function from the + Python builtin ``operator`` module or + a SQLAlchemy-specific operator function from + :mod:`sqlalchemy.expression.operators`. For example + the ``__eq__`` function:: + + def __eq__(self, other): + return self.operate(operators.eq, other) + + Where ``operators.eq`` is essentially:: + + def eq(a, b): + return a == b + + The core column expression unit :class:`.ColumnElement` + overrides :meth:`.Operators.operate` and others + to return further :class:`.ColumnElement` constructs, + so that the ``==`` operation above is replaced by a clause + construct. + + See also: + + :ref:`types_operators` + + :attr:`.TypeEngine.comparator_factory` + + :class:`.ColumnOperators` + + :class:`.PropComparator` + + """ + + __slots__ = () + + timetuple = None + """Hack, allows datetime objects to be compared on the LHS.""" + + def __lt__(self, other): + """Implement the ``<`` operator. + + In a column context, produces the clause ``a < b``. + + """ + return self.operate(lt, other) + + def __le__(self, other): + """Implement the ``<=`` operator. + + In a column context, produces the clause ``a <= b``. + + """ + return self.operate(le, other) + + __hash__ = Operators.__hash__ + + def __eq__(self, other): + """Implement the ``==`` operator. + + In a column context, produces the clause ``a = b``. + If the target is ``None``, produces ``a IS NULL``. + + """ + return self.operate(eq, other) + + def __ne__(self, other): + """Implement the ``!=`` operator. + + In a column context, produces the clause ``a != b``. + If the target is ``None``, produces ``a IS NOT NULL``. + + """ + return self.operate(ne, other) + + def is_distinct_from(self, other): + """Implement the ``IS DISTINCT FROM`` operator. + + Renders "a IS DISTINCT FROM b" on most platforms; + on some such as SQLite may render "a IS NOT b". + + .. versionadded:: 1.1 + + """ + return self.operate(is_distinct_from, other) + + def isnot_distinct_from(self, other): + """Implement the ``IS NOT DISTINCT FROM`` operator. + + Renders "a IS NOT DISTINCT FROM b" on most platforms; + on some such as SQLite may render "a IS b". + + .. versionadded:: 1.1 + + """ + return self.operate(isnot_distinct_from, other) + + def __gt__(self, other): + """Implement the ``>`` operator. + + In a column context, produces the clause ``a > b``. + + """ + return self.operate(gt, other) + + def __ge__(self, other): + """Implement the ``>=`` operator. + + In a column context, produces the clause ``a >= b``. + + """ + return self.operate(ge, other) + + def __neg__(self): + """Implement the ``-`` operator. + + In a column context, produces the clause ``-a``. + + """ + return self.operate(neg) + + def __contains__(self, other): + return self.operate(contains, other) + + def __getitem__(self, index): + """Implement the [] operator. + + This can be used by some database-specific types + such as PostgreSQL ARRAY and HSTORE. + + """ + return self.operate(getitem, index) + + def __lshift__(self, other): + """implement the << operator. + + Not used by SQLAlchemy core, this is provided + for custom operator systems which want to use + << as an extension point. + """ + return self.operate(lshift, other) + + def __rshift__(self, other): + """implement the >> operator. + + Not used by SQLAlchemy core, this is provided + for custom operator systems which want to use + >> as an extension point. 
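+
+        A custom system might give ``>>`` a meaning by overriding
+        :meth:`.Operators.operate`, or explicitly via
+        :meth:`.Operators.op` (an illustrative sketch)::
+
+            somecolumn.op('>>')(2)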
+ """ + return self.operate(rshift, other) + + def concat(self, other): + """Implement the 'concat' operator. + + In a column context, produces the clause ``a || b``, + or uses the ``concat()`` operator on MySQL. + + """ + return self.operate(concat_op, other) + + def like(self, other, escape=None): + r"""Implement the ``like`` operator. + + In a column context, produces the expression:: + + a LIKE other + + E.g.:: + + stmt = select([sometable]).\ + where(sometable.c.column.like("%foobar%")) + + :param other: expression to be compared + :param escape: optional escape character, renders the ``ESCAPE`` + keyword, e.g.:: + + somecolumn.like("foo/%bar", escape="/") + + .. seealso:: + + :meth:`.ColumnOperators.ilike` + + """ + return self.operate(like_op, other, escape=escape) + + def ilike(self, other, escape=None): + r"""Implement the ``ilike`` operator, e.g. case insensitive LIKE. + + In a column context, produces an expression either of the form:: + + lower(a) LIKE lower(other) + + Or on backends that support the ILIKE operator:: + + a ILIKE other + + E.g.:: + + stmt = select([sometable]).\ + where(sometable.c.column.ilike("%foobar%")) + + :param other: expression to be compared + :param escape: optional escape character, renders the ``ESCAPE`` + keyword, e.g.:: + + somecolumn.ilike("foo/%bar", escape="/") + + .. seealso:: + + :meth:`.ColumnOperators.like` + + """ + return self.operate(ilike_op, other, escape=escape) + + def in_(self, other): + """Implement the ``in`` operator. + + In a column context, produces the clause ``a IN other``. + "other" may be a tuple/list of column expressions, + or a :func:`~.expression.select` construct. + + """ + return self.operate(in_op, other) + + def notin_(self, other): + """implement the ``NOT IN`` operator. + + This is equivalent to using negation with + :meth:`.ColumnOperators.in_`, i.e. ``~x.in_(y)``. + + .. versionadded:: 0.8 + + .. seealso:: + + :meth:`.ColumnOperators.in_` + + """ + return self.operate(notin_op, other) + + def notlike(self, other, escape=None): + """implement the ``NOT LIKE`` operator. + + This is equivalent to using negation with + :meth:`.ColumnOperators.like`, i.e. ``~x.like(y)``. + + .. versionadded:: 0.8 + + .. seealso:: + + :meth:`.ColumnOperators.like` + + """ + return self.operate(notlike_op, other, escape=escape) + + def notilike(self, other, escape=None): + """implement the ``NOT ILIKE`` operator. + + This is equivalent to using negation with + :meth:`.ColumnOperators.ilike`, i.e. ``~x.ilike(y)``. + + .. versionadded:: 0.8 + + .. seealso:: + + :meth:`.ColumnOperators.ilike` + + """ + return self.operate(notilike_op, other, escape=escape) + + def is_(self, other): + """Implement the ``IS`` operator. + + Normally, ``IS`` is generated automatically when comparing to a + value of ``None``, which resolves to ``NULL``. However, explicit + usage of ``IS`` may be desirable if comparing to boolean values + on certain platforms. + + .. versionadded:: 0.7.9 + + .. seealso:: :meth:`.ColumnOperators.isnot` + + """ + return self.operate(is_, other) + + def isnot(self, other): + """Implement the ``IS NOT`` operator. + + Normally, ``IS NOT`` is generated automatically when comparing to a + value of ``None``, which resolves to ``NULL``. However, explicit + usage of ``IS NOT`` may be desirable if comparing to boolean values + on certain platforms. + + .. versionadded:: 0.7.9 + + .. 
seealso:: :meth:`.ColumnOperators.is_`
+
+        """
+        return self.operate(isnot, other)
+
+    def startswith(self, other, **kwargs):
+        """Implement the ``startswith`` operator.
+
+        In a column context, produces the clause ``LIKE '<other>%'``
+
+        """
+        return self.operate(startswith_op, other, **kwargs)
+
+    def endswith(self, other, **kwargs):
+        """Implement the 'endswith' operator.
+
+        In a column context, produces the clause ``LIKE '%<other>'``
+
+        """
+        return self.operate(endswith_op, other, **kwargs)
+
+    def contains(self, other, **kwargs):
+        """Implement the 'contains' operator.
+
+        In a column context, produces the clause ``LIKE '%<other>%'``
+
+        """
+        return self.operate(contains_op, other, **kwargs)
+
+    def match(self, other, **kwargs):
+        """Implements a database-specific 'match' operator.
+
+        :meth:`~.ColumnOperators.match` attempts to resolve to
+        a MATCH-like function or operator provided by the backend.
+        Examples include:
+
+        * PostgreSQL - renders ``x @@ to_tsquery(y)``
+        * MySQL - renders ``MATCH (x) AGAINST (y IN BOOLEAN MODE)``
+        * Oracle - renders ``CONTAINS(x, y)``
+        * other backends may provide special implementations.
+        * Backends without any special implementation will emit
+          the operator as "MATCH". This is compatible with SQLite, for
+          example.
+
+        """
+        return self.operate(match_op, other, **kwargs)
+
+    def desc(self):
+        """Produce a :func:`~.expression.desc` clause against the
+        parent object."""
+        return self.operate(desc_op)
+
+    def asc(self):
+        """Produce a :func:`~.expression.asc` clause against the
+        parent object."""
+        return self.operate(asc_op)
+
+    def nullsfirst(self):
+        """Produce a :func:`~.expression.nullsfirst` clause against the
+        parent object."""
+        return self.operate(nullsfirst_op)
+
+    def nullslast(self):
+        """Produce a :func:`~.expression.nullslast` clause against the
+        parent object."""
+        return self.operate(nullslast_op)
+
+    def collate(self, collation):
+        """Produce a :func:`~.expression.collate` clause against
+        the parent object, given the collation string."""
+        return self.operate(collate, collation)
+
+    def __radd__(self, other):
+        """Implement the ``+`` operator in reverse.
+
+        See :meth:`.ColumnOperators.__add__`.
+
+        """
+        return self.reverse_operate(add, other)
+
+    def __rsub__(self, other):
+        """Implement the ``-`` operator in reverse.
+
+        See :meth:`.ColumnOperators.__sub__`.
+
+        """
+        return self.reverse_operate(sub, other)
+
+    def __rmul__(self, other):
+        """Implement the ``*`` operator in reverse.
+
+        See :meth:`.ColumnOperators.__mul__`.
+
+        """
+        return self.reverse_operate(mul, other)
+
+    def __rdiv__(self, other):
+        """Implement the ``/`` operator in reverse.
+
+        See :meth:`.ColumnOperators.__div__`.
+
+        """
+        return self.reverse_operate(div, other)
+
+    def __rmod__(self, other):
+        """Implement the ``%`` operator in reverse.
+
+        See :meth:`.ColumnOperators.__mod__`.
+
+        """
+        return self.reverse_operate(mod, other)
+
+    def between(self, cleft, cright, symmetric=False):
+        """Produce a :func:`~.expression.between` clause against
+        the parent object, given the lower and upper range.
+
+        """
+        return self.operate(between_op, cleft, cright, symmetric=symmetric)
+
+    def distinct(self):
+        """Produce a :func:`~.expression.distinct` clause against the
+        parent object.
+
+        """
+        return self.operate(distinct_op)
+
+    def any_(self):
+        """Produce a :func:`~.expression.any_` clause against the
+        parent object.
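+
+        E.g., comparing a value against an array column (an illustrative
+        sketch; requires a backend with ANY support, such as
+        PostgreSQL)::
+
+            stmt = select([table]).where(5 == table.c.somearray.any_())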
+ + .. versionadded:: 1.1 + + """ + return self.operate(all_op) + + def __add__(self, other): + """Implement the ``+`` operator. + + In a column context, produces the clause ``a + b`` + if the parent object has non-string affinity. + If the parent object has a string affinity, + produces the concatenation operator, ``a || b`` - + see :meth:`.ColumnOperators.concat`. + + """ + return self.operate(add, other) + + def __sub__(self, other): + """Implement the ``-`` operator. + + In a column context, produces the clause ``a - b``. + + """ + return self.operate(sub, other) + + def __mul__(self, other): + """Implement the ``*`` operator. + + In a column context, produces the clause ``a * b``. + + """ + return self.operate(mul, other) + + def __div__(self, other): + """Implement the ``/`` operator. + + In a column context, produces the clause ``a / b``. + + """ + return self.operate(div, other) + + def __mod__(self, other): + """Implement the ``%`` operator. + + In a column context, produces the clause ``a % b``. + + """ + return self.operate(mod, other) + + def __truediv__(self, other): + """Implement the ``//`` operator. + + In a column context, produces the clause ``a / b``. + + """ + return self.operate(truediv, other) + + def __rtruediv__(self, other): + """Implement the ``//`` operator in reverse. + + See :meth:`.ColumnOperators.__truediv__`. + + """ + return self.reverse_operate(truediv, other) + + +def from_(): + raise NotImplementedError() + + +def as_(): + raise NotImplementedError() + + +def exists(): + raise NotImplementedError() + + +def istrue(a): + raise NotImplementedError() + + +def isfalse(a): + raise NotImplementedError() + + +def is_distinct_from(a, b): + return a.is_distinct_from(b) + + +def isnot_distinct_from(a, b): + return a.isnot_distinct_from(b) + + +def is_(a, b): + return a.is_(b) + + +def isnot(a, b): + return a.isnot(b) + + +def collate(a, b): + return a.collate(b) + + +def op(a, opstring, b): + return a.op(opstring)(b) + + +def like_op(a, b, escape=None): + return a.like(b, escape=escape) + + +def notlike_op(a, b, escape=None): + return a.notlike(b, escape=escape) + + +def ilike_op(a, b, escape=None): + return a.ilike(b, escape=escape) + + +def notilike_op(a, b, escape=None): + return a.notilike(b, escape=escape) + + +def between_op(a, b, c, symmetric=False): + return a.between(b, c, symmetric=symmetric) + + +def notbetween_op(a, b, c, symmetric=False): + return a.notbetween(b, c, symmetric=symmetric) + + +def in_op(a, b): + return a.in_(b) + + +def notin_op(a, b): + return a.notin_(b) + + +def distinct_op(a): + return a.distinct() + + +def any_op(a): + return a.any_() + + +def all_op(a): + return a.all_() + + +def startswith_op(a, b, escape=None): + return a.startswith(b, escape=escape) + + +def notstartswith_op(a, b, escape=None): + return ~a.startswith(b, escape=escape) + + +def endswith_op(a, b, escape=None): + return a.endswith(b, escape=escape) + + +def notendswith_op(a, b, escape=None): + return ~a.endswith(b, escape=escape) + + +def contains_op(a, b, escape=None): + return a.contains(b, escape=escape) + + +def notcontains_op(a, b, escape=None): + return ~a.contains(b, escape=escape) + + +def match_op(a, b, **kw): + return a.match(b, **kw) + + +def notmatch_op(a, b, **kw): + return a.notmatch(b, **kw) + + +def comma_op(a, b): + raise NotImplementedError() + + +def concat_op(a, b): + return a.concat(b) + + +def desc_op(a): + return a.desc() + + +def asc_op(a): + return a.asc() + + +def nullsfirst_op(a): + return a.nullsfirst() + + +def nullslast_op(a): + 
return a.nullslast() + + +def json_getitem_op(a, b): + raise NotImplementedError() + + +def json_path_getitem_op(a, b): + raise NotImplementedError() + + +_commutative = set([eq, ne, add, mul]) + +_comparison = set([eq, ne, lt, gt, ge, le, between_op, like_op]) + + +def is_comparison(op): + return op in _comparison or \ + isinstance(op, custom_op) and op.is_comparison + + +def is_commutative(op): + return op in _commutative + + +def is_ordering_modifier(op): + return op in (asc_op, desc_op, + nullsfirst_op, nullslast_op) + + +def is_natural_self_precedent(op): + return op in _natural_self_precedent or \ + isinstance(op, custom_op) and op.natural_self_precedent + +_mirror = { + gt: lt, + ge: le, + lt: gt, + le: ge +} + + +def mirror(op): + """rotate a comparison operator 180 degrees. + + Note this is not the same as negation. + + """ + return _mirror.get(op, op) + + +_associative = _commutative.union([concat_op, and_, or_]).difference([eq, ne]) + +_natural_self_precedent = _associative.union([ + getitem, json_getitem_op, json_path_getitem_op]) +"""Operators where if we have (a op b) op c, we don't want to +parenthesize (a op b). + +""" + + +_asbool = util.symbol('_asbool', canonical=-10) +_smallest = util.symbol('_smallest', canonical=-100) +_largest = util.symbol('_largest', canonical=100) + +_PRECEDENCE = { + from_: 15, + any_op: 15, + all_op: 15, + getitem: 15, + json_getitem_op: 15, + json_path_getitem_op: 15, + + mul: 8, + truediv: 8, + div: 8, + mod: 8, + neg: 8, + add: 7, + sub: 7, + + concat_op: 6, + match_op: 6, + notmatch_op: 6, + + ilike_op: 6, + notilike_op: 6, + like_op: 6, + notlike_op: 6, + in_op: 6, + notin_op: 6, + + is_: 6, + isnot: 6, + + eq: 5, + ne: 5, + is_distinct_from: 5, + isnot_distinct_from: 5, + gt: 5, + lt: 5, + ge: 5, + le: 5, + + between_op: 5, + notbetween_op: 5, + distinct_op: 5, + inv: 5, + istrue: 5, + isfalse: 5, + and_: 3, + or_: 2, + comma_op: -1, + + desc_op: 3, + asc_op: 3, + collate: 4, + + as_: -1, + exists: 0, + + _asbool: -10, + _smallest: _smallest, + _largest: _largest +} + + +def is_precedent(operator, against): + if operator is against and is_natural_self_precedent(operator): + return False + else: + return (_PRECEDENCE.get(operator, + getattr(operator, 'precedence', _smallest)) <= + _PRECEDENCE.get(against, + getattr(against, 'precedence', _largest))) diff --git a/app/lib/sqlalchemy/sql/schema.py b/app/lib/sqlalchemy/sql/schema.py new file mode 100644 index 0000000..6699614 --- /dev/null +++ b/app/lib/sqlalchemy/sql/schema.py @@ -0,0 +1,4027 @@ +# sql/schema.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""The schema module provides the building blocks for database metadata. + +Each element within this module describes a database entity which can be +created and dropped, or is otherwise part of such an entity. Examples include +tables, columns, sequences, and indexes. + +All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as +defined in this module they are intended to be agnostic of any vendor-specific +constructs. + +A collection of entities are grouped into a unit called +:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of +schema elements, and can also be associated with an actual database connection +such that operations involving the contained elements can contact the database +as needed. 
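+
+For example, a minimal sketch of describing a table (names are
+illustrative)::
+
+    from sqlalchemy import MetaData, Table, Column, Integer, String
+
+    metadata = MetaData()
+    users = Table('users', metadata,
+                  Column('id', Integer, primary_key=True),
+                  Column('name', String(50)))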
+ +Two of the elements here also build upon their "syntactic" counterparts, which +are defined in :class:`~sqlalchemy.sql.expression.`, specifically +:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`. +Since these objects are part of the SQL expression language, they are usable +as components in SQL expressions. + +""" +from __future__ import absolute_import + +from .. import exc, util, event, inspection +from .base import SchemaEventTarget, DialectKWArgs +import operator +from . import visitors +from . import type_api +from .base import _bind_or_error, ColumnCollection +from .elements import ClauseElement, ColumnClause, \ + _as_truncated, TextClause, _literal_as_text,\ + ColumnElement, quoted_name +from .selectable import TableClause +import collections +import sqlalchemy +from . import ddl + +RETAIN_SCHEMA = util.symbol('retain_schema') + +BLANK_SCHEMA = util.symbol( + 'blank_schema', + """Symbol indicating that a :class:`.Table` or :class:`.Sequence` + should have 'None' for its schema, even if the parent + :class:`.MetaData` has specified a schema. + + .. versionadded:: 1.0.14 + + """ +) + + +def _get_table_key(name, schema): + if schema is None: + return name + else: + return schema + "." + name + + +@inspection._self_inspects +class SchemaItem(SchemaEventTarget, visitors.Visitable): + """Base class for items that define a database schema.""" + + __visit_name__ = 'schema_item' + + def _init_items(self, *args): + """Initialize the list of child items for this SchemaItem.""" + + for item in args: + if item is not None: + item._set_parent_with_dispatch(self) + + def get_children(self, **kwargs): + """used to allow SchemaVisitor access""" + return [] + + def __repr__(self): + return util.generic_repr(self, omit_kwarg=['info']) + + @property + @util.deprecated('0.9', 'Use ``.name.quote``') + def quote(self): + """Return the value of the ``quote`` flag passed + to this schema object, for those schema items which + have a ``name`` field. + + """ + + return self.name.quote + + @util.memoized_property + def info(self): + """Info dictionary associated with the object, allowing user-defined + data to be associated with this :class:`.SchemaItem`. + + The dictionary is automatically generated when first accessed. + It can also be specified in the constructor of some objects, + such as :class:`.Table` and :class:`.Column`. + + """ + return {} + + def _schema_item_copy(self, schema_item): + if 'info' in self.__dict__: + schema_item.info = self.info.copy() + schema_item.dispatch._update(self.dispatch) + return schema_item + + def _translate_schema(self, effective_schema, map_): + return map_.get(effective_schema, effective_schema) + + +class Table(DialectKWArgs, SchemaItem, TableClause): + r"""Represent a table in a database. + + e.g.:: + + mytable = Table("mytable", metadata, + Column('mytable_id', Integer, primary_key=True), + Column('value', String(50)) + ) + + The :class:`.Table` object constructs a unique instance of itself based + on its name and optional schema name within the given + :class:`.MetaData` object. Calling the :class:`.Table` + constructor with the same name and same :class:`.MetaData` argument + a second time will return the *same* :class:`.Table` object - in this way + the :class:`.Table` constructor acts as a registry function. + + .. seealso:: + + :ref:`metadata_describing` - Introduction to database metadata + + Constructor arguments are as follows: + + :param name: The name of this table as represented in the database. 
+ + The table name, along with the value of the ``schema`` parameter, + forms a key which uniquely identifies this :class:`.Table` within + the owning :class:`.MetaData` collection. + Additional calls to :class:`.Table` with the same name, metadata, + and schema name will return the same :class:`.Table` object. + + Names which contain no upper case characters + will be treated as case insensitive names, and will not be quoted + unless they are a reserved word or contain special characters. + A name with any number of upper case characters is considered + to be case sensitive, and will be sent as quoted. + + To enable unconditional quoting for the table name, specify the flag + ``quote=True`` to the constructor, or use the :class:`.quoted_name` + construct to specify the name. + + :param metadata: a :class:`.MetaData` object which will contain this + table. The metadata is used as a point of association of this table + with other tables which are referenced via foreign key. It also + may be used to associate this table with a particular + :class:`.Connectable`. + + :param \*args: Additional positional arguments are used primarily + to add the list of :class:`.Column` objects contained within this + table. Similar to the style of a CREATE TABLE statement, other + :class:`.SchemaItem` constructs may be added here, including + :class:`.PrimaryKeyConstraint`, and :class:`.ForeignKeyConstraint`. + + :param autoload: Defaults to False, unless :paramref:`.Table.autoload_with` + is set in which case it defaults to True; :class:`.Column` objects + for this table should be reflected from the database, possibly + augmenting or replacing existing :class:`.Column` objects that were + explicitly specified. + + .. versionchanged:: 1.0.0 setting the :paramref:`.Table.autoload_with` + parameter implies that :paramref:`.Table.autoload` will default + to True. + + .. seealso:: + + :ref:`metadata_reflection_toplevel` + + :param autoload_replace: Defaults to ``True``; when using + :paramref:`.Table.autoload` + in conjunction with :paramref:`.Table.extend_existing`, indicates + that :class:`.Column` objects present in the already-existing + :class:`.Table` object should be replaced with columns of the same + name retrieved from the autoload process. When ``False``, columns + already present under existing names will be omitted from the + reflection process. + + Note that this setting does not impact :class:`.Column` objects + specified programmatically within the call to :class:`.Table` that + also is autoloading; those :class:`.Column` objects will always + replace existing columns of the same name when + :paramref:`.Table.extend_existing` is ``True``. + + .. versionadded:: 0.7.5 + + .. seealso:: + + :paramref:`.Table.autoload` + + :paramref:`.Table.extend_existing` + + :param autoload_with: An :class:`.Engine` or :class:`.Connection` object + with which this :class:`.Table` object will be reflected; when + set to a non-None value, it implies that :paramref:`.Table.autoload` + is ``True``. If left unset, but :paramref:`.Table.autoload` is + explicitly set to ``True``, an autoload operation will attempt to + proceed by locating an :class:`.Engine` or :class:`.Connection` bound + to the underlying :class:`.MetaData` object. + + .. seealso:: + + :paramref:`.Table.autoload` + + :param extend_existing: When ``True``, indicates that if this + :class:`.Table` is already present in the given :class:`.MetaData`, + apply further arguments within the constructor to the existing + :class:`.Table`. 
+ + If :paramref:`.Table.extend_existing` or + :paramref:`.Table.keep_existing` are not set, and the given name + of the new :class:`.Table` refers to a :class:`.Table` that is + already present in the target :class:`.MetaData` collection, and + this :class:`.Table` specifies additional columns or other constructs + or flags that modify the table's state, an + error is raised. The purpose of these two mutually-exclusive flags + is to specify what action should be taken when a :class:`.Table` + is specified that matches an existing :class:`.Table`, yet specifies + additional constructs. + + :paramref:`.Table.extend_existing` will also work in conjunction + with :paramref:`.Table.autoload` to run a new reflection + operation against the database, even if a :class:`.Table` + of the same name is already present in the target + :class:`.MetaData`; newly reflected :class:`.Column` objects + and other options will be added into the state of the + :class:`.Table`, potentially overwriting existing columns + and options of the same name. + + .. versionchanged:: 0.7.4 :paramref:`.Table.extend_existing` will + invoke a new reflection operation when combined with + :paramref:`.Table.autoload` set to True. + + As is always the case with :paramref:`.Table.autoload`, + :class:`.Column` objects can be specified in the same :class:`.Table` + constructor, which will take precedence. Below, the existing + table ``mytable`` will be augmented with :class:`.Column` objects + both reflected from the database, as well as the given :class:`.Column` + named "y":: + + Table("mytable", metadata, + Column('y', Integer), + extend_existing=True, + autoload=True, + autoload_with=engine + ) + + .. seealso:: + + :paramref:`.Table.autoload` + + :paramref:`.Table.autoload_replace` + + :paramref:`.Table.keep_existing` + + + :param implicit_returning: True by default - indicates that + RETURNING can be used by default to fetch newly inserted primary key + values, for backends which support this. Note that + create_engine() also provides an implicit_returning flag. + + :param include_columns: A list of strings indicating a subset of + columns to be loaded via the ``autoload`` operation; table columns who + aren't present in this list will not be represented on the resulting + ``Table`` object. Defaults to ``None`` which indicates all columns + should be reflected. + + :param info: Optional data dictionary which will be populated into the + :attr:`.SchemaItem.info` attribute of this object. + + :param keep_existing: When ``True``, indicates that if this Table + is already present in the given :class:`.MetaData`, ignore + further arguments within the constructor to the existing + :class:`.Table`, and return the :class:`.Table` object as + originally created. This is to allow a function that wishes + to define a new :class:`.Table` on first call, but on + subsequent calls will return the same :class:`.Table`, + without any of the declarations (particularly constraints) + being applied a second time. + + If :paramref:`.Table.extend_existing` or + :paramref:`.Table.keep_existing` are not set, and the given name + of the new :class:`.Table` refers to a :class:`.Table` that is + already present in the target :class:`.MetaData` collection, and + this :class:`.Table` specifies additional columns or other constructs + or flags that modify the table's state, an + error is raised. 
The purpose of these two mutually-exclusive flags + is to specify what action should be taken when a :class:`.Table` + is specified that matches an existing :class:`.Table`, yet specifies + additional constructs. + + .. seealso:: + + :paramref:`.Table.extend_existing` + + :param listeners: A list of tuples of the form ``(<eventname>, <fn>)`` + which will be passed to :func:`.event.listen` upon construction. + This alternate hook to :func:`.event.listen` allows the establishment + of a listener function specific to this :class:`.Table` before + the "autoload" process begins. Particularly useful for + the :meth:`.DDLEvents.column_reflect` event:: + + def listen_for_reflect(table, column_info): + "handle the column reflection event" + # ... + + t = Table( + 'sometable', + autoload=True, + listeners=[ + ('column_reflect', listen_for_reflect) + ]) + + :param mustexist: When ``True``, indicates that this Table must already + be present in the given :class:`.MetaData` collection, else + an exception is raised. + + :param prefixes: + A list of strings to insert after CREATE in the CREATE TABLE + statement. They will be separated by spaces. + + :param quote: Force quoting of this table's name on or off, corresponding + to ``True`` or ``False``. When left at its default of ``None``, + the table identifier will be quoted according to whether the name is + case sensitive (identifiers with at least one upper case character are + treated as case sensitive), or if it's a reserved word. This flag + is only needed to force quoting of a reserved word which is not known + by the SQLAlchemy dialect. + + :param quote_schema: same as 'quote' but applies to the schema identifier. + + :param schema: The schema name for this table, which is required if + the table resides in a schema other than the default selected schema + for the engine's database connection. Defaults to ``None``. + + If the owning :class:`.MetaData` of this :class:`.Table` specifies + its own :paramref:`.MetaData.schema` parameter, then that schema + name will be applied to this :class:`.Table` if the schema parameter + here is set to ``None``. To set a blank schema name on a :class:`.Table` + that would otherwise use the schema set on the owning :class:`.MetaData`, + specify the special symbol :attr:`.BLANK_SCHEMA`. + + .. versionadded:: 1.0.14 Added the :attr:`.BLANK_SCHEMA` symbol to + allow a :class:`.Table` to have a blank schema name even when the + parent :class:`.MetaData` specifies :paramref:`.MetaData.schema`. + + The quoting rules for the schema name are the same as those for the + ``name`` parameter, in that quoting is applied for reserved words or + case-sensitive names; to enable unconditional quoting for the + schema name, specify the flag + ``quote_schema=True`` to the constructor, or use the + :class:`.quoted_name` construct to specify the name. + + :param useexisting: Deprecated. Use :paramref:`.Table.extend_existing`. + + :param \**kw: Additional keyword arguments not mentioned above are + dialect specific, and passed in the form ``<dialectname>_<argname>``. + See the documentation regarding an individual dialect at + :ref:`dialect_toplevel` for detail on documented arguments.
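As a brief sketch of the ``schema`` interaction described above (the metadata, table, and schema names here are illustrative only)::

    metadata = MetaData(schema='remote_banks')

    # inherits 'remote_banks' as its schema from the MetaData
    financial_info = Table('financial_info', metadata,
                           Column('id', Integer, primary_key=True))

    # opts out of the MetaData-level schema entirely
    scratch = Table('scratch', metadata, schema=BLANK_SCHEMA)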
+ + """ + + __visit_name__ = 'table' + + def __new__(cls, *args, **kw): + if not args: + # python3k pickle seems to call this + return object.__new__(cls) + + try: + name, metadata, args = args[0], args[1], args[2:] + except IndexError: + raise TypeError("Table() takes at least two arguments") + + schema = kw.get('schema', None) + if schema is None: + schema = metadata.schema + elif schema is BLANK_SCHEMA: + schema = None + keep_existing = kw.pop('keep_existing', False) + extend_existing = kw.pop('extend_existing', False) + if 'useexisting' in kw: + msg = "useexisting is deprecated. Use extend_existing." + util.warn_deprecated(msg) + if extend_existing: + msg = "useexisting is synonymous with extend_existing." + raise exc.ArgumentError(msg) + extend_existing = kw.pop('useexisting', False) + + if keep_existing and extend_existing: + msg = "keep_existing and extend_existing are mutually exclusive." + raise exc.ArgumentError(msg) + + mustexist = kw.pop('mustexist', False) + key = _get_table_key(name, schema) + if key in metadata.tables: + if not keep_existing and not extend_existing and bool(args): + raise exc.InvalidRequestError( + "Table '%s' is already defined for this MetaData " + "instance. Specify 'extend_existing=True' " + "to redefine " + "options and columns on an " + "existing Table object." % key) + table = metadata.tables[key] + if extend_existing: + table._init_existing(*args, **kw) + return table + else: + if mustexist: + raise exc.InvalidRequestError( + "Table '%s' not defined" % (key)) + table = object.__new__(cls) + table.dispatch.before_parent_attach(table, metadata) + metadata._add_table(name, schema, table) + try: + table._init(name, metadata, *args, **kw) + table.dispatch.after_parent_attach(table, metadata) + return table + except: + with util.safe_reraise(): + metadata._remove_table(name, schema) + + @property + @util.deprecated('0.9', 'Use ``table.schema.quote``') + def quote_schema(self): + """Return the value of the ``quote_schema`` flag passed + to this :class:`.Table`. + """ + + return self.schema.quote + + def __init__(self, *args, **kw): + """Constructor for :class:`~.schema.Table`. + + This method is a no-op. See the top-level + documentation for :class:`~.schema.Table` + for constructor arguments. + + """ + # __init__ is overridden to prevent __new__ from + # calling the superclass constructor. 
+ + def _init(self, name, metadata, *args, **kwargs): + super(Table, self).__init__( + quoted_name(name, kwargs.pop('quote', None))) + self.metadata = metadata + + self.schema = kwargs.pop('schema', None) + if self.schema is None: + self.schema = metadata.schema + elif self.schema is BLANK_SCHEMA: + self.schema = None + else: + quote_schema = kwargs.pop('quote_schema', None) + self.schema = quoted_name(self.schema, quote_schema) + + self.indexes = set() + self.constraints = set() + self._columns = ColumnCollection() + PrimaryKeyConstraint(_implicit_generated=True).\ + _set_parent_with_dispatch(self) + self.foreign_keys = set() + self._extra_dependencies = set() + if self.schema is not None: + self.fullname = "%s.%s" % (self.schema, self.name) + else: + self.fullname = self.name + + autoload_with = kwargs.pop('autoload_with', None) + autoload = kwargs.pop('autoload', autoload_with is not None) + # this argument is only used with _init_existing() + kwargs.pop('autoload_replace', True) + _extend_on = kwargs.pop("_extend_on", None) + + include_columns = kwargs.pop('include_columns', None) + + self.implicit_returning = kwargs.pop('implicit_returning', True) + + if 'info' in kwargs: + self.info = kwargs.pop('info') + if 'listeners' in kwargs: + listeners = kwargs.pop('listeners') + for evt, fn in listeners: + event.listen(self, evt, fn) + + self._prefixes = kwargs.pop('prefixes', []) + + self._extra_kwargs(**kwargs) + + # load column definitions from the database if 'autoload' is defined + # we do it after the table is in the singleton dictionary to support + # circular foreign keys + if autoload: + self._autoload( + metadata, autoload_with, + include_columns, _extend_on=_extend_on) + + # initialize all the column, etc. objects. done after reflection to + # allow user-overrides + self._init_items(*args) + + def _autoload(self, metadata, autoload_with, include_columns, + exclude_columns=(), _extend_on=None): + + if autoload_with: + autoload_with.run_callable( + autoload_with.dialect.reflecttable, + self, include_columns, exclude_columns, + _extend_on=_extend_on + ) + else: + bind = _bind_or_error( + metadata, + msg="No engine is bound to this Table's MetaData. " + "Pass an engine to the Table via " + "autoload_with=<someengine>, " + "or associate the MetaData with an engine via " + "metadata.bind=<someengine>") + bind.run_callable( + bind.dialect.reflecttable, + self, include_columns, exclude_columns, + _extend_on=_extend_on + ) + + @property + def _sorted_constraints(self): + """Return the set of constraints as a list, sorted by creation + order. + + """ + return sorted(self.constraints, key=lambda c: c._creation_order) + + @property + def foreign_key_constraints(self): + """:class:`.ForeignKeyConstraint` objects referred to by this + :class:`.Table`. + + This list is produced from the collection of :class:`.ForeignKey` + objects currently associated. + + ..
versionadded:: 1.0.0 + + """ + return set(fkc.constraint for fkc in self.foreign_keys) + + def _init_existing(self, *args, **kwargs): + autoload_with = kwargs.pop('autoload_with', None) + autoload = kwargs.pop('autoload', autoload_with is not None) + autoload_replace = kwargs.pop('autoload_replace', True) + schema = kwargs.pop('schema', None) + _extend_on = kwargs.pop('_extend_on', None) + + if schema and schema != self.schema: + raise exc.ArgumentError( + "Can't change schema of existing table from '%s' to '%s'" % + (self.schema, schema)) + + include_columns = kwargs.pop('include_columns', None) + + if include_columns is not None: + for c in self.c: + if c.name not in include_columns: + self._columns.remove(c) + + for key in ('quote', 'quote_schema'): + if key in kwargs: + raise exc.ArgumentError( + "Can't redefine 'quote' or 'quote_schema' arguments") + + if 'info' in kwargs: + self.info = kwargs.pop('info') + + if autoload: + if not autoload_replace: + # don't replace columns already present. + # we'd like to do this for constraints also however we don't + # have simple de-duping for unnamed constraints. + exclude_columns = [c.name for c in self.c] + else: + exclude_columns = () + self._autoload( + self.metadata, autoload_with, + include_columns, exclude_columns, _extend_on=_extend_on) + + self._extra_kwargs(**kwargs) + self._init_items(*args) + + def _extra_kwargs(self, **kwargs): + self._validate_dialect_kwargs(kwargs) + + def _init_collections(self): + pass + + def _reset_exported(self): + pass + + @property + def _autoincrement_column(self): + return self.primary_key._autoincrement_column + + @property + def key(self): + """Return the 'key' for this :class:`.Table`. + + This value is used as the dictionary key within the + :attr:`.MetaData.tables` collection. It is typically the same + as that of :attr:`.Table.name` for a table with no + :attr:`.Table.schema` set; otherwise it is typically of the form + ``schemaname.tablename``. + + """ + return _get_table_key(self.name, self.schema) + + def __repr__(self): + return "Table(%s)" % ', '.join( + [repr(self.name)] + [repr(self.metadata)] + + [repr(x) for x in self.columns] + + ["%s=%s" % (k, repr(getattr(self, k))) for k in ['schema']]) + + def __str__(self): + return _get_table_key(self.description, self.schema) + + @property + def bind(self): + """Return the connectable associated with this Table.""" + + return self.metadata and self.metadata.bind or None + + def add_is_dependent_on(self, table): + """Add a 'dependency' for this Table. + + This is another Table object which must be created + before this one can be, or dropped after this one. + + Usually, dependencies between tables are determined via + ForeignKey objects. However, for other situations that + create dependencies outside of foreign keys (rules, inheriting), + this method can manually establish such a link. + + """ + self._extra_dependencies.add(table) + + def append_column(self, column): + """Append a :class:`~.schema.Column` to this :class:`~.schema.Table`. + + The "key" of the newly added :class:`~.schema.Column`, i.e. the + value of its ``.key`` attribute, will then be available + in the ``.c`` collection of this :class:`~.schema.Table`, and the + column definition will be included in any CREATE TABLE, SELECT, + UPDATE, etc. statements generated from this :class:`~.schema.Table` + construct.
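For example, a brief sketch, using illustrative names::

    users = Table('users', metadata, Column('id', Integer, primary_key=True))
    users.append_column(Column('name', String(50)))
    assert 'name' in users.c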
+ + Note that this does **not** change the definition of the table + as it exists within any underlying database, assuming that + table has already been created in the database. Relational + databases support the addition of columns to existing tables + using the SQL ALTER command, which would need to be + emitted for an already-existing table that doesn't contain + the newly added column. + + """ + + column._set_parent_with_dispatch(self) + + def append_constraint(self, constraint): + """Append a :class:`~.schema.Constraint` to this + :class:`~.schema.Table`. + + This has the effect of the constraint being included in any + future CREATE TABLE statement, assuming specific DDL creation + events have not been associated with the given + :class:`~.schema.Constraint` object. + + Note that this does **not** produce the constraint within the + relational database automatically, for a table that already exists + in the database. To add a constraint to an + existing relational database table, the SQL ALTER command must + be used. SQLAlchemy also provides the + :class:`.AddConstraint` construct which can produce this SQL when + invoked as an executable clause. + + """ + + constraint._set_parent_with_dispatch(self) + + def append_ddl_listener(self, event_name, listener): + """Append a DDL event listener to this ``Table``. + + .. deprecated:: 0.7 + See :class:`.DDLEvents`. + + """ + + def adapt_listener(target, connection, **kw): + listener(event_name, target, connection) + + event.listen(self, "" + event_name.replace('-', '_'), adapt_listener) + + def _set_parent(self, metadata): + metadata._add_table(self.name, self.schema, self) + self.metadata = metadata + + def get_children(self, column_collections=True, + schema_visitor=False, **kw): + if not schema_visitor: + return TableClause.get_children( + self, column_collections=column_collections, **kw) + else: + if column_collections: + return list(self.columns) + else: + return [] + + def exists(self, bind=None): + """Return True if this table exists.""" + + if bind is None: + bind = _bind_or_error(self) + + return bind.run_callable(bind.dialect.has_table, + self.name, schema=self.schema) + + def create(self, bind=None, checkfirst=False): + """Issue a ``CREATE`` statement for this + :class:`.Table`, using the given :class:`.Connectable` + for connectivity. + + .. seealso:: + + :meth:`.MetaData.create_all`. + + """ + + if bind is None: + bind = _bind_or_error(self) + bind._run_visitor(ddl.SchemaGenerator, + self, + checkfirst=checkfirst) + + def drop(self, bind=None, checkfirst=False): + """Issue a ``DROP`` statement for this + :class:`.Table`, using the given :class:`.Connectable` + for connectivity. + + .. seealso:: + + :meth:`.MetaData.drop_all`. + + """ + if bind is None: + bind = _bind_or_error(self) + bind._run_visitor(ddl.SchemaDropper, + self, + checkfirst=checkfirst) + + def tometadata(self, metadata, schema=RETAIN_SCHEMA, + referred_schema_fn=None, name=None): + """Return a copy of this :class:`.Table` associated with a different + :class:`.MetaData`. + + E.g.:: + + m1 = MetaData() + + user = Table('user', m1, Column('id', Integer, primary_key=True)) + + m2 = MetaData() + user_copy = user.tometadata(m2) + + :param metadata: Target :class:`.MetaData` object, into which the + new :class:`.Table` object will be created. + + :param schema: optional string name indicating the target schema. + Defaults to the special symbol :attr:`.RETAIN_SCHEMA` which indicates + that no change to the schema name should be made in the new + :class:`.Table`.
If set to a string name, the new :class:`.Table` + will have this new name as the ``.schema``. If set to ``None``, the + schema will be set to that of the schema set on the target + :class:`.MetaData`, which is typically ``None`` as well, unless + set explicitly:: + + m2 = MetaData(schema='newschema') + + # user_copy_one will have "newschema" as the schema name + user_copy_one = user.tometadata(m2, schema=None) + + m3 = MetaData() # schema defaults to None + + # user_copy_two will have None as the schema name + user_copy_two = user.tometadata(m3, schema=None) + + :param referred_schema_fn: optional callable which can be supplied + in order to provide for the schema name that should be assigned + to the referenced table of a :class:`.ForeignKeyConstraint`. + The callable accepts this parent :class:`.Table`, the + target schema that we are changing to, the + :class:`.ForeignKeyConstraint` object, and the existing + "target schema" of that constraint. The function should return the + string schema name that should be applied. + E.g.:: + + def referred_schema_fn(table, to_schema, + constraint, referred_schema): + if referred_schema == 'base_tables': + return referred_schema + else: + return to_schema + + new_table = table.tometadata(m2, schema="alt_schema", + referred_schema_fn=referred_schema_fn) + + .. versionadded:: 0.9.2 + + :param name: optional string name indicating the target table name. + If not specified or None, the table name is retained. This allows + a :class:`.Table` to be copied to the same :class:`.MetaData` target + with a new name. + + .. versionadded:: 1.0.0 + + """ + if name is None: + name = self.name + if schema is RETAIN_SCHEMA: + schema = self.schema + elif schema is None: + schema = metadata.schema + key = _get_table_key(name, schema) + if key in metadata.tables: + util.warn("Table '%s' already exists within the given " + "MetaData - not copying." % self.description) + return metadata.tables[key] + + args = [] + for c in self.columns: + args.append(c.copy(schema=schema)) + table = Table( + name, metadata, schema=schema, + *args, **self.kwargs + ) + for c in self.constraints: + if isinstance(c, ForeignKeyConstraint): + referred_schema = c._referred_schema + if referred_schema_fn: + fk_constraint_schema = referred_schema_fn( + self, schema, c, referred_schema) + else: + fk_constraint_schema = ( + schema if referred_schema == self.schema else None) + table.append_constraint( + c.copy(schema=fk_constraint_schema, target_table=table)) + elif not c._type_bound: + # skip unique constraints that would be generated + # by the 'unique' flag on Column + if isinstance(c, UniqueConstraint) and \ + len(c.columns) == 1 and \ + list(c.columns)[0].unique: + continue + + table.append_constraint( + c.copy(schema=schema, target_table=table)) + for index in self.indexes: + # skip indexes that would be generated + # by the 'index' flag on Column + if len(index.columns) == 1 and \ + list(index.columns)[0].index: + continue + Index(index.name, + unique=index.unique, + *[table.c[col] for col in index.columns.keys()], + **index.kwargs) + return self._schema_item_copy(table) + + +class Column(SchemaItem, ColumnClause): + """Represents a column in a database table.""" + + __visit_name__ = 'column' + + def __init__(self, *args, **kwargs): + r""" + Construct a new ``Column`` object. + + :param name: The name of this column as represented in the database. + This argument may be the first positional argument, or specified + via keyword. 
+ + Names which contain no upper case characters + will be treated as case insensitive names, and will not be quoted + unless they are a reserved word. Names with any number of upper + case characters will be quoted and sent exactly. Note that this + behavior applies even for databases which standardize upper + case names as case insensitive such as Oracle. + + The name field may be omitted at construction time and applied + later, at any time before the Column is associated with a + :class:`.Table`. This is to support convenient + usage within the :mod:`~sqlalchemy.ext.declarative` extension. + + :param type\_: The column's type, indicated using an instance which + subclasses :class:`~sqlalchemy.types.TypeEngine`. If no arguments + are required for the type, the class of the type can be sent + as well, e.g.:: + + # use a type with arguments + Column('data', String(50)) + + # use no arguments + Column('level', Integer) + + The ``type`` argument may be the second positional argument + or specified by keyword. + + If the ``type`` is ``None`` or is omitted, it will first default to + the special type :class:`.NullType`. If and when this + :class:`.Column` is made to refer to another column using + :class:`.ForeignKey` and/or :class:`.ForeignKeyConstraint`, the type + of the remote-referenced column will be copied to this column as + well, at the moment that the foreign key is resolved against that + remote :class:`.Column` object. + + .. versionchanged:: 0.9.0 + Support for propagation of type to a :class:`.Column` from its + :class:`.ForeignKey` object has been improved and should be + more reliable and timely. + + :param \*args: Additional positional arguments include various + :class:`.SchemaItem` derived constructs which will be applied + as options to the column. These include instances of + :class:`.Constraint`, :class:`.ForeignKey`, :class:`.ColumnDefault`, + and :class:`.Sequence`. In some cases an equivalent keyword + argument is available such as ``server_default``, ``default`` + and ``unique``. + + :param autoincrement: Set up "auto increment" semantics for an integer + primary key column. The default value is the string ``"auto"`` + which indicates that a single-column primary key that is of + an INTEGER type with no stated client-side or python-side defaults + should receive auto increment semantics automatically; + all other varieties of primary key columns will not. This + includes that :term:`DDL` such as PostgreSQL SERIAL or MySQL + AUTO_INCREMENT will be emitted for this column during a table + create, as well as that the column is assumed to generate new + integer primary key values when an INSERT statement is invoked, which + will be retrieved by the dialect. + + The flag may be set to ``True`` to indicate that a column which + is part of a composite (e.g. multi-column) primary key should + have autoincrement semantics, though note that only one column + within a primary key may have this setting. It can also + be set to ``True`` to indicate autoincrement semantics on a + column that has a client-side or server-side default configured, + however note that not all dialects can accommodate all styles + of default as an "autoincrement". It can also be + set to ``False`` on a single-column primary key that has a + datatype of INTEGER in order to disable auto increment semantics + for that column. + + ..
versionchanged:: 1.1 The autoincrement flag now defaults to + ``"auto"`` which indicates autoincrement semantics by default + for single-column integer primary keys only; for composite + (multi-column) primary keys, autoincrement is never implicitly + enabled; as always, ``autoincrement=True`` will allow for + at most one of those columns to be an "autoincrement" column. + ``autoincrement=True`` may also be set on a :class:`.Column` + that has an explicit client-side or server-side default, + subject to limitations of the backend database and dialect. + + + The setting *only* has an effect for columns which are: + + * Integer derived (i.e. INT, SMALLINT, BIGINT). + + * Part of the primary key + + * Not referring to another column via :class:`.ForeignKey`, unless + the value is specified as ``'ignore_fk'``:: + + # turn on autoincrement for this column despite + # the ForeignKey() + Column('id', ForeignKey('other.id'), + primary_key=True, autoincrement='ignore_fk') + + It is typically not desirable to have "autoincrement" enabled + on a column that refers to another via foreign key, as such a column + is required to refer to a value that originates from elsewhere. + + The setting has these two effects on columns that meet the + above criteria: + + * DDL issued for the column will include database-specific + keywords intended to signify this column as an + "autoincrement" column, such as AUTO INCREMENT on MySQL, + SERIAL on PostgreSQL, and IDENTITY on MS-SQL. It does + *not* issue AUTOINCREMENT for SQLite since this is a + special SQLite flag that is not required for autoincrementing + behavior. + + .. seealso:: + + :ref:`sqlite_autoincrement` + + * The column will be considered to be available using an + "autoincrement" method specific to the backend database, such + as calling upon ``cursor.lastrowid``, using RETURNING in an + INSERT statement to get at a sequence-generated value, or using + special functions such as "SELECT scope_identity()". + These methods are highly specific to the DBAPIs and databases in + use and vary greatly, so care should be taken when associating + ``autoincrement=True`` with a custom default generation function. + + + :param default: A scalar, Python callable, or + :class:`.ColumnElement` expression representing the + *default value* for this column, which will be invoked upon insert + if this column is otherwise not specified in the VALUES clause of + the insert. This is a shortcut to using :class:`.ColumnDefault` as + a positional argument; see that class for full detail on the + structure of the argument. + + Contrast this argument to :paramref:`.Column.server_default` + which creates a default generator on the database side. + + .. seealso:: + + :ref:`metadata_defaults_toplevel` + + :param doc: optional String that can be used by the ORM or similar + to document attributes. This attribute does not render SQL + comments (a future attribute 'comment' will achieve that). + + :param key: An optional string identifier which will identify this + ``Column`` object on the :class:`.Table`. When a key is provided, + this is the only identifier referencing the ``Column`` within the + application, including ORM attribute mapping; the ``name`` field + is used only when rendering SQL. + + :param index: When ``True``, indicates that the column is indexed. + This is a shortcut for using a :class:`.Index` construct on the + table. To specify indexes with explicit names or indexes that + contain multiple columns, use the :class:`.Index` construct + instead. 
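As a rough sketch of the shortcut described above (the names are illustrative; the two forms are approximately equivalent, except that ``index=True`` produces an anonymous index name)::

    Column('email', String(50), index=True)

    # explicit form, naming the index
    Index('ix_user_email', user.c.email)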
+ + :param info: Optional data dictionary which will be populated into the + :attr:`.SchemaItem.info` attribute of this object. + + :param nullable: When set to ``False``, will cause the "NOT NULL" + phrase to be added when generating DDL for the column. When + ``True``, will normally generate nothing (in SQL this defaults to + "NULL"), except in some very specific backend-specific edge cases + where "NULL" may render explicitly. Defaults to ``True`` unless + :paramref:`~.Column.primary_key` is also ``True``, in which case it + defaults to ``False``. This parameter is only used when issuing + CREATE TABLE statements. + + :param onupdate: A scalar, Python callable, or + :class:`~sqlalchemy.sql.expression.ClauseElement` representing a + default value to be applied to the column within UPDATE + statements, which will be invoked upon update if this column is not + present in the SET clause of the update. This is a shortcut to + using :class:`.ColumnDefault` as a positional argument with + ``for_update=True``. + + .. seealso:: + + :ref:`metadata_defaults` - complete discussion of onupdate + + :param primary_key: If ``True``, marks this column as a primary key + column. Multiple columns can have this flag set to specify + composite primary keys. As an alternative, the primary key of a + :class:`.Table` can be specified via an explicit + :class:`.PrimaryKeyConstraint` object. + + :param server_default: A :class:`.FetchedValue` instance, str, Unicode + or :func:`~sqlalchemy.sql.expression.text` construct representing + the DDL DEFAULT value for the column. + + String types will be emitted as-is, surrounded by single quotes:: + + Column('x', Text, server_default="val") + + x TEXT DEFAULT 'val' + + A :func:`~sqlalchemy.sql.expression.text` expression will be + rendered as-is, without quotes:: + + Column('y', DateTime, server_default=text('NOW()')) + + y DATETIME DEFAULT NOW() + + Strings and text() will be converted into a + :class:`.DefaultClause` object upon initialization. + + Use :class:`.FetchedValue` to indicate that an already-existing + column will generate a default value on the database side which + will be available to SQLAlchemy for post-fetch after inserts. This + construct does not specify any DDL and the implementation is left + to the database, such as via a trigger. + + .. seealso:: + + :ref:`server_defaults` - complete discussion of server side + defaults + + :param server_onupdate: A :class:`.FetchedValue` instance + representing a database-side default generation function, + such as a trigger. This + indicates to SQLAlchemy that a newly generated value will be + available after updates. This construct does not actually + implement any kind of generation function within the database, + which instead must be specified separately. + + .. seealso:: + + :ref:`triggered_columns` + + :param quote: Force quoting of this column's name on or off, + corresponding to ``True`` or ``False``. When left at its default + of ``None``, the column identifier will be quoted according to + whether the name is case sensitive (identifiers with at least one + upper case character are treated as case sensitive), or if it's a + reserved word. This flag is only needed to force quoting of a + reserved word which is not known by the SQLAlchemy dialect. + + :param unique: When ``True``, indicates that this column contains a + unique constraint, or if ``index`` is ``True`` as well, indicates + that the :class:`.Index` should be created with the unique flag.
+ To specify multiple columns in the constraint/index or to specify + an explicit name, use the :class:`.UniqueConstraint` or + :class:`.Index` constructs explicitly. + + :param system: When ``True``, indicates this is a "system" column, + that is a column which is automatically made available by the + database, and should not be included in the columns list for a + ``CREATE TABLE`` statement. + + For more elaborate scenarios where columns should be + conditionally rendered differently on different backends, + consider custom compilation rules for :class:`.CreateColumn`. + + .. versionadded:: 0.8.3 Added the ``system=True`` parameter to + :class:`.Column`. + + """ + + name = kwargs.pop('name', None) + type_ = kwargs.pop('type_', None) + args = list(args) + if args: + if isinstance(args[0], util.string_types): + if name is not None: + raise exc.ArgumentError( + "May not pass name positionally and as a keyword.") + name = args.pop(0) + if args: + coltype = args[0] + + if hasattr(coltype, "_sqla_type"): + if type_ is not None: + raise exc.ArgumentError( + "May not pass type_ positionally and as a keyword.") + type_ = args.pop(0) + + if name is not None: + name = quoted_name(name, kwargs.pop('quote', None)) + elif "quote" in kwargs: + raise exc.ArgumentError("Explicit 'name' is required when " + "sending 'quote' argument") + + super(Column, self).__init__(name, type_) + self.key = kwargs.pop('key', name) + self.primary_key = kwargs.pop('primary_key', False) + self.nullable = kwargs.pop('nullable', not self.primary_key) + self.default = kwargs.pop('default', None) + self.server_default = kwargs.pop('server_default', None) + self.server_onupdate = kwargs.pop('server_onupdate', None) + + # these default to None because .index and .unique are *not* + # informational flags about Column - there can still be an + # Index or UniqueConstraint referring to this Column. + self.index = kwargs.pop('index', None) + self.unique = kwargs.pop('unique', None) + + self.system = kwargs.pop('system', False) + self.doc = kwargs.pop('doc', None) + self.onupdate = kwargs.pop('onupdate', None) + self.autoincrement = kwargs.pop('autoincrement', "auto") + self.constraints = set() + self.foreign_keys = set() + + # check if this Column is proxying another column + if '_proxies' in kwargs: + self._proxies = kwargs.pop('_proxies') + # otherwise, add DDL-related events + elif isinstance(self.type, SchemaEventTarget): + self.type._set_parent_with_dispatch(self) + + if self.default is not None: + if isinstance(self.default, (ColumnDefault, Sequence)): + args.append(self.default) + else: + if getattr(self.type, '_warn_on_bytestring', False): + if isinstance(self.default, util.binary_type): + util.warn( + "Unicode column '%s' has non-unicode " + "default value %r specified."
% ( + self.key, + self.default + )) + args.append(ColumnDefault(self.default)) + + if self.server_default is not None: + if isinstance(self.server_default, FetchedValue): + args.append(self.server_default._as_for_update(False)) + else: + args.append(DefaultClause(self.server_default)) + + if self.onupdate is not None: + if isinstance(self.onupdate, (ColumnDefault, Sequence)): + args.append(self.onupdate) + else: + args.append(ColumnDefault(self.onupdate, for_update=True)) + + if self.server_onupdate is not None: + if isinstance(self.server_onupdate, FetchedValue): + args.append(self.server_onupdate._as_for_update(True)) + else: + args.append(DefaultClause(self.server_onupdate, + for_update=True)) + self._init_items(*args) + + util.set_creation_order(self) + + if 'info' in kwargs: + self.info = kwargs.pop('info') + + if kwargs: + raise exc.ArgumentError( + "Unknown arguments passed to Column: " + repr(list(kwargs))) + +# @property +# def quote(self): +# return getattr(self.name, "quote", None) + + def __str__(self): + if self.name is None: + return "(no name)" + elif self.table is not None: + if self.table.named_with_column: + return (self.table.description + "." + self.description) + else: + return self.description + else: + return self.description + + def references(self, column): + """Return True if this Column references the given column via foreign + key.""" + + for fk in self.foreign_keys: + if fk.column.proxy_set.intersection(column.proxy_set): + return True + else: + return False + + def append_foreign_key(self, fk): + fk._set_parent_with_dispatch(self) + + def __repr__(self): + kwarg = [] + if self.key != self.name: + kwarg.append('key') + if self.primary_key: + kwarg.append('primary_key') + if not self.nullable: + kwarg.append('nullable') + if self.onupdate: + kwarg.append('onupdate') + if self.default: + kwarg.append('default') + if self.server_default: + kwarg.append('server_default') + return "Column(%s)" % ', '.join( + [repr(self.name)] + [repr(self.type)] + + [repr(x) for x in self.foreign_keys if x is not None] + + [repr(x) for x in self.constraints] + + [(self.table is not None and "table=<%s>" % + self.table.description or "table=None")] + + ["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg]) + + def _set_parent(self, table): + if not self.name: + raise exc.ArgumentError( + "Column must be constructed with a non-blank name or " + "assign a non-blank .name before adding to a Table.") + if self.key is None: + self.key = self.name + + existing = getattr(self, 'table', None) + if existing is not None and existing is not table: + raise exc.ArgumentError( + "Column object '%s' already assigned to Table '%s'" % ( + self.key, + existing.description + )) + + if self.key in table._columns: + col = table._columns.get(self.key) + if col is not self: + for fk in col.foreign_keys: + table.foreign_keys.remove(fk) + if fk.constraint in table.constraints: + # this might have been removed + # already, if it's a composite constraint + # and more than one col being replaced + table.constraints.remove(fk.constraint) + + table._columns.replace(self) + + if self.primary_key: + table.primary_key._replace(self) + elif self.key in table.primary_key: + raise exc.ArgumentError( + "Trying to redefine primary-key column '%s' as a " + "non-primary-key column on table '%s'" % ( + self.key, table.fullname)) + + self.table = table + + if self.index: + if isinstance(self.index, util.string_types): + raise exc.ArgumentError( + "The 'index' keyword argument on Column is boolean only. 
" + "To create indexes with a specific name, create an " + "explicit Index object external to the Table.") + Index(None, self, unique=bool(self.unique)) + elif self.unique: + if isinstance(self.unique, util.string_types): + raise exc.ArgumentError( + "The 'unique' keyword argument on Column is boolean " + "only. To create unique constraints or indexes with a " + "specific name, append an explicit UniqueConstraint to " + "the Table's list of elements, or create an explicit " + "Index object external to the Table.") + table.append_constraint(UniqueConstraint(self.key)) + + self._setup_on_memoized_fks(lambda fk: fk._set_remote_table(table)) + + def _setup_on_memoized_fks(self, fn): + fk_keys = [ + ((self.table.key, self.key), False), + ((self.table.key, self.name), True), + ] + for fk_key, link_to_name in fk_keys: + if fk_key in self.table.metadata._fk_memos: + for fk in self.table.metadata._fk_memos[fk_key]: + if fk.link_to_name is link_to_name: + fn(fk) + + def _on_table_attach(self, fn): + if self.table is not None: + fn(self, self.table) + else: + event.listen(self, 'after_parent_attach', fn) + + def copy(self, **kw): + """Create a copy of this ``Column``, unitialized. + + This is used in ``Table.tometadata``. + + """ + + # Constraint objects plus non-constraint-bound ForeignKey objects + args = \ + [c.copy(**kw) for c in self.constraints if not c._type_bound] + \ + [c.copy(**kw) for c in self.foreign_keys if not c.constraint] + + type_ = self.type + if isinstance(type_, SchemaEventTarget): + type_ = type_.copy(**kw) + + c = self._constructor( + name=self.name, + type_=type_, + key=self.key, + primary_key=self.primary_key, + nullable=self.nullable, + unique=self.unique, + system=self.system, + # quote=self.quote, + index=self.index, + autoincrement=self.autoincrement, + default=self.default, + server_default=self.server_default, + onupdate=self.onupdate, + server_onupdate=self.server_onupdate, + doc=self.doc, + *args + ) + return self._schema_item_copy(c) + + def _make_proxy(self, selectable, name=None, key=None, + name_is_truncatable=False, **kw): + """Create a *proxy* for this column. + + This is a copy of this ``Column`` referenced by a different parent + (such as an alias or select statement). The column should + be used only in select scenarios, as its full DDL/default + information is not transferred. + + """ + fk = [ForeignKey(f.column, _constraint=f.constraint) + for f in self.foreign_keys] + if name is None and self.name is None: + raise exc.InvalidRequestError( + "Cannot initialize a sub-selectable" + " with this Column object until its 'name' has " + "been assigned.") + try: + c = self._constructor( + _as_truncated(name or self.name) if + name_is_truncatable else (name or self.name), + self.type, + key=key if key else name if name else self.key, + primary_key=self.primary_key, + nullable=self.nullable, + _proxies=[self], *fk) + except TypeError: + util.raise_from_cause( + TypeError( + "Could not create a copy of this %r object. " + "Ensure the class includes a _constructor() " + "attribute or method which accepts the " + "standard Column constructor arguments, or " + "references the Column class itself." 
% self.__class__) + ) + + c.table = selectable + selectable._columns.add(c) + if selectable._is_clone_of is not None: + c._is_clone_of = selectable._is_clone_of.columns[c.key] + if self.primary_key: + selectable.primary_key.add(c) + c.dispatch.after_parent_attach(c, selectable) + return c + + def get_children(self, schema_visitor=False, **kwargs): + if schema_visitor: + return [x for x in (self.default, self.onupdate) + if x is not None] + \ + list(self.foreign_keys) + list(self.constraints) + else: + return ColumnClause.get_children(self, **kwargs) + + +class ForeignKey(DialectKWArgs, SchemaItem): + """Defines a dependency between two columns. + + ``ForeignKey`` is specified as an argument to a :class:`.Column` object, + e.g.:: + + t = Table("remote_table", metadata, + Column("remote_id", ForeignKey("main_table.id")) + ) + + Note that ``ForeignKey`` is only a marker object that defines + a dependency between two columns. The actual constraint + is in all cases represented by the :class:`.ForeignKeyConstraint` + object. This object will be generated automatically when + a ``ForeignKey`` is associated with a :class:`.Column` which + in turn is associated with a :class:`.Table`. Conversely, + when :class:`.ForeignKeyConstraint` is applied to a :class:`.Table`, + ``ForeignKey`` markers are automatically generated to be + present on each associated :class:`.Column`, which are also + associated with the constraint object. + + Note that you cannot define a "composite" foreign key constraint, + that is a constraint between a grouping of multiple parent/child + columns, using ``ForeignKey`` objects. To define this grouping, + the :class:`.ForeignKeyConstraint` object must be used, and applied + to the :class:`.Table`. The associated ``ForeignKey`` objects + are created automatically. + + The ``ForeignKey`` objects associated with an individual + :class:`.Column` object are available in the `foreign_keys` collection + of that column. + + Further examples of foreign key configuration are in + :ref:`metadata_foreignkeys`. + + """ + + __visit_name__ = 'foreign_key' + + def __init__(self, column, _constraint=None, use_alter=False, name=None, + onupdate=None, ondelete=None, deferrable=None, + initially=None, link_to_name=False, match=None, + info=None, + **dialect_kw): + r""" + Construct a column-level FOREIGN KEY. + + The :class:`.ForeignKey` object when constructed generates a + :class:`.ForeignKeyConstraint` which is associated with the parent + :class:`.Table` object's collection of constraints. + + :param column: A single target column for the key relationship. A + :class:`.Column` object or a column name as a string: + ``tablename.columnkey`` or ``schema.tablename.columnkey``. + ``columnkey`` is the ``key`` which has been assigned to the column + (defaults to the column name itself), unless ``link_to_name`` is + ``True`` in which case the rendered name of the column is used. + + .. versionadded:: 0.7.4 + Note that if the schema name is not included, and the + underlying :class:`.MetaData` has a "schema", that value will + be used. + + :param name: Optional string. An in-database name for the key if + `constraint` is not provided. + + :param onupdate: Optional string. If set, emit ON UPDATE when + issuing DDL for this constraint. Typical values include CASCADE, + DELETE and RESTRICT. + + :param ondelete: Optional string. If set, emit ON DELETE when + issuing DDL for this constraint. Typical values include CASCADE, + DELETE and RESTRICT. + + :param deferrable: Optional bool. 
If set, emit DEFERRABLE or NOT + DEFERRABLE when issuing DDL for this constraint. + + :param initially: Optional string. If set, emit INITIALLY when + issuing DDL for this constraint. + + :param link_to_name: if True, the string name given in ``column`` is + the rendered name of the referenced column, not its locally + assigned ``key``. + + :param use_alter: passed to the underlying + :class:`.ForeignKeyConstraint` to indicate the constraint should + be generated/dropped externally from the CREATE TABLE/ DROP TABLE + statement. See :paramref:`.ForeignKeyConstraint.use_alter` + for further description. + + .. seealso:: + + :paramref:`.ForeignKeyConstraint.use_alter` + + :ref:`use_alter` + + :param match: Optional string. If set, emit MATCH when issuing + DDL for this constraint. Typical values include SIMPLE, PARTIAL + and FULL. + + :param info: Optional data dictionary which will be populated into the + :attr:`.SchemaItem.info` attribute of this object. + + .. versionadded:: 1.0.0 + + :param \**dialect_kw: Additional keyword arguments are dialect + specific, and passed in the form ``<dialectname>_<argname>``. The + arguments are ultimately handled by a corresponding + :class:`.ForeignKeyConstraint`. See the documentation regarding + an individual dialect at :ref:`dialect_toplevel` for detail on + documented arguments. + + .. versionadded:: 0.9.2 + + """ + + self._colspec = column + if isinstance(self._colspec, util.string_types): + self._table_column = None + else: + if hasattr(self._colspec, '__clause_element__'): + self._table_column = self._colspec.__clause_element__() + else: + self._table_column = self._colspec + + if not isinstance(self._table_column, ColumnClause): + raise exc.ArgumentError( + "String, Column, or Column-bound argument " + "expected, got %r" % self._table_column) + elif not isinstance( + self._table_column.table, (util.NoneType, TableClause)): + raise exc.ArgumentError( + "ForeignKey received Column not bound " + "to a Table, got: %r" % self._table_column.table + ) + + # the linked ForeignKeyConstraint. + # ForeignKey will create this when parent Column + # is attached to a Table, *or* ForeignKeyConstraint + # object passes itself in when creating ForeignKey + # markers. + self.constraint = _constraint + self.parent = None + self.use_alter = use_alter + self.name = name + self.onupdate = onupdate + self.ondelete = ondelete + self.deferrable = deferrable + self.initially = initially + self.link_to_name = link_to_name + self.match = match + if info: + self.info = info + self._unvalidated_dialect_kw = dialect_kw + + def __repr__(self): + return "ForeignKey(%r)" % self._get_colspec() + + def copy(self, schema=None): + """Produce a copy of this :class:`.ForeignKey` object. + + The new :class:`.ForeignKey` will not be bound + to any :class:`.Column`. + + This method is usually used by the internal + copy procedures of :class:`.Column`, :class:`.Table`, + and :class:`.MetaData`. + + :param schema: The returned :class:`.ForeignKey` will + reference the original table and column name, qualified + by the given string schema name.
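For example, a minimal sketch (the table and schema names are illustrative)::

    fk = ForeignKey('user.id', ondelete='CASCADE')
    fk2 = fk.copy(schema='audit')
    assert fk2.target_fullname == 'audit.user.id'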
+ + """ + + fk = ForeignKey( + self._get_colspec(schema=schema), + use_alter=self.use_alter, + name=self.name, + onupdate=self.onupdate, + ondelete=self.ondelete, + deferrable=self.deferrable, + initially=self.initially, + link_to_name=self.link_to_name, + match=self.match, + **self._unvalidated_dialect_kw + ) + return self._schema_item_copy(fk) + + def _get_colspec(self, schema=None, table_name=None): + """Return a string based 'column specification' for this + :class:`.ForeignKey`. + + This is usually the equivalent of the string-based "tablename.colname" + argument first passed to the object's constructor. + + """ + if schema: + _schema, tname, colname = self._column_tokens + if table_name is not None: + tname = table_name + return "%s.%s.%s" % (schema, tname, colname) + elif table_name: + schema, tname, colname = self._column_tokens + if schema: + return "%s.%s.%s" % (schema, table_name, colname) + else: + return "%s.%s" % (table_name, colname) + elif self._table_column is not None: + return "%s.%s" % ( + self._table_column.table.fullname, self._table_column.key) + else: + return self._colspec + + @property + def _referred_schema(self): + return self._column_tokens[0] + + def _table_key(self): + if self._table_column is not None: + if self._table_column.table is None: + return None + else: + return self._table_column.table.key + else: + schema, tname, colname = self._column_tokens + return _get_table_key(tname, schema) + + target_fullname = property(_get_colspec) + + def references(self, table): + """Return True if the given :class:`.Table` is referenced by this + :class:`.ForeignKey`.""" + + return table.corresponding_column(self.column) is not None + + def get_referent(self, table): + """Return the :class:`.Column` in the given :class:`.Table` + referenced by this :class:`.ForeignKey`. + + Returns None if this :class:`.ForeignKey` does not reference the given + :class:`.Table`. + + """ + + return table.corresponding_column(self.column) + + @util.memoized_property + def _column_tokens(self): + """parse a string-based _colspec into its component parts.""" + + m = self._get_colspec().split('.') + if m is None: + raise exc.ArgumentError( + "Invalid foreign key column specification: %s" % + self._colspec) + if (len(m) == 1): + tname = m.pop() + colname = None + else: + colname = m.pop() + tname = m.pop() + + # A FK between column 'bar' and table 'foo' can be + # specified as 'foo', 'foo.bar', 'dbo.foo.bar', + # 'otherdb.dbo.foo.bar'. Once we have the column name and + # the table name, treat everything else as the schema + # name. Some databases (e.g. Sybase) support + # inter-database foreign keys. See tickets#1341 and -- + # indirectly related -- Ticket #594. This assumes that '.' + # will never appear *within* any component of the FK. + + if (len(m) > 0): + schema = '.'.join(m) + else: + schema = None + return schema, tname, colname + + def _resolve_col_tokens(self): + if self.parent is None: + raise exc.InvalidRequestError( + "this ForeignKey object does not yet have a " + "parent Column associated with it.") + + elif self.parent.table is None: + raise exc.InvalidRequestError( + "this ForeignKey's parent column is not yet associated " + "with a Table.") + + parenttable = self.parent.table + + # assertion, can be commented out. + # basically Column._make_proxy() sends the actual + # target Column to the ForeignKey object, so the + # string resolution here is never called. 
+ for c in self.parent.base_columns: + if isinstance(c, Column): + assert c.table is parenttable + break + else: + assert False + ###################### + + schema, tname, colname = self._column_tokens + + if schema is None and parenttable.metadata.schema is not None: + schema = parenttable.metadata.schema + + tablekey = _get_table_key(tname, schema) + return parenttable, tablekey, colname + + def _link_to_col_by_colstring(self, parenttable, table, colname): + if not hasattr(self.constraint, '_referred_table'): + self.constraint._referred_table = table + else: + assert self.constraint._referred_table is table + + _column = None + if colname is None: + # colname is None in the case that ForeignKey argument + # was specified as table name only, in which case we + # match the column name to the same column on the + # parent. + key = self.parent + _column = table.c.get(self.parent.key, None) + elif self.link_to_name: + key = colname + for c in table.c: + if c.name == colname: + _column = c + else: + key = colname + _column = table.c.get(colname, None) + + if _column is None: + raise exc.NoReferencedColumnError( + "Could not initialize target column " + "for ForeignKey '%s' on table '%s': " + "table '%s' has no column named '%s'" % + (self._colspec, parenttable.name, table.name, key), + table.name, key) + + self._set_target_column(_column) + + def _set_target_column(self, column): + # propagate TypeEngine to parent if it didn't have one + if self.parent.type._isnull: + self.parent.type = column.type + + # super-edgy case, if other FKs point to our column, + # they'd get the type propagated out also. + if isinstance(self.parent.table, Table): + + def set_type(fk): + if fk.parent.type._isnull: + fk.parent.type = column.type + self.parent._setup_on_memoized_fks(set_type) + + self.column = column + + @util.memoized_property + def column(self): + """Return the target :class:`.Column` referenced by this + :class:`.ForeignKey`. + + If no target column has been established, an exception + is raised. + + .. versionchanged:: 0.9.0 + Foreign key target column resolution now occurs as soon as both + the ForeignKey object and the remote Column to which it refers + are both associated with the same MetaData object. 
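For example, once both sides are present in the same :class:`.MetaData` (a sketch; the names are illustrative)::

    m = MetaData()
    user = Table('user', m, Column('id', Integer, primary_key=True))
    address = Table('address', m,
                    Column('id', Integer, primary_key=True),
                    Column('user_id', ForeignKey('user.id')))

    fk = list(address.c.user_id.foreign_keys)[0]
    assert fk.column is user.c.id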
+ + """ + + if isinstance(self._colspec, util.string_types): + + parenttable, tablekey, colname = self._resolve_col_tokens() + + if tablekey not in parenttable.metadata: + raise exc.NoReferencedTableError( + "Foreign key associated with column '%s' could not find " + "table '%s' with which to generate a " + "foreign key to target column '%s'" % + (self.parent, tablekey, colname), + tablekey) + elif parenttable.key not in parenttable.metadata: + raise exc.InvalidRequestError( + "Table %s is no longer associated with its " + "parent MetaData" % parenttable) + else: + raise exc.NoReferencedColumnError( + "Could not initialize target column for " + "ForeignKey '%s' on table '%s': " + "table '%s' has no column named '%s'" % ( + self._colspec, parenttable.name, tablekey, colname), + tablekey, colname) + elif hasattr(self._colspec, '__clause_element__'): + _column = self._colspec.__clause_element__() + return _column + else: + _column = self._colspec + return _column + + def _set_parent(self, column): + if self.parent is not None and self.parent is not column: + raise exc.InvalidRequestError( + "This ForeignKey already has a parent !") + self.parent = column + self.parent.foreign_keys.add(self) + self.parent._on_table_attach(self._set_table) + + def _set_remote_table(self, table): + parenttable, tablekey, colname = self._resolve_col_tokens() + self._link_to_col_by_colstring(parenttable, table, colname) + self.constraint._validate_dest_table(table) + + def _remove_from_metadata(self, metadata): + parenttable, table_key, colname = self._resolve_col_tokens() + fk_key = (table_key, colname) + + if self in metadata._fk_memos[fk_key]: + # TODO: no test coverage for self not in memos + metadata._fk_memos[fk_key].remove(self) + + def _set_table(self, column, table): + # standalone ForeignKey - create ForeignKeyConstraint + # on the hosting Table when attached to the Table. + if self.constraint is None and isinstance(table, Table): + self.constraint = ForeignKeyConstraint( + [], [], use_alter=self.use_alter, name=self.name, + onupdate=self.onupdate, ondelete=self.ondelete, + deferrable=self.deferrable, initially=self.initially, + match=self.match, + **self._unvalidated_dialect_kw + ) + self.constraint._append_element(column, self) + self.constraint._set_parent_with_dispatch(table) + table.foreign_keys.add(self) + + # set up remote ".column" attribute, or a note to pick it + # up when the other Table/Column shows up + if isinstance(self._colspec, util.string_types): + parenttable, table_key, colname = self._resolve_col_tokens() + fk_key = (table_key, colname) + if table_key in parenttable.metadata.tables: + table = parenttable.metadata.tables[table_key] + try: + self._link_to_col_by_colstring( + parenttable, table, colname) + except exc.NoReferencedColumnError: + # this is OK, we'll try later + pass + parenttable.metadata._fk_memos[fk_key].append(self) + elif hasattr(self._colspec, '__clause_element__'): + _column = self._colspec.__clause_element__() + self._set_target_column(_column) + else: + _column = self._colspec + self._set_target_column(_column) + + +class _NotAColumnExpr(object): + def _not_a_column_expr(self): + raise exc.InvalidRequestError( + "This %s cannot be used directly " + "as a column expression." 
% self.__class__.__name__) + + __clause_element__ = self_group = lambda self: self._not_a_column_expr() + _from_objects = property(lambda self: self._not_a_column_expr()) + + + class DefaultGenerator(_NotAColumnExpr, SchemaItem): + """Base class for column *default* values.""" + + __visit_name__ = 'default_generator' + + is_sequence = False + is_server_default = False + column = None + + def __init__(self, for_update=False): + self.for_update = for_update + + def _set_parent(self, column): + self.column = column + if self.for_update: + self.column.onupdate = self + else: + self.column.default = self + + def execute(self, bind=None, **kwargs): + if bind is None: + bind = _bind_or_error(self) + return bind._execute_default(self, **kwargs) + + def _execute_on_connection(self, connection, multiparams, params): + return connection._execute_default(self, multiparams, params) + + @property + def bind(self): + """Return the connectable associated with this default.""" + if getattr(self, 'column', None) is not None: + return self.column.table.bind + else: + return None + + + class ColumnDefault(DefaultGenerator): + """A plain default value on a column. + + This could correspond to a constant, a callable function, + or a SQL clause. + + :class:`.ColumnDefault` is generated automatically + whenever the ``default``, ``onupdate`` arguments of + :class:`.Column` are used. A :class:`.ColumnDefault` + can be passed positionally as well. + + For example, the following:: + + Column('foo', Integer, default=50) + + Is equivalent to:: + + Column('foo', Integer, ColumnDefault(50)) + + + """ + + def __init__(self, arg, **kwargs): + """Construct a new :class:`.ColumnDefault`. + + + :param arg: argument representing the default value. + May be one of the following: + + * a plain non-callable Python value, such as a + string, integer, boolean, or other simple type. + The default value will be used as is each time. + * a SQL expression, that is one which derives from + :class:`.ColumnElement`. The SQL expression will + be rendered into the INSERT or UPDATE statement, + or, in the case of a primary key column when + RETURNING is not used, may be + pre-executed before an INSERT within a SELECT. + * A Python callable. The function will be invoked for each + new row subject to an INSERT or UPDATE. + The callable must accept exactly + zero or one positional arguments. The one-argument form + will receive an instance of the :class:`.ExecutionContext`, + which provides contextual information as to the current + :class:`.Connection` in use as well as the current + statement and parameters. + + """ + super(ColumnDefault, self).__init__(**kwargs) + if isinstance(arg, FetchedValue): + raise exc.ArgumentError( + "ColumnDefault may not be a server-side default type.") + if util.callable(arg): + arg = self._maybe_wrap_callable(arg) + self.arg = arg + + @util.memoized_property + def is_callable(self): + return util.callable(self.arg) + + @util.memoized_property + def is_clause_element(self): + return isinstance(self.arg, ClauseElement) + + @util.memoized_property + def is_scalar(self): + return not self.is_callable and \ + not self.is_clause_element and \ + not self.is_sequence + + def _maybe_wrap_callable(self, fn): + """Wrap callables that don't accept a context. + + This is to allow easy compatibility with default callables + that aren't written to accept a context argument.
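For example, both of the following forms are accepted by :paramref:`.Column.default`; the zero-argument form is the one this method wraps (a sketch; the names are illustrative)::

    # zero-argument callable, wrapped so the context is discarded
    Column('created_at', DateTime, default=datetime.datetime.utcnow)

    # one-argument form, which receives the ExecutionContext
    Column('counter', Integer, default=lambda ctx: 42)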
+ + """ + try: + argspec = util.get_callable_argspec(fn, no_self=True) + except TypeError: + return util.wrap_callable(lambda ctx: fn(), fn) + + defaulted = argspec[3] is not None and len(argspec[3]) or 0 + positionals = len(argspec[0]) - defaulted + + if positionals == 0: + return util.wrap_callable(lambda ctx: fn(), fn) + + elif positionals == 1: + return fn + else: + raise exc.ArgumentError( + "ColumnDefault Python function takes zero or one " + "positional arguments") + + def _visit_name(self): + if self.for_update: + return "column_onupdate" + else: + return "column_default" + __visit_name__ = property(_visit_name) + + def __repr__(self): + return "ColumnDefault(%r)" % self.arg + + +class Sequence(DefaultGenerator): + """Represents a named database sequence. + + The :class:`.Sequence` object represents the name and configurational + parameters of a database sequence. It also represents + a construct that can be "executed" by a SQLAlchemy :class:`.Engine` + or :class:`.Connection`, rendering the appropriate "next value" function + for the target database and returning a result. + + The :class:`.Sequence` is typically associated with a primary key column:: + + some_table = Table( + 'some_table', metadata, + Column('id', Integer, Sequence('some_table_seq'), + primary_key=True) + ) + + When CREATE TABLE is emitted for the above :class:`.Table`, if the + target platform supports sequences, a CREATE SEQUENCE statement will + be emitted as well. For platforms that don't support sequences, + the :class:`.Sequence` construct is ignored. + + .. seealso:: + + :class:`.CreateSequence` + + :class:`.DropSequence` + + """ + + __visit_name__ = 'sequence' + + is_sequence = True + + def __init__(self, name, start=None, increment=None, minvalue=None, + maxvalue=None, nominvalue=None, nomaxvalue=None, cycle=None, + schema=None, optional=False, quote=None, metadata=None, + quote_schema=None, + for_update=False): + """Construct a :class:`.Sequence` object. + + :param name: The name of the sequence. + :param start: the starting index of the sequence. This value is + used when the CREATE SEQUENCE command is emitted to the database + as the value of the "START WITH" clause. If ``None``, the + clause is omitted, which on most platforms indicates a starting + value of 1. + :param increment: the increment value of the sequence. This + value is used when the CREATE SEQUENCE command is emitted to + the database as the value of the "INCREMENT BY" clause. If ``None``, + the clause is omitted, which on most platforms indicates an + increment of 1. + :param minvalue: the minimum value of the sequence. This + value is used when the CREATE SEQUENCE command is emitted to + the database as the value of the "MINVALUE" clause. If ``None``, + the clause is omitted, which on most platforms indicates a + minvalue of 1 and -2^63-1 for ascending and descending sequences, + respectively. + + .. versionadded:: 1.0.7 + + :param maxvalue: the maximum value of the sequence. This + value is used when the CREATE SEQUENCE command is emitted to + the database as the value of the "MAXVALUE" clause. If ``None``, + the clause is omitted, which on most platforms indicates a + maxvalue of 2^63-1 and -1 for ascending and descending sequences, + respectively. + + .. versionadded:: 1.0.7 + + :param nominvalue: no minimum value of the sequence. This + value is used when the CREATE SEQUENCE command is emitted to + the database as the value of the "NO MINVALUE" clause. 
If ``None``, + the clause is omitted, which on most platforms indicates a + minvalue of 1 and -2^63-1 for ascending and descending sequences, + respectively. + + .. versionadded:: 1.0.7 + + :param nomaxvalue: no maximum value of the sequence. This + value is used when the CREATE SEQUENCE command is emitted to + the database as the value of the "NO MAXVALUE" clause. If ``None``, + the clause is omitted, which on most platforms indicates a + maxvalue of 2^63-1 and -1 for ascending and descending sequences, + respectively. + + .. versionadded:: 1.0.7 + + :param cycle: allows the sequence to wrap around when the maxvalue + or minvalue has been reached by an ascending or descending sequence + respectively. This value is used when the CREATE SEQUENCE command + is emitted to the database as the "CYCLE" clause. If the limit is + reached, the next number generated will be the minvalue or maxvalue, + respectively. If cycle=False (the default) any calls to nextval + after the sequence has reached its maximum value will return an + error. + + .. versionadded:: 1.0.7 + + :param schema: Optional schema name for the sequence, if located + in a schema other than the default. The rules for selecting the + schema name when a :class:`.MetaData` is also present are the same + as that of :paramref:`.Table.schema`. + + :param optional: boolean value, when ``True``, indicates that this + :class:`.Sequence` object only needs to be explicitly generated + on backends that don't provide another way to generate primary + key identifiers. Currently, it essentially means, "don't create + this sequence on the PostgreSQL backend, where the SERIAL keyword + creates a sequence for us automatically". + :param quote: boolean value, when ``True`` or ``False``, explicitly + forces quoting of the schema name on or off. When left at its + default of ``None``, normal quoting rules based on casing and + reserved words take place. + :param quote_schema: set the quoting preferences for the ``schema`` + name. + :param metadata: optional :class:`.MetaData` object which will be + associated with this :class:`.Sequence`. A :class:`.Sequence` + that is associated with a :class:`.MetaData` gains access to the + ``bind`` of that :class:`.MetaData`, meaning the + :meth:`.Sequence.create` and :meth:`.Sequence.drop` methods will + make usage of that engine automatically. + + .. versionchanged:: 0.7 + Additionally, the appropriate CREATE SEQUENCE/ + DROP SEQUENCE DDL commands will be emitted corresponding to this + :class:`.Sequence` when :meth:`.MetaData.create_all` and + :meth:`.MetaData.drop_all` are invoked. + + Note that when a :class:`.Sequence` is applied to a :class:`.Column`, + the :class:`.Sequence` is automatically associated with the + :class:`.MetaData` object of that column's parent :class:`.Table`, + when that association is made. The :class:`.Sequence` will then + be subject to automatic CREATE SEQUENCE/DROP SEQUENCE corresponding + to when the :class:`.Table` object itself is created or dropped, + rather than that of the :class:`.MetaData` object overall. + :param for_update: Indicates this :class:`.Sequence`, when associated + with a :class:`.Column`, should be invoked for UPDATE statements + on that column's table, rather than for INSERT statements, when + no value is otherwise present for that column in the statement. 
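+
+        As a short sketch tying several of these parameters together
+        (the table, column and sequence names here are illustrative
+        only), a :class:`.Sequence` with explicit options used as a
+        primary key default::
+
+            cart_id_seq = Sequence('cart_id_seq', start=1000, increment=1,
+                                   metadata=metadata)
+            cart = Table(
+                'cart', metadata,
+                Column('cart_id', Integer, cart_id_seq, primary_key=True)
+            )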
+ + """ + super(Sequence, self).__init__(for_update=for_update) + self.name = quoted_name(name, quote) + self.start = start + self.increment = increment + self.minvalue = minvalue + self.maxvalue = maxvalue + self.nominvalue = nominvalue + self.nomaxvalue = nomaxvalue + self.cycle = cycle + self.optional = optional + if schema is BLANK_SCHEMA: + self.schema = schema = None + elif metadata is not None and schema is None and metadata.schema: + self.schema = schema = metadata.schema + else: + self.schema = quoted_name(schema, quote_schema) + self.metadata = metadata + self._key = _get_table_key(name, schema) + if metadata: + self._set_metadata(metadata) + + @util.memoized_property + def is_callable(self): + return False + + @util.memoized_property + def is_clause_element(self): + return False + + @util.dependencies("sqlalchemy.sql.functions.func") + def next_value(self, func): + """Return a :class:`.next_value` function element + which will render the appropriate increment function + for this :class:`.Sequence` within any SQL expression. + + """ + return func.next_value(self, bind=self.bind) + + def _set_parent(self, column): + super(Sequence, self)._set_parent(column) + column._on_table_attach(self._set_table) + + def _set_table(self, column, table): + self._set_metadata(table.metadata) + + def _set_metadata(self, metadata): + self.metadata = metadata + self.metadata._sequences[self._key] = self + + @property + def bind(self): + if self.metadata: + return self.metadata.bind + else: + return None + + def create(self, bind=None, checkfirst=True): + """Creates this sequence in the database.""" + + if bind is None: + bind = _bind_or_error(self) + bind._run_visitor(ddl.SchemaGenerator, + self, + checkfirst=checkfirst) + + def drop(self, bind=None, checkfirst=True): + """Drops this sequence from the database.""" + + if bind is None: + bind = _bind_or_error(self) + bind._run_visitor(ddl.SchemaDropper, + self, + checkfirst=checkfirst) + + def _not_a_column_expr(self): + raise exc.InvalidRequestError( + "This %s cannot be used directly " + "as a column expression. Use func.next_value(sequence) " + "to produce a 'next value' function that's usable " + "as a column element." + % self.__class__.__name__) + + +@inspection._self_inspects +class FetchedValue(_NotAColumnExpr, SchemaEventTarget): + """A marker for a transparent database-side default. + + Use :class:`.FetchedValue` when the database is configured + to provide some automatic default for a column. + + E.g.:: + + Column('foo', Integer, FetchedValue()) + + Would indicate that some trigger or default generator + will create a new value for the ``foo`` column during an + INSERT. + + .. seealso:: + + :ref:`triggered_columns` + + """ + is_server_default = True + reflected = False + has_argument = False + + def __init__(self, for_update=False): + self.for_update = for_update + + def _as_for_update(self, for_update): + if for_update == self.for_update: + return self + else: + return self._clone(for_update) + + def _clone(self, for_update): + n = self.__class__.__new__(self.__class__) + n.__dict__.update(self.__dict__) + n.__dict__.pop('column', None) + n.for_update = for_update + return n + + def _set_parent(self, column): + self.column = column + if self.for_update: + self.column.server_onupdate = self + else: + self.column.server_default = self + + def __repr__(self): + return util.generic_repr(self) + + +class DefaultClause(FetchedValue): + """A DDL-specified DEFAULT column value. 
+ + :class:`.DefaultClause` is a :class:`.FetchedValue` + that also generates a "DEFAULT" clause when + "CREATE TABLE" is emitted. + + :class:`.DefaultClause` is generated automatically + whenever the ``server_default``, ``server_onupdate`` arguments of + :class:`.Column` are used. A :class:`.DefaultClause` + can be passed positionally as well. + + For example, the following:: + + Column('foo', Integer, server_default="50") + + Is equivalent to:: + + Column('foo', Integer, DefaultClause("50")) + + """ + + has_argument = True + + def __init__(self, arg, for_update=False, _reflected=False): + util.assert_arg_type(arg, (util.string_types[0], + ClauseElement, + TextClause), 'arg') + super(DefaultClause, self).__init__(for_update) + self.arg = arg + self.reflected = _reflected + + def __repr__(self): + return "DefaultClause(%r, for_update=%r)" % \ + (self.arg, self.for_update) + + +class PassiveDefault(DefaultClause): + """A DDL-specified DEFAULT column value. + + .. deprecated:: 0.6 + :class:`.PassiveDefault` is deprecated. + Use :class:`.DefaultClause`. + """ + @util.deprecated("0.6", + ":class:`.PassiveDefault` is deprecated. " + "Use :class:`.DefaultClause`.", + False) + def __init__(self, *arg, **kw): + DefaultClause.__init__(self, *arg, **kw) + + +class Constraint(DialectKWArgs, SchemaItem): + """A table-level SQL constraint.""" + + __visit_name__ = 'constraint' + + def __init__(self, name=None, deferrable=None, initially=None, + _create_rule=None, info=None, _type_bound=False, + **dialect_kw): + r"""Create a SQL constraint. + + :param name: + Optional, the in-database name of this ``Constraint``. + + :param deferrable: + Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when + issuing DDL for this constraint. + + :param initially: + Optional string. If set, emit INITIALLY when issuing DDL + for this constraint. + + :param info: Optional data dictionary which will be populated into the + :attr:`.SchemaItem.info` attribute of this object. + + .. versionadded:: 1.0.0 + + :param _create_rule: + a callable which is passed the DDLCompiler object during + compilation. Returns True or False to signal inline generation of + this Constraint. + + The AddConstraint and DropConstraint DDL constructs provide + DDLElement's more comprehensive "conditional DDL" approach that is + passed a database connection when DDL is being issued. _create_rule + is instead called during any CREATE TABLE compilation, where there + may not be any transaction/connection in progress. However, it + allows conditional compilation of the constraint even for backends + which do not support addition of constraints through ALTER TABLE, + which currently includes SQLite. + + _create_rule is used by some types to create constraints. + Currently, its call signature is subject to change at any time. + + :param \**dialect_kw: Additional keyword arguments are dialect + specific, and passed in the form ``_``. See + the documentation regarding an individual dialect at + :ref:`dialect_toplevel` for detail on documented arguments. + + """ + + self.name = name + self.deferrable = deferrable + self.initially = initially + if info: + self.info = info + self._create_rule = _create_rule + self._type_bound = _type_bound + util.set_creation_order(self) + self._validate_dialect_kwargs(dialect_kw) + + @property + def table(self): + try: + if isinstance(self.parent, Table): + return self.parent + except AttributeError: + pass + raise exc.InvalidRequestError( + "This constraint is not bound to a table. 
Did you " + "mean to call table.append_constraint(constraint) ?") + + def _set_parent(self, parent): + self.parent = parent + parent.constraints.add(self) + + def copy(self, **kw): + raise NotImplementedError() + + +def _to_schema_column(element): + if hasattr(element, '__clause_element__'): + element = element.__clause_element__() + if not isinstance(element, Column): + raise exc.ArgumentError("schema.Column object expected") + return element + + +def _to_schema_column_or_string(element): + if hasattr(element, '__clause_element__'): + element = element.__clause_element__() + if not isinstance(element, util.string_types + (ColumnElement, )): + msg = "Element %r is not a string name or column element" + raise exc.ArgumentError(msg % element) + return element + + +class ColumnCollectionMixin(object): + + columns = None + """A :class:`.ColumnCollection` of :class:`.Column` objects. + + This collection represents the columns which are referred to by + this object. + + """ + + _allow_multiple_tables = False + + def __init__(self, *columns, **kw): + _autoattach = kw.pop('_autoattach', True) + self.columns = ColumnCollection() + self._pending_colargs = [_to_schema_column_or_string(c) + for c in columns] + if _autoattach and self._pending_colargs: + self._check_attach() + + @classmethod + def _extract_col_expression_collection(cls, expressions): + for expr in expressions: + strname = None + column = None + if hasattr(expr, '__clause_element__'): + expr = expr.__clause_element__() + + if not isinstance(expr, (ColumnElement, TextClause)): + # this assumes a string + strname = expr + else: + cols = [] + visitors.traverse(expr, {}, {'column': cols.append}) + if cols: + column = cols[0] + add_element = column if column is not None else strname + yield expr, column, strname, add_element + + def _check_attach(self, evt=False): + col_objs = [ + c for c in self._pending_colargs + if isinstance(c, Column) + ] + + cols_w_table = [ + c for c in col_objs if isinstance(c.table, Table) + ] + + cols_wo_table = set(col_objs).difference(cols_w_table) + + if cols_wo_table: + # feature #3341 - place event listeners for Column objects + # such that when all those cols are attached, we autoattach. + assert not evt, "Should not reach here on event call" + + # issue #3411 - don't do the per-column auto-attach if some of the + # columns are specified as strings. + has_string_cols = set(self._pending_colargs).difference(col_objs) + if not has_string_cols: + def _col_attached(column, table): + # this isinstance() corresponds with the + # isinstance() above; only want to count Table-bound + # columns + if isinstance(table, Table): + cols_wo_table.discard(column) + if not cols_wo_table: + self._check_attach(evt=True) + self._cols_wo_table = cols_wo_table + for col in cols_wo_table: + col._on_table_attach(_col_attached) + return + + columns = cols_w_table + + tables = set([c.table for c in columns]) + if len(tables) == 1: + self._set_parent_with_dispatch(tables.pop()) + elif len(tables) > 1 and not self._allow_multiple_tables: + table = columns[0].table + others = [c for c in columns[1:] if c.table is not table] + if others: + raise exc.ArgumentError( + "Column(s) %s are not part of table '%s'." 
% + (", ".join("'%s'" % c for c in others), + table.description) + ) + + def _set_parent(self, table): + for col in self._pending_colargs: + if isinstance(col, util.string_types): + col = table.c[col] + self.columns.add(col) + + +class ColumnCollectionConstraint(ColumnCollectionMixin, Constraint): + """A constraint that proxies a ColumnCollection.""" + + def __init__(self, *columns, **kw): + r""" + :param \*columns: + A sequence of column names or Column objects. + + :param name: + Optional, the in-database name of this constraint. + + :param deferrable: + Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when + issuing DDL for this constraint. + + :param initially: + Optional string. If set, emit INITIALLY when issuing DDL + for this constraint. + + :param \**kw: other keyword arguments including dialect-specific + arguments are propagated to the :class:`.Constraint` superclass. + + """ + _autoattach = kw.pop('_autoattach', True) + Constraint.__init__(self, **kw) + ColumnCollectionMixin.__init__(self, *columns, _autoattach=_autoattach) + + columns = None + """A :class:`.ColumnCollection` representing the set of columns + for this constraint. + + """ + + def _set_parent(self, table): + Constraint._set_parent(self, table) + ColumnCollectionMixin._set_parent(self, table) + + def __contains__(self, x): + return x in self.columns + + def copy(self, **kw): + c = self.__class__(name=self.name, deferrable=self.deferrable, + initially=self.initially, *self.columns.keys()) + return self._schema_item_copy(c) + + def contains_column(self, col): + """Return True if this constraint contains the given column. + + Note that this object also contains an attribute ``.columns`` + which is a :class:`.ColumnCollection` of :class:`.Column` objects. + + """ + + return self.columns.contains_column(col) + + def __iter__(self): + # inlining of + # return iter(self.columns) + # ColumnCollection->OrderedProperties->OrderedDict + ordered_dict = self.columns._data + return (ordered_dict[key] for key in ordered_dict._list) + + def __len__(self): + return len(self.columns._data) + + +class CheckConstraint(ColumnCollectionConstraint): + """A table- or column-level CHECK constraint. + + Can be included in the definition of a Table or Column. + """ + + _allow_multiple_tables = True + + def __init__(self, sqltext, name=None, deferrable=None, + initially=None, table=None, info=None, _create_rule=None, + _autoattach=True, _type_bound=False): + r"""Construct a CHECK constraint. + + :param sqltext: + A string containing the constraint definition, which will be used + verbatim, or a SQL expression construct. If given as a string, + the object is converted to a :class:`.Text` object. If the textual + string includes a colon character, escape this using a backslash:: + + CheckConstraint(r"foo ~ E'a(?\:b|c)d") + + :param name: + Optional, the in-database name of the constraint. + + :param deferrable: + Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when + issuing DDL for this constraint. + + :param initially: + Optional string. If set, emit INITIALLY when issuing DDL + for this constraint. + + :param info: Optional data dictionary which will be populated into the + :attr:`.SchemaItem.info` attribute of this object. + + .. 
versionadded:: 1.0.0 + + """ + + self.sqltext = _literal_as_text(sqltext, warn=False) + + columns = [] + visitors.traverse(self.sqltext, {}, {'column': columns.append}) + + super(CheckConstraint, self).\ + __init__( + name=name, deferrable=deferrable, + initially=initially, _create_rule=_create_rule, info=info, + _type_bound=_type_bound, _autoattach=_autoattach, + *columns) + if table is not None: + self._set_parent_with_dispatch(table) + + def __visit_name__(self): + if isinstance(self.parent, Table): + return "check_constraint" + else: + return "column_check_constraint" + __visit_name__ = property(__visit_name__) + + def copy(self, target_table=None, **kw): + if target_table is not None: + def replace(col): + if self.table.c.contains_column(col): + return target_table.c[col.key] + else: + return None + sqltext = visitors.replacement_traverse(self.sqltext, {}, replace) + else: + sqltext = self.sqltext + c = CheckConstraint(sqltext, + name=self.name, + initially=self.initially, + deferrable=self.deferrable, + _create_rule=self._create_rule, + table=target_table, + _autoattach=False, + _type_bound=self._type_bound) + return self._schema_item_copy(c) + + +class ForeignKeyConstraint(ColumnCollectionConstraint): + """A table-level FOREIGN KEY constraint. + + Defines a single column or composite FOREIGN KEY ... REFERENCES + constraint. For a no-frills, single column foreign key, adding a + :class:`.ForeignKey` to the definition of a :class:`.Column` is a + shorthand equivalent for an unnamed, single column + :class:`.ForeignKeyConstraint`. + + Examples of foreign key configuration are in :ref:`metadata_foreignkeys`. + + """ + __visit_name__ = 'foreign_key_constraint' + + def __init__(self, columns, refcolumns, name=None, onupdate=None, + ondelete=None, deferrable=None, initially=None, + use_alter=False, link_to_name=False, match=None, + table=None, info=None, **dialect_kw): + r"""Construct a composite-capable FOREIGN KEY. + + :param columns: A sequence of local column names. The named columns + must be defined and present in the parent Table. The names should + match the ``key`` given to each column (defaults to the name) unless + ``link_to_name`` is True. + + :param refcolumns: A sequence of foreign column names or Column + objects. The columns must all be located within the same Table. + + :param name: Optional, the in-database name of the key. + + :param onupdate: Optional string. If set, emit ON UPDATE when + issuing DDL for this constraint. Typical values include CASCADE, + DELETE and RESTRICT. + + :param ondelete: Optional string. If set, emit ON DELETE when + issuing DDL for this constraint. Typical values include CASCADE, + DELETE and RESTRICT. + + :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT + DEFERRABLE when issuing DDL for this constraint. + + :param initially: Optional string. If set, emit INITIALLY when + issuing DDL for this constraint. + + :param link_to_name: if True, the string name given in ``column`` is + the rendered name of the referenced column, not its locally assigned + ``key``. + + :param use_alter: If True, do not emit the DDL for this constraint as + part of the CREATE TABLE definition. Instead, generate it via an + ALTER TABLE statement issued after the full collection of tables + have been created, and drop it via an ALTER TABLE statement before + the full collection of tables are dropped. 
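+
+          For instance, a minimal sketch of the mutually-dependent case
+          this flag addresses (the table names here are hypothetical)::
+
+              node = Table(
+                  'node', metadata,
+                  Column('node_id', Integer, primary_key=True),
+                  Column('primary_element', Integer)
+              )
+              element = Table(
+                  'element', metadata,
+                  Column('element_id', Integer, primary_key=True),
+                  Column('parent_node_id', Integer,
+                         ForeignKey('node.node_id'))
+              )
+              node.append_constraint(
+                  ForeignKeyConstraint(
+                      ['primary_element'], ['element.element_id'],
+                      use_alter=True, name='fk_node_element_id'
+                  )
+              )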
+ + The use of :paramref:`.ForeignKeyConstraint.use_alter` is + particularly geared towards the case where two or more tables + are established within a mutually-dependent foreign key constraint + relationship; however, the :meth:`.MetaData.create_all` and + :meth:`.MetaData.drop_all` methods will perform this resolution + automatically, so the flag is normally not needed. + + .. versionchanged:: 1.0.0 Automatic resolution of foreign key + cycles has been added, removing the need to use the + :paramref:`.ForeignKeyConstraint.use_alter` in typical use + cases. + + .. seealso:: + + :ref:`use_alter` + + :param match: Optional string. If set, emit MATCH when issuing + DDL for this constraint. Typical values include SIMPLE, PARTIAL + and FULL. + + :param info: Optional data dictionary which will be populated into the + :attr:`.SchemaItem.info` attribute of this object. + + .. versionadded:: 1.0.0 + + :param \**dialect_kw: Additional keyword arguments are dialect + specific, and passed in the form ``_``. See + the documentation regarding an individual dialect at + :ref:`dialect_toplevel` for detail on documented arguments. + + .. versionadded:: 0.9.2 + + """ + + Constraint.__init__( + self, name=name, deferrable=deferrable, initially=initially, + info=info, **dialect_kw) + self.onupdate = onupdate + self.ondelete = ondelete + self.link_to_name = link_to_name + self.use_alter = use_alter + self.match = match + + # standalone ForeignKeyConstraint - create + # associated ForeignKey objects which will be applied to hosted + # Column objects (in col.foreign_keys), either now or when attached + # to the Table for string-specified names + self.elements = [ + ForeignKey( + refcol, + _constraint=self, + name=self.name, + onupdate=self.onupdate, + ondelete=self.ondelete, + use_alter=self.use_alter, + link_to_name=self.link_to_name, + match=self.match, + deferrable=self.deferrable, + initially=self.initially, + **self.dialect_kwargs + ) for refcol in refcolumns + ] + + ColumnCollectionMixin.__init__(self, *columns) + if table is not None: + if hasattr(self, "parent"): + assert table is self.parent + self._set_parent_with_dispatch(table) + + def _append_element(self, column, fk): + self.columns.add(column) + self.elements.append(fk) + + columns = None + """A :class:`.ColumnCollection` representing the set of columns + for this constraint. + + """ + + elements = None + """A sequence of :class:`.ForeignKey` objects. + + Each :class:`.ForeignKey` represents a single referring column/referred + column pair. + + This collection is intended to be read-only. + + """ + + @property + def _elements(self): + # legacy - provide a dictionary view of (column_key, fk) + return util.OrderedDict( + zip(self.column_keys, self.elements) + ) + + @property + def _referred_schema(self): + for elem in self.elements: + return elem._referred_schema + else: + return None + + @property + def referred_table(self): + """The :class:`.Table` object to which this + :class:`.ForeignKeyConstraint` references. + + This is a dynamically calculated attribute which may not be available + if the constraint and/or parent table is not yet associated with + a metadata collection that contains the referred table. + + .. 
versionadded:: 1.0.0 + + """ + return self.elements[0].column.table + + def _validate_dest_table(self, table): + table_keys = set([elem._table_key() + for elem in self.elements]) + if None not in table_keys and len(table_keys) > 1: + elem0, elem1 = sorted(table_keys)[0:2] + raise exc.ArgumentError( + 'ForeignKeyConstraint on %s(%s) refers to ' + 'multiple remote tables: %s and %s' % ( + table.fullname, + self._col_description, + elem0, + elem1 + )) + + @property + def column_keys(self): + """Return a list of string keys representing the local + columns in this :class:`.ForeignKeyConstraint`. + + This list is either the original string arguments sent + to the constructor of the :class:`.ForeignKeyConstraint`, + or if the constraint has been initialized with :class:`.Column` + objects, is the string .key of each element. + + .. versionadded:: 1.0.0 + + """ + if hasattr(self, "parent"): + return self.columns.keys() + else: + return [ + col.key if isinstance(col, ColumnElement) + else str(col) for col in self._pending_colargs + ] + + @property + def _col_description(self): + return ", ".join(self.column_keys) + + def _set_parent(self, table): + Constraint._set_parent(self, table) + + try: + ColumnCollectionConstraint._set_parent(self, table) + except KeyError as ke: + raise exc.ArgumentError( + "Can't create ForeignKeyConstraint " + "on table '%s': no column " + "named '%s' is present." % (table.description, ke.args[0])) + + for col, fk in zip(self.columns, self.elements): + if not hasattr(fk, 'parent') or \ + fk.parent is not col: + fk._set_parent_with_dispatch(col) + + self._validate_dest_table(table) + + def copy(self, schema=None, target_table=None, **kw): + fkc = ForeignKeyConstraint( + [x.parent.key for x in self.elements], + [x._get_colspec( + schema=schema, + table_name=target_table.name + if target_table is not None + and x._table_key() == x.parent.table.key + else None) + for x in self.elements], + name=self.name, + onupdate=self.onupdate, + ondelete=self.ondelete, + use_alter=self.use_alter, + deferrable=self.deferrable, + initially=self.initially, + link_to_name=self.link_to_name, + match=self.match + ) + for self_fk, other_fk in zip( + self.elements, + fkc.elements): + self_fk._schema_item_copy(other_fk) + return self._schema_item_copy(fkc) + + +class PrimaryKeyConstraint(ColumnCollectionConstraint): + """A table-level PRIMARY KEY constraint. + + The :class:`.PrimaryKeyConstraint` object is present automatically + on any :class:`.Table` object; it is assigned a set of + :class:`.Column` objects corresponding to those marked with + the :paramref:`.Column.primary_key` flag:: + + >>> my_table = Table('mytable', metadata, + ... Column('id', Integer, primary_key=True), + ... Column('version_id', Integer, primary_key=True), + ... Column('data', String(50)) + ... 
)
+    >>> my_table.primary_key
+    PrimaryKeyConstraint(
+        Column('id', Integer(), table=<mytable>,
+               primary_key=True, nullable=False),
+        Column('version_id', Integer(), table=<mytable>,
+               primary_key=True, nullable=False)
+    )
+
+    The primary key of a :class:`.Table` can also be specified by using
+    a :class:`.PrimaryKeyConstraint` object explicitly; in this mode of usage,
+    the "name" of the constraint can also be specified, as well as other
+    options which may be recognized by dialects::
+
+        my_table = Table('mytable', metadata,
+                    Column('id', Integer),
+                    Column('version_id', Integer),
+                    Column('data', String(50)),
+                    PrimaryKeyConstraint('id', 'version_id',
+                                         name='mytable_pk')
+                )
+
+    The two styles of column-specification should generally not be mixed.
+    A warning is emitted if the columns present in the
+    :class:`.PrimaryKeyConstraint`
+    don't match the columns that were marked as ``primary_key=True``, if both
+    are present; in this case, the columns are taken strictly from the
+    :class:`.PrimaryKeyConstraint` declaration, and those columns otherwise
+    marked as ``primary_key=True`` are ignored.  This behavior is intended to
+    be backwards compatible with previous behavior.
+
+    .. versionchanged:: 0.9.2  Using a mixture of columns within a
+       :class:`.PrimaryKeyConstraint` in addition to columns marked as
+       ``primary_key=True`` now emits a warning if the lists don't match.
+       The ultimate behavior of ignoring those columns marked with the flag
+       only is currently maintained for backwards compatibility; this warning
+       may raise an exception in a future release.
+
+    For the use case where specific options are to be specified on the
+    :class:`.PrimaryKeyConstraint`, but the usual style of using
+    ``primary_key=True`` flags is still desirable, an empty
+    :class:`.PrimaryKeyConstraint` may be specified, which will take on the
+    primary key column collection from the :class:`.Table` based on the
+    flags::
+
+        my_table = Table('mytable', metadata,
+                    Column('id', Integer, primary_key=True),
+                    Column('version_id', Integer, primary_key=True),
+                    Column('data', String(50)),
+                    PrimaryKeyConstraint(name='mytable_pk',
+                                         mssql_clustered=True)
+                )
+
+    .. versionadded:: 0.9.2  an empty :class:`.PrimaryKeyConstraint` may now
+       be specified for the purposes of establishing keyword arguments with
+       the constraint, independently of the specification of "primary key"
+       columns within the :class:`.Table` itself; columns marked as
+       ``primary_key=True`` will be gathered into the empty constraint's
+       column collection.
+
+    """
+
+    __visit_name__ = 'primary_key_constraint'
+
+    def __init__(self, *columns, **kw):
+        self._implicit_generated = kw.pop('_implicit_generated', False)
+        super(PrimaryKeyConstraint, self).__init__(*columns, **kw)
+
+    def _set_parent(self, table):
+        super(PrimaryKeyConstraint, self)._set_parent(table)
+
+        if table.primary_key is not self:
+            table.constraints.discard(table.primary_key)
+            table.primary_key = self
+            table.constraints.add(self)
+
+        table_pks = [c for c in table.c if c.primary_key]
+        if self.columns and table_pks and \
+                set(table_pks) != set(self.columns.values()):
+            util.warn(
+                "Table '%s' specifies columns %s as primary_key=True, "
+                "not matching locally specified columns %s; setting the "
+                "current primary key columns to %s.
This warning " + "may become an exception in a future release" % + ( + table.name, + ", ".join("'%s'" % c.name for c in table_pks), + ", ".join("'%s'" % c.name for c in self.columns), + ", ".join("'%s'" % c.name for c in self.columns) + ) + ) + table_pks[:] = [] + + for c in self.columns: + c.primary_key = True + c.nullable = False + self.columns.extend(table_pks) + + def _reload(self, columns): + """repopulate this :class:`.PrimaryKeyConstraint` given + a set of columns. + + Existing columns in the table that are marked as primary_key=True + are maintained. + + Also fires a new event. + + This is basically like putting a whole new + :class:`.PrimaryKeyConstraint` object on the parent + :class:`.Table` object without actually replacing the object. + + The ordering of the given list of columns is also maintained; these + columns will be appended to the list of columns after any which + are already present. + + """ + + # set the primary key flag on new columns. + # note any existing PK cols on the table also have their + # flag still set. + for col in columns: + col.primary_key = True + + self.columns.extend(columns) + + PrimaryKeyConstraint._autoincrement_column._reset(self) + self._set_parent_with_dispatch(self.table) + + def _replace(self, col): + PrimaryKeyConstraint._autoincrement_column._reset(self) + self.columns.replace(col) + + @property + def columns_autoinc_first(self): + autoinc = self._autoincrement_column + + if autoinc is not None: + return [autoinc] + [c for c in self.columns if c is not autoinc] + else: + return list(self.columns) + + @util.memoized_property + def _autoincrement_column(self): + + def _validate_autoinc(col, autoinc_true): + if col.type._type_affinity is None or not issubclass( + col.type._type_affinity, + type_api.INTEGERTYPE._type_affinity): + if autoinc_true: + raise exc.ArgumentError( + "Column type %s on column '%s' is not " + "compatible with autoincrement=True" % ( + col.type, + col + )) + else: + return False + elif not isinstance(col.default, (type(None), Sequence)) and \ + not autoinc_true: + return False + elif col.server_default is not None and not autoinc_true: + return False + elif ( + col.foreign_keys and col.autoincrement + not in (True, 'ignore_fk')): + return False + return True + + if len(self.columns) == 1: + col = list(self.columns)[0] + + if col.autoincrement is True: + _validate_autoinc(col, True) + return col + elif ( + col.autoincrement in ('auto', 'ignore_fk') and + _validate_autoinc(col, False) + ): + return col + + else: + autoinc = None + for col in self.columns: + if col.autoincrement is True: + _validate_autoinc(col, True) + if autoinc is not None: + raise exc.ArgumentError( + "Only one Column may be marked " + "autoincrement=True, found both %s and %s." % + (col.name, autoinc.name) + ) + else: + autoinc = col + + return autoinc + + +class UniqueConstraint(ColumnCollectionConstraint): + """A table-level UNIQUE constraint. + + Defines a single column or composite UNIQUE constraint. For a no-frills, + single column constraint, adding ``unique=True`` to the ``Column`` + definition is a shorthand equivalent for an unnamed, single column + UniqueConstraint. + """ + + __visit_name__ = 'unique_constraint' + + +class Index(DialectKWArgs, ColumnCollectionMixin, SchemaItem): + """A table-level INDEX. + + Defines a composite (one or more column) INDEX. 
+ + E.g.:: + + sometable = Table("sometable", metadata, + Column("name", String(50)), + Column("address", String(100)) + ) + + Index("some_index", sometable.c.name) + + For a no-frills, single column index, adding + :class:`.Column` also supports ``index=True``:: + + sometable = Table("sometable", metadata, + Column("name", String(50), index=True) + ) + + For a composite index, multiple columns can be specified:: + + Index("some_index", sometable.c.name, sometable.c.address) + + Functional indexes are supported as well, typically by using the + :data:`.func` construct in conjunction with table-bound + :class:`.Column` objects:: + + Index("some_index", func.lower(sometable.c.name)) + + .. versionadded:: 0.8 support for functional and expression-based indexes. + + An :class:`.Index` can also be manually associated with a :class:`.Table`, + either through inline declaration or using + :meth:`.Table.append_constraint`. When this approach is used, the names + of the indexed columns can be specified as strings:: + + Table("sometable", metadata, + Column("name", String(50)), + Column("address", String(100)), + Index("some_index", "name", "address") + ) + + To support functional or expression-based indexes in this form, the + :func:`.text` construct may be used:: + + from sqlalchemy import text + + Table("sometable", metadata, + Column("name", String(50)), + Column("address", String(100)), + Index("some_index", text("lower(name)")) + ) + + .. versionadded:: 0.9.5 the :func:`.text` construct may be used to + specify :class:`.Index` expressions, provided the :class:`.Index` + is explicitly associated with the :class:`.Table`. + + + .. seealso:: + + :ref:`schema_indexes` - General information on :class:`.Index`. + + :ref:`postgresql_indexes` - PostgreSQL-specific options available for + the :class:`.Index` construct. + + :ref:`mysql_indexes` - MySQL-specific options available for the + :class:`.Index` construct. + + :ref:`mssql_indexes` - MSSQL-specific options available for the + :class:`.Index` construct. + + """ + + __visit_name__ = 'index' + + def __init__(self, name, *expressions, **kw): + r"""Construct an index object. + + :param name: + The name of the index + + :param \*expressions: + Column expressions to include in the index. The expressions + are normally instances of :class:`.Column`, but may also + be arbitrary SQL expressions which ultimately refer to a + :class:`.Column`. + + :param unique=False: + Keyword only argument; if True, create a unique index. + + :param quote=None: + Keyword only argument; whether to apply quoting to the name of + the index. Works in the same manner as that of + :paramref:`.Column.quote`. + + :param info=None: Optional data dictionary which will be populated + into the :attr:`.SchemaItem.info` attribute of this object. + + .. versionadded:: 1.0.0 + + :param \**kw: Additional keyword arguments not mentioned above are + dialect specific, and passed in the form + ``_``. See the documentation regarding an + individual dialect at :ref:`dialect_toplevel` for detail on + documented arguments. 
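+
+        As a brief sketch (the table and column names are illustrative
+        only), a unique index over a lower-cased expression, passing a
+        dialect-specific option through ``**kw``::
+
+            Index('ix_user_email_lower', func.lower(users.c.email),
+                  unique=True, postgresql_using='btree')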
+ + """ + self.table = None + + columns = [] + processed_expressions = [] + for expr, column, strname, add_element in self.\ + _extract_col_expression_collection(expressions): + if add_element is not None: + columns.append(add_element) + processed_expressions.append(expr) + + self.expressions = processed_expressions + self.name = quoted_name(name, kw.pop("quote", None)) + self.unique = kw.pop('unique', False) + if 'info' in kw: + self.info = kw.pop('info') + self._validate_dialect_kwargs(kw) + + # will call _set_parent() if table-bound column + # objects are present + ColumnCollectionMixin.__init__(self, *columns) + + def _set_parent(self, table): + ColumnCollectionMixin._set_parent(self, table) + + if self.table is not None and table is not self.table: + raise exc.ArgumentError( + "Index '%s' is against table '%s', and " + "cannot be associated with table '%s'." % ( + self.name, + self.table.description, + table.description + ) + ) + self.table = table + table.indexes.add(self) + + self.expressions = [ + expr if isinstance(expr, ClauseElement) + else colexpr + for expr, colexpr in util.zip_longest(self.expressions, + self.columns) + ] + + @property + def bind(self): + """Return the connectable associated with this Index.""" + + return self.table.bind + + def create(self, bind=None): + """Issue a ``CREATE`` statement for this + :class:`.Index`, using the given :class:`.Connectable` + for connectivity. + + .. seealso:: + + :meth:`.MetaData.create_all`. + + """ + if bind is None: + bind = _bind_or_error(self) + bind._run_visitor(ddl.SchemaGenerator, self) + return self + + def drop(self, bind=None): + """Issue a ``DROP`` statement for this + :class:`.Index`, using the given :class:`.Connectable` + for connectivity. + + .. seealso:: + + :meth:`.MetaData.drop_all`. + + """ + if bind is None: + bind = _bind_or_error(self) + bind._run_visitor(ddl.SchemaDropper, self) + + def __repr__(self): + return 'Index(%s)' % ( + ", ".join( + [repr(self.name)] + + [repr(e) for e in self.expressions] + + (self.unique and ["unique=True"] or []) + )) + + +DEFAULT_NAMING_CONVENTION = util.immutabledict({ + "ix": 'ix_%(column_0_label)s' +}) + + +class MetaData(SchemaItem): + """A collection of :class:`.Table` objects and their associated schema + constructs. + + Holds a collection of :class:`.Table` objects as well as + an optional binding to an :class:`.Engine` or + :class:`.Connection`. If bound, the :class:`.Table` objects + in the collection and their columns may participate in implicit SQL + execution. + + The :class:`.Table` objects themselves are stored in the + :attr:`.MetaData.tables` dictionary. + + :class:`.MetaData` is a thread-safe object for read operations. + Construction of new tables within a single :class:`.MetaData` object, + either explicitly or via reflection, may not be completely thread-safe. + + .. seealso:: + + :ref:`metadata_describing` - Introduction to database metadata + + """ + + __visit_name__ = 'metadata' + + def __init__(self, bind=None, reflect=False, schema=None, + quote_schema=None, + naming_convention=DEFAULT_NAMING_CONVENTION, + info=None + ): + """Create a new MetaData object. + + :param bind: + An Engine or Connection to bind to. May also be a string or URL + instance, these are passed to create_engine() and this MetaData will + be bound to the resulting engine. + + :param reflect: + Optional, automatically load all tables from the bound database. + Defaults to False. ``bind`` is required when this option is set. + + .. 
deprecated:: 0.8 + Please use the :meth:`.MetaData.reflect` method. + + :param schema: + The default schema to use for the :class:`.Table`, + :class:`.Sequence`, and potentially other objects associated with + this :class:`.MetaData`. Defaults to ``None``. + + When this value is set, any :class:`.Table` or :class:`.Sequence` + which specifies ``None`` for the schema parameter will instead + have this schema name defined. To build a :class:`.Table` + or :class:`.Sequence` that still has ``None`` for the schema + even when this parameter is present, use the :attr:`.BLANK_SCHEMA` + symbol. + + .. seealso:: + + :paramref:`.Table.schema` + + :paramref:`.Sequence.schema` + + :param quote_schema: + Sets the ``quote_schema`` flag for those :class:`.Table`, + :class:`.Sequence`, and other objects which make usage of the + local ``schema`` name. + + :param info: Optional data dictionary which will be populated into the + :attr:`.SchemaItem.info` attribute of this object. + + .. versionadded:: 1.0.0 + + :param naming_convention: a dictionary referring to values which + will establish default naming conventions for :class:`.Constraint` + and :class:`.Index` objects, for those objects which are not given + a name explicitly. + + The keys of this dictionary may be: + + * a constraint or Index class, e.g. the :class:`.UniqueConstraint`, + :class:`.ForeignKeyConstraint` class, the :class:`.Index` class + + * a string mnemonic for one of the known constraint classes; + ``"fk"``, ``"pk"``, ``"ix"``, ``"ck"``, ``"uq"`` for foreign key, + primary key, index, check, and unique constraint, respectively. + + * the string name of a user-defined "token" that can be used + to define new naming tokens. + + The values associated with each "constraint class" or "constraint + mnemonic" key are string naming templates, such as + ``"uq_%(table_name)s_%(column_0_name)s"``, + which describe how the name should be composed. The values + associated with user-defined "token" keys should be callables of the + form ``fn(constraint, table)``, which accepts the constraint/index + object and :class:`.Table` as arguments, returning a string + result. + + The built-in names are as follows, some of which may only be + available for certain types of constraint: + + * ``%(table_name)s`` - the name of the :class:`.Table` object + associated with the constraint. + + * ``%(referred_table_name)s`` - the name of the :class:`.Table` + object associated with the referencing target of a + :class:`.ForeignKeyConstraint`. + + * ``%(column_0_name)s`` - the name of the :class:`.Column` at + index position "0" within the constraint. + + * ``%(column_0_label)s`` - the label of the :class:`.Column` at + index position "0", e.g. :attr:`.Column.label` + + * ``%(column_0_key)s`` - the key of the :class:`.Column` at + index position "0", e.g. :attr:`.Column.key` + + * ``%(referred_column_0_name)s`` - the name of a :class:`.Column` + at index position "0" referenced by a + :class:`.ForeignKeyConstraint`. + + * ``%(constraint_name)s`` - a special key that refers to the + existing name given to the constraint. When this key is + present, the :class:`.Constraint` object's existing name will be + replaced with one that is composed from template string that + uses this token. When this token is present, it is required that + the :class:`.Constraint` is given an explicit name ahead of time. + + * user-defined: any additional token may be implemented by passing + it along with a ``fn(constraint, table)`` callable to the + naming_convention dictionary. + + .. 
versionadded:: 0.9.2 + + .. seealso:: + + :ref:`constraint_naming_conventions` - for detailed usage + examples. + + """ + self.tables = util.immutabledict() + self.schema = quoted_name(schema, quote_schema) + self.naming_convention = naming_convention + if info: + self.info = info + self._schemas = set() + self._sequences = {} + self._fk_memos = collections.defaultdict(list) + + self.bind = bind + if reflect: + util.warn_deprecated("reflect=True is deprecate; please " + "use the reflect() method.") + if not bind: + raise exc.ArgumentError( + "A bind must be supplied in conjunction " + "with reflect=True") + self.reflect() + + tables = None + """A dictionary of :class:`.Table` objects keyed to their name or "table key". + + The exact key is that determined by the :attr:`.Table.key` attribute; + for a table with no :attr:`.Table.schema` attribute, this is the same + as :attr:`.Table.name`. For a table with a schema, it is typically of the + form ``schemaname.tablename``. + + .. seealso:: + + :attr:`.MetaData.sorted_tables` + + """ + + def __repr__(self): + return 'MetaData(bind=%r)' % self.bind + + def __contains__(self, table_or_key): + if not isinstance(table_or_key, util.string_types): + table_or_key = table_or_key.key + return table_or_key in self.tables + + def _add_table(self, name, schema, table): + key = _get_table_key(name, schema) + dict.__setitem__(self.tables, key, table) + if schema: + self._schemas.add(schema) + + def _remove_table(self, name, schema): + key = _get_table_key(name, schema) + removed = dict.pop(self.tables, key, None) + if removed is not None: + for fk in removed.foreign_keys: + fk._remove_from_metadata(self) + if self._schemas: + self._schemas = set([t.schema + for t in self.tables.values() + if t.schema is not None]) + + def __getstate__(self): + return {'tables': self.tables, + 'schema': self.schema, + 'schemas': self._schemas, + 'sequences': self._sequences, + 'fk_memos': self._fk_memos, + 'naming_convention': self.naming_convention + } + + def __setstate__(self, state): + self.tables = state['tables'] + self.schema = state['schema'] + self.naming_convention = state['naming_convention'] + self._bind = None + self._sequences = state['sequences'] + self._schemas = state['schemas'] + self._fk_memos = state['fk_memos'] + + def is_bound(self): + """True if this MetaData is bound to an Engine or Connection.""" + + return self._bind is not None + + def bind(self): + """An :class:`.Engine` or :class:`.Connection` to which this + :class:`.MetaData` is bound. + + Typically, a :class:`.Engine` is assigned to this attribute + so that "implicit execution" may be used, or alternatively + as a means of providing engine binding information to an + ORM :class:`.Session` object:: + + engine = create_engine("someurl://") + metadata.bind = engine + + .. 
seealso:: + + :ref:`dbengine_implicit` - background on "bound metadata" + + """ + return self._bind + + @util.dependencies("sqlalchemy.engine.url") + def _bind_to(self, url, bind): + """Bind this MetaData to an Engine, Connection, string or URL.""" + + if isinstance(bind, util.string_types + (url.URL, )): + self._bind = sqlalchemy.create_engine(bind) + else: + self._bind = bind + bind = property(bind, _bind_to) + + def clear(self): + """Clear all Table objects from this MetaData.""" + + dict.clear(self.tables) + self._schemas.clear() + self._fk_memos.clear() + + def remove(self, table): + """Remove the given Table object from this MetaData.""" + + self._remove_table(table.name, table.schema) + + @property + def sorted_tables(self): + """Returns a list of :class:`.Table` objects sorted in order of + foreign key dependency. + + The sorting will place :class:`.Table` objects that have dependencies + first, before the dependencies themselves, representing the + order in which they can be created. To get the order in which + the tables would be dropped, use the ``reversed()`` Python built-in. + + .. warning:: + + The :attr:`.sorted_tables` accessor cannot by itself accommodate + automatic resolution of dependency cycles between tables, which + are usually caused by mutually dependent foreign key constraints. + To resolve these cycles, either the + :paramref:`.ForeignKeyConstraint.use_alter` parameter may be appled + to those constraints, or use the + :func:`.schema.sort_tables_and_constraints` function which will break + out foreign key constraints involved in cycles separately. + + .. seealso:: + + :func:`.schema.sort_tables` + + :func:`.schema.sort_tables_and_constraints` + + :attr:`.MetaData.tables` + + :meth:`.Inspector.get_table_names` + + :meth:`.Inspector.get_sorted_table_and_fkc_names` + + + """ + return ddl.sort_tables(sorted(self.tables.values(), key=lambda t: t.key)) + + def reflect(self, bind=None, schema=None, views=False, only=None, + extend_existing=False, + autoload_replace=True, + **dialect_kwargs): + r"""Load all available table definitions from the database. + + Automatically creates ``Table`` entries in this ``MetaData`` for any + table available in the database but not yet present in the + ``MetaData``. May be called multiple times to pick up tables recently + added to the database, however no special action is taken if a table + in this ``MetaData`` no longer exists in the database. + + :param bind: + A :class:`.Connectable` used to access the database; if None, uses + the existing bind on this ``MetaData``, if any. + + :param schema: + Optional, query and reflect tables from an alterate schema. + If None, the schema associated with this :class:`.MetaData` + is used, if any. + + :param views: + If True, also reflect views. + + :param only: + Optional. Load only a sub-set of available named tables. May be + specified as a sequence of names or a callable. + + If a sequence of names is provided, only those tables will be + reflected. An error is raised if a table is requested but not + available. Named tables already present in this ``MetaData`` are + ignored. + + If a callable is provided, it will be used as a boolean predicate to + filter the list of potential table names. The callable is called + with a table name and this ``MetaData`` instance as positional + arguments and should return a true value for any table to reflect. + + :param extend_existing: Passed along to each :class:`.Table` as + :paramref:`.Table.extend_existing`. + + .. 
versionadded:: 0.9.1 + + :param autoload_replace: Passed along to each :class:`.Table` as + :paramref:`.Table.autoload_replace`. + + .. versionadded:: 0.9.1 + + :param \**dialect_kwargs: Additional keyword arguments not mentioned + above are dialect specific, and passed in the form + ``_``. See the documentation regarding an + individual dialect at :ref:`dialect_toplevel` for detail on + documented arguments. + + .. versionadded:: 0.9.2 - Added + :paramref:`.MetaData.reflect.**dialect_kwargs` to support + dialect-level reflection options for all :class:`.Table` + objects reflected. + + """ + if bind is None: + bind = _bind_or_error(self) + + with bind.connect() as conn: + + reflect_opts = { + 'autoload': True, + 'autoload_with': conn, + 'extend_existing': extend_existing, + 'autoload_replace': autoload_replace, + '_extend_on': set() + } + + reflect_opts.update(dialect_kwargs) + + if schema is None: + schema = self.schema + + if schema is not None: + reflect_opts['schema'] = schema + + available = util.OrderedSet( + bind.engine.table_names(schema, connection=conn)) + if views: + available.update( + bind.dialect.get_view_names(conn, schema) + ) + + if schema is not None: + available_w_schema = util.OrderedSet(["%s.%s" % (schema, name) + for name in available]) + else: + available_w_schema = available + + current = set(self.tables) + + if only is None: + load = [name for name, schname in + zip(available, available_w_schema) + if extend_existing or schname not in current] + elif util.callable(only): + load = [name for name, schname in + zip(available, available_w_schema) + if (extend_existing or schname not in current) + and only(name, self)] + else: + missing = [name for name in only if name not in available] + if missing: + s = schema and (" schema '%s'" % schema) or '' + raise exc.InvalidRequestError( + 'Could not reflect: requested table(s) not available ' + 'in %r%s: (%s)' % + (bind.engine, s, ', '.join(missing))) + load = [name for name in only if extend_existing or + name not in current] + + for name in load: + Table(name, self, **reflect_opts) + + def append_ddl_listener(self, event_name, listener): + """Append a DDL event listener to this ``MetaData``. + + .. deprecated:: 0.7 + See :class:`.DDLEvents`. + + """ + def adapt_listener(target, connection, **kw): + tables = kw['tables'] + listener(event, target, connection, tables=tables) + + event.listen(self, "" + event_name.replace('-', '_'), adapt_listener) + + def create_all(self, bind=None, tables=None, checkfirst=True): + """Create all tables stored in this metadata. + + Conditional by default, will not attempt to recreate tables already + present in the target database. + + :param bind: + A :class:`.Connectable` used to access the + database; if None, uses the existing bind on this ``MetaData``, if + any. + + :param tables: + Optional list of ``Table`` objects, which is a subset of the total + tables in the ``MetaData`` (others are ignored). + + :param checkfirst: + Defaults to True, don't issue CREATEs for tables already present + in the target database. + + """ + if bind is None: + bind = _bind_or_error(self) + bind._run_visitor(ddl.SchemaGenerator, + self, + checkfirst=checkfirst, + tables=tables) + + def drop_all(self, bind=None, tables=None, checkfirst=True): + """Drop all tables stored in this metadata. + + Conditional by default, will not attempt to drop tables not present in + the target database. 
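+
+        E.g., a typical round trip, where the engine URL here is
+        illustrative only::
+
+            engine = create_engine("sqlite://")
+            metadata.create_all(engine)   # emits CREATE statements
+            # ... work with the schema ...
+            metadata.drop_all(engine)     # emits DROP statements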
+ + :param bind: + A :class:`.Connectable` used to access the + database; if None, uses the existing bind on this ``MetaData``, if + any. + + :param tables: + Optional list of ``Table`` objects, which is a subset of the + total tables in the ``MetaData`` (others are ignored). + + :param checkfirst: + Defaults to True, only issue DROPs for tables confirmed to be + present in the target database. + + """ + if bind is None: + bind = _bind_or_error(self) + bind._run_visitor(ddl.SchemaDropper, + self, + checkfirst=checkfirst, + tables=tables) + + +class ThreadLocalMetaData(MetaData): + """A MetaData variant that presents a different ``bind`` in every thread. + + Makes the ``bind`` property of the MetaData a thread-local value, allowing + this collection of tables to be bound to different ``Engine`` + implementations or connections in each thread. + + The ThreadLocalMetaData starts off bound to None in each thread. Binds + must be made explicitly by assigning to the ``bind`` property or using + ``connect()``. You can also re-bind dynamically multiple times per + thread, just like a regular ``MetaData``. + + """ + + __visit_name__ = 'metadata' + + def __init__(self): + """Construct a ThreadLocalMetaData.""" + + self.context = util.threading.local() + self.__engines = {} + super(ThreadLocalMetaData, self).__init__() + + def bind(self): + """The bound Engine or Connection for this thread. + + This property may be assigned an Engine or Connection, or assigned a + string or URL to automatically create a basic Engine for this bind + with ``create_engine()``.""" + + return getattr(self.context, '_engine', None) + + @util.dependencies("sqlalchemy.engine.url") + def _bind_to(self, url, bind): + """Bind to a Connectable in the caller's thread.""" + + if isinstance(bind, util.string_types + (url.URL, )): + try: + self.context._engine = self.__engines[bind] + except KeyError: + e = sqlalchemy.create_engine(bind) + self.__engines[bind] = e + self.context._engine = e + else: + # TODO: this is squirrely. we shouldn't have to hold onto engines + # in a case like this + if bind not in self.__engines: + self.__engines[bind] = bind + self.context._engine = bind + + bind = property(bind, _bind_to) + + def is_bound(self): + """True if there is a bind for this thread.""" + return (hasattr(self.context, '_engine') and + self.context._engine is not None) + + def dispose(self): + """Dispose all bound engines, in all thread contexts.""" + + for e in self.__engines.values(): + if hasattr(e, 'dispose'): + e.dispose() + + +class _SchemaTranslateMap(object): + """Provide translation of schema names based on a mapping. + + Also provides helpers for producing cache keys and optimized + access when no mapping is present. + + Used by the :paramref:`.Connection.execution_options.schema_translate_map` + feature. + + .. 
versionadded:: 1.1 + + + """ + __slots__ = 'map_', '__call__', 'hash_key', 'is_default' + + _default_schema_getter = operator.attrgetter("schema") + + def __init__(self, map_): + self.map_ = map_ + if map_ is not None: + def schema_for_object(obj): + effective_schema = self._default_schema_getter(obj) + effective_schema = obj._translate_schema( + effective_schema, map_) + return effective_schema + self.__call__ = schema_for_object + self.hash_key = ";".join( + "%s=%s" % (k, map_[k]) + for k in sorted(map_, key=str) + ) + self.is_default = False + else: + self.hash_key = 0 + self.__call__ = self._default_schema_getter + self.is_default = True + + @classmethod + def _schema_getter(cls, map_): + if map_ is None: + return _default_schema_map + elif isinstance(map_, _SchemaTranslateMap): + return map_ + else: + return _SchemaTranslateMap(map_) + +_default_schema_map = _SchemaTranslateMap(None) +_schema_getter = _SchemaTranslateMap._schema_getter + diff --git a/app/lib/sqlalchemy/sql/selectable.py b/app/lib/sqlalchemy/sql/selectable.py new file mode 100644 index 0000000..b69d667 --- /dev/null +++ b/app/lib/sqlalchemy/sql/selectable.py @@ -0,0 +1,3716 @@ +# sql/selectable.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""The :class:`.FromClause` class of SQL expression elements, representing +SQL tables and derived rowsets. + +""" + +from .elements import ClauseElement, TextClause, ClauseList, \ + and_, Grouping, UnaryExpression, literal_column, BindParameter +from .elements import _clone, \ + _literal_as_text, _interpret_as_column_or_from, _expand_cloned,\ + _select_iterables, _anonymous_label, _clause_element_as_expr,\ + _cloned_intersection, _cloned_difference, True_, \ + _literal_as_label_reference, _literal_and_labels_as_label_reference +from .base import Immutable, Executable, _generative, \ + ColumnCollection, ColumnSet, _from_objects, Generative +from . import type_api +from .. import inspection +from .. import util +from .. import exc +from operator import attrgetter +from . import operators +import operator +import collections +from .annotation import Annotated +import itertools +from sqlalchemy.sql.visitors import Visitable + + +def _interpret_as_from(element): + insp = inspection.inspect(element, raiseerr=False) + if insp is None: + if isinstance(element, util.string_types): + util.warn_limited( + "Textual SQL FROM expression %(expr)r should be " + "explicitly declared as text(%(expr)r), " + "or use table(%(expr)r) for more specificity", + {"expr": util.ellipses_string(element)}) + + return TextClause(util.text_type(element)) + try: + return insp.selectable + except AttributeError: + raise exc.ArgumentError("FROM expression expected") + + +def _interpret_as_select(element): + element = _interpret_as_from(element) + if isinstance(element, Alias): + element = element.original + if not isinstance(element, SelectBase): + element = element.select() + return element + + +class _OffsetLimitParam(BindParameter): + @property + def _limit_offset_value(self): + return self.effective_value + + +def _offset_or_limit_clause(element, name=None, type_=None): + """Convert the given value to an "offset or limit" clause. + + This handles incoming integers and converts to an expression; if + an expression is already given, it is passed through. 
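+
+    A sketch of the two cases (``some_column`` is hypothetical)::
+
+        _offset_or_limit_clause(5)            # int -> anonymous bound param
+        _offset_or_limit_clause(some_column)  # ClauseElement -> passed through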
+ + """ + if element is None: + return None + elif hasattr(element, '__clause_element__'): + return element.__clause_element__() + elif isinstance(element, Visitable): + return element + else: + value = util.asint(element) + return _OffsetLimitParam(name, value, type_=type_, unique=True) + + +def _offset_or_limit_clause_asint(clause, attrname): + """Convert the "offset or limit" clause of a select construct to an + integer. + + This is only possible if the value is stored as a simple bound parameter. + Otherwise, a compilation error is raised. + + """ + if clause is None: + return None + try: + value = clause._limit_offset_value + except AttributeError: + raise exc.CompileError( + "This SELECT structure does not use a simple " + "integer value for %s" % attrname) + else: + return util.asint(value) + + +def subquery(alias, *args, **kwargs): + r"""Return an :class:`.Alias` object derived + from a :class:`.Select`. + + name + alias name + + \*args, \**kwargs + + all other arguments are delivered to the + :func:`select` function. + + """ + return Select(*args, **kwargs).alias(alias) + + +def alias(selectable, name=None, flat=False): + """Return an :class:`.Alias` object. + + An :class:`.Alias` represents any :class:`.FromClause` + with an alternate name assigned within SQL, typically using the ``AS`` + clause when generated, e.g. ``SELECT * FROM table AS aliasname``. + + Similar functionality is available via the + :meth:`~.FromClause.alias` method + available on all :class:`.FromClause` subclasses. + + When an :class:`.Alias` is created from a :class:`.Table` object, + this has the effect of the table being rendered + as ``tablename AS aliasname`` in a SELECT statement. + + For :func:`.select` objects, the effect is that of creating a named + subquery, i.e. ``(select ...) AS aliasname``. + + The ``name`` parameter is optional, and provides the name + to use in the rendered SQL. If blank, an "anonymous" name + will be deterministically generated at compile time. + Deterministic means the name is guaranteed to be unique against + other constructs used in the same statement, and will also be the + same name for each successive compilation of the same statement + object. + + :param selectable: any :class:`.FromClause` subclass, + such as a table, select statement, etc. + + :param name: string name to be assigned as the alias. + If ``None``, a name will be deterministically generated + at compile time. + + :param flat: Will be passed through to if the given selectable + is an instance of :class:`.Join` - see :meth:`.Join.alias` + for details. + + .. versionadded:: 0.9.0 + + """ + return _interpret_as_from(selectable).alias(name=name, flat=flat) + + +def lateral(selectable, name=None): + """Return a :class:`.Lateral` object. + + :class:`.Lateral` is an :class:`.Alias` subclass that represents + a subquery with the LATERAL keyword applied to it. + + The special behavior of a LATERAL subquery is that it appears in the + FROM clause of an enclosing SELECT, but may correlate to other + FROM clauses of that SELECT. It is a special case of subquery + only supported by a small number of backends, currently more recent + PostgreSQL versions. + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`lateral_selects` - overview of usage. + + """ + return _interpret_as_from(selectable).lateral(name=name) + + +def tablesample(selectable, sampling, name=None, seed=None): + """Return a :class:`.TableSample` object. 
+ + :class:`.TableSample` is an :class:`.Alias` subclass that represents + a table with the TABLESAMPLE clause applied to it. + :func:`~.expression.tablesample` + is also available from the :class:`.FromClause` class via the + :meth:`.FromClause.tablesample` method. + + The TABLESAMPLE clause allows selecting a randomly selected approximate + percentage of rows from a table. It supports multiple sampling methods, + most commonly BERNOULLI and SYSTEM. + + e.g.:: + + from sqlalchemy import func + + selectable = people.tablesample( + func.bernoulli(1), + name='alias', + seed=func.random()) + stmt = select([selectable.c.people_id]) + + Assuming ``people`` with a column ``people_id``, the above + statement would render as:: + + SELECT alias.people_id FROM + people AS alias TABLESAMPLE bernoulli(:bernoulli_1) + REPEATABLE (random()) + + .. versionadded:: 1.1 + + :param sampling: a ``float`` percentage between 0 and 100 or + :class:`.functions.Function`. + + :param name: optional alias name + + :param seed: any real-valued SQL expression. When specified, the + REPEATABLE sub-clause is also rendered. + + """ + return _interpret_as_from(selectable).tablesample( + sampling, name=name, seed=seed) + + +class Selectable(ClauseElement): + """mark a class as being selectable""" + __visit_name__ = 'selectable' + + is_selectable = True + + @property + def selectable(self): + return self + + +class HasPrefixes(object): + _prefixes = () + + @_generative + def prefix_with(self, *expr, **kw): + r"""Add one or more expressions following the statement keyword, i.e. + SELECT, INSERT, UPDATE, or DELETE. Generative. + + This is used to support backend-specific prefix keywords such as those + provided by MySQL. + + E.g.:: + + stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql") + + Multiple prefixes can be specified by multiple calls + to :meth:`.prefix_with`. + + :param \*expr: textual or :class:`.ClauseElement` construct which + will be rendered following the INSERT, UPDATE, or DELETE + keyword. + :param \**kw: A single keyword 'dialect' is accepted. This is an + optional string dialect name which will + limit rendering of this prefix to only that dialect. + + """ + dialect = kw.pop('dialect', None) + if kw: + raise exc.ArgumentError("Unsupported argument(s): %s" % + ",".join(kw)) + self._setup_prefixes(expr, dialect) + + def _setup_prefixes(self, prefixes, dialect=None): + self._prefixes = self._prefixes + tuple( + [(_literal_as_text(p, warn=False), dialect) for p in prefixes]) + + +class HasSuffixes(object): + _suffixes = () + + @_generative + def suffix_with(self, *expr, **kw): + r"""Add one or more expressions following the statement as a whole. + + This is used to support backend-specific suffix keywords on + certain constructs. + + E.g.:: + + stmt = select([col1, col2]).cte().suffix_with( + "cycle empno set y_cycle to 1 default 0", dialect="oracle") + + Multiple suffixes can be specified by multiple calls + to :meth:`.suffix_with`. + + :param \*expr: textual or :class:`.ClauseElement` construct which + will be rendered following the target clause. + :param \**kw: A single keyword 'dialect' is accepted. This is an + optional string dialect name which will + limit rendering of this suffix to only that dialect. 
+ + """ + dialect = kw.pop('dialect', None) + if kw: + raise exc.ArgumentError("Unsupported argument(s): %s" % + ",".join(kw)) + self._setup_suffixes(expr, dialect) + + def _setup_suffixes(self, suffixes, dialect=None): + self._suffixes = self._suffixes + tuple( + [(_literal_as_text(p, warn=False), dialect) for p in suffixes]) + + +class FromClause(Selectable): + """Represent an element that can be used within the ``FROM`` + clause of a ``SELECT`` statement. + + The most common forms of :class:`.FromClause` are the + :class:`.Table` and the :func:`.select` constructs. Key + features common to all :class:`.FromClause` objects include: + + * a :attr:`.c` collection, which provides per-name access to a collection + of :class:`.ColumnElement` objects. + * a :attr:`.primary_key` attribute, which is a collection of all those + :class:`.ColumnElement` objects that indicate the ``primary_key`` flag. + * Methods to generate various derivations of a "from" clause, including + :meth:`.FromClause.alias`, :meth:`.FromClause.join`, + :meth:`.FromClause.select`. + + + """ + __visit_name__ = 'fromclause' + named_with_column = False + _hide_froms = [] + + _is_join = False + _is_select = False + _is_from_container = False + + _textual = False + """a marker that allows us to easily distinguish a :class:`.TextAsFrom` + or similar object from other kinds of :class:`.FromClause` objects.""" + + schema = None + """Define the 'schema' attribute for this :class:`.FromClause`. + + This is typically ``None`` for most objects except that of + :class:`.Table`, where it is taken as the value of the + :paramref:`.Table.schema` argument. + + """ + + def _translate_schema(self, effective_schema, map_): + return effective_schema + + _memoized_property = util.group_expirable_memoized_property(["_columns"]) + + @util.deprecated( + '1.1', + message="``FromClause.count()`` is deprecated. Counting " + "rows requires that the correct column expression and " + "accommodations for joins, DISTINCT, etc. must be made, " + "otherwise results may not be what's expected. " + "Please use an appropriate ``func.count()`` expression " + "directly.") + @util.dependencies("sqlalchemy.sql.functions") + def count(self, functions, whereclause=None, **params): + """return a SELECT COUNT generated against this + :class:`.FromClause`. + + The function generates COUNT against the + first column in the primary key of the table, or against + the first column in the table overall. Explicit use of + ``func.count()`` should be preferred:: + + row_count = conn.scalar( + select([func.count('*')]).select_from(table) + ) + + + .. seealso:: + + :data:`.func` + + """ + + if self.primary_key: + col = list(self.primary_key)[0] + else: + col = list(self.columns)[0] + return Select( + [functions.func.count(col).label('tbl_row_count')], + whereclause, + from_obj=[self], + **params) + + def select(self, whereclause=None, **params): + """return a SELECT of this :class:`.FromClause`. + + .. seealso:: + + :func:`~.sql.expression.select` - general purpose + method which allows for arbitrary column lists. + + """ + + return Select([self], whereclause, **params) + + def join(self, right, onclause=None, isouter=False, full=False): + """Return a :class:`.Join` from this :class:`.FromClause` + to another :class:`FromClause`. 
+ + E.g.:: + + from sqlalchemy import join + + j = user_table.join(address_table, + user_table.c.id == address_table.c.user_id) + stmt = select([user_table]).select_from(j) + + would emit SQL along the lines of:: + + SELECT user.id, user.name FROM user + JOIN address ON user.id = address.user_id + + :param right: the right side of the join; this is any + :class:`.FromClause` object such as a :class:`.Table` object, and + may also be a selectable-compatible object such as an ORM-mapped + class. + + :param onclause: a SQL expression representing the ON clause of the + join. If left at ``None``, :meth:`.FromClause.join` will attempt to + join the two tables based on a foreign key relationship. + + :param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN. + + :param full: if True, render a FULL OUTER JOIN, instead of LEFT OUTER + JOIN. Implies :paramref:`.FromClause.join.isouter`. + + .. versionadded:: 1.1 + + .. seealso:: + + :func:`.join` - standalone function + + :class:`.Join` - the type of object produced + + """ + + return Join(self, right, onclause, isouter, full) + + def outerjoin(self, right, onclause=None, full=False): + """Return a :class:`.Join` from this :class:`.FromClause` + to another :class:`FromClause`, with the "isouter" flag set to + True. + + E.g.:: + + from sqlalchemy import outerjoin + + j = user_table.outerjoin(address_table, + user_table.c.id == address_table.c.user_id) + + The above is equivalent to:: + + j = user_table.join( + address_table, + user_table.c.id == address_table.c.user_id, + isouter=True) + + :param right: the right side of the join; this is any + :class:`.FromClause` object such as a :class:`.Table` object, and + may also be a selectable-compatible object such as an ORM-mapped + class. + + :param onclause: a SQL expression representing the ON clause of the + join. If left at ``None``, :meth:`.FromClause.join` will attempt to + join the two tables based on a foreign key relationship. + + :param full: if True, render a FULL OUTER JOIN, instead of + LEFT OUTER JOIN. + + .. versionadded:: 1.1 + + .. seealso:: + + :meth:`.FromClause.join` + + :class:`.Join` + + """ + + return Join(self, right, onclause, True, full) + + def alias(self, name=None, flat=False): + """return an alias of this :class:`.FromClause`. + + This is shorthand for calling:: + + from sqlalchemy import alias + a = alias(self, name=name) + + See :func:`~.expression.alias` for details. + + """ + + return Alias(self, name) + + def lateral(self, name=None): + """Return a LATERAL alias of this :class:`.FromClause`. + + The return value is the :class:`.Lateral` construct also + provided by the top-level :func:`~.expression.lateral` function. + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`lateral_selects` - overview of usage. + + """ + return Lateral(self, name) + + def tablesample(self, sampling, name=None, seed=None): + """Return a TABLESAMPLE alias of this :class:`.FromClause`. + + The return value is the :class:`.TableSample` construct also + provided by the top-level :func:`~.expression.tablesample` function. + + .. versionadded:: 1.1 + + .. seealso:: + + :func:`~.expression.tablesample` - usage guidelines and parameters + + """ + return TableSample(self, sampling, name, seed) + + def is_derived_from(self, fromclause): + """Return True if this FromClause is 'derived' from the given + FromClause. + + An example would be an Alias of a Table is derived from that Table. + + """ + # this is essentially an "identity" check in the base class. 
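+        # a quick sketch of the distinction (names hypothetical):
+        #   ua = user_table.alias("ua")
+        #   ua.is_derived_from(user_table)    # True
+        #   user_table.is_derived_from(ua)    # False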
+ # Other constructs override this to traverse through + # contained elements. + return fromclause in self._cloned_set + + def _is_lexical_equivalent(self, other): + """Return True if this FromClause and the other represent + the same lexical identity. + + This tests if either one is a copy of the other, or + if they are the same via annotation identity. + + """ + return self._cloned_set.intersection(other._cloned_set) + + @util.dependencies("sqlalchemy.sql.util") + def replace_selectable(self, sqlutil, old, alias): + """replace all occurrences of FromClause 'old' with the given Alias + object, returning a copy of this :class:`.FromClause`. + + """ + + return sqlutil.ClauseAdapter(alias).traverse(self) + + def correspond_on_equivalents(self, column, equivalents): + """Return corresponding_column for the given column, or if None + search for a match in the given dictionary. + + """ + col = self.corresponding_column(column, require_embedded=True) + if col is None and col in equivalents: + for equiv in equivalents[col]: + nc = self.corresponding_column(equiv, require_embedded=True) + if nc: + return nc + return col + + def corresponding_column(self, column, require_embedded=False): + """Given a :class:`.ColumnElement`, return the exported + :class:`.ColumnElement` object from this :class:`.Selectable` + which corresponds to that original + :class:`~sqlalchemy.schema.Column` via a common ancestor + column. + + :param column: the target :class:`.ColumnElement` to be matched + + :param require_embedded: only return corresponding columns for + the given :class:`.ColumnElement`, if the given + :class:`.ColumnElement` is actually present within a sub-element + of this :class:`.FromClause`. Normally the column will match if + it merely shares a common ancestor with one of the exported + columns of this :class:`.FromClause`. + + """ + + def embedded(expanded_proxy_set, target_set): + for t in target_set.difference(expanded_proxy_set): + if not set(_expand_cloned([t]) + ).intersection(expanded_proxy_set): + return False + return True + + # don't dig around if the column is locally present + if self.c.contains_column(column): + return column + col, intersect = None, None + target_set = column.proxy_set + cols = self.c._all_columns + for c in cols: + expanded_proxy_set = set(_expand_cloned(c.proxy_set)) + i = target_set.intersection(expanded_proxy_set) + if i and (not require_embedded + or embedded(expanded_proxy_set, target_set)): + if col is None: + + # no corresponding column yet, pick this one. + + col, intersect = c, i + elif len(i) > len(intersect): + + # 'c' has a larger field of correspondence than + # 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x + # matches a1.c.x->table.c.x better than + # selectable.c.x->table.c.x does. + + col, intersect = c, i + elif i == intersect: + + # they have the same field of correspondence. see + # which proxy_set has fewer columns in it, which + # indicates a closer relationship with the root + # column. 
Also take into account the "weight" + # attribute which CompoundSelect() uses to give + # higher precedence to columns based on vertical + # position in the compound statement, and discard + # columns that have no reference to the target + # column (also occurs with CompoundSelect) + + col_distance = util.reduce( + operator.add, + [sc._annotations.get('weight', 1) for sc in + col.proxy_set if sc.shares_lineage(column)]) + c_distance = util.reduce( + operator.add, + [sc._annotations.get('weight', 1) for sc in + c.proxy_set if sc.shares_lineage(column)]) + if c_distance < col_distance: + col, intersect = c, i + return col + + @property + def description(self): + """a brief description of this FromClause. + + Used primarily for error message formatting. + + """ + return getattr(self, 'name', self.__class__.__name__ + " object") + + def _reset_exported(self): + """delete memoized collections when a FromClause is cloned.""" + + self._memoized_property.expire_instance(self) + + @_memoized_property + def columns(self): + """A named-based collection of :class:`.ColumnElement` objects + maintained by this :class:`.FromClause`. + + The :attr:`.columns`, or :attr:`.c` collection, is the gateway + to the construction of SQL expressions using table-bound or + other selectable-bound columns:: + + select([mytable]).where(mytable.c.somecolumn == 5) + + """ + + if '_columns' not in self.__dict__: + self._init_collections() + self._populate_column_collection() + return self._columns.as_immutable() + + @_memoized_property + def primary_key(self): + """Return the collection of Column objects which comprise the + primary key of this FromClause.""" + + self._init_collections() + self._populate_column_collection() + return self.primary_key + + @_memoized_property + def foreign_keys(self): + """Return the collection of ForeignKey objects which this + FromClause references.""" + + self._init_collections() + self._populate_column_collection() + return self.foreign_keys + + c = property(attrgetter('columns'), + doc="An alias for the :attr:`.columns` attribute.") + _select_iterable = property(attrgetter('columns')) + + def _init_collections(self): + assert '_columns' not in self.__dict__ + assert 'primary_key' not in self.__dict__ + assert 'foreign_keys' not in self.__dict__ + + self._columns = ColumnCollection() + self.primary_key = ColumnSet() + self.foreign_keys = set() + + @property + def _cols_populated(self): + return '_columns' in self.__dict__ + + def _populate_column_collection(self): + """Called on subclasses to establish the .c collection. + + Each implementation has a different way of establishing + this collection. + + """ + + def _refresh_for_new_column(self, column): + """Given a column added to the .c collection of an underlying + selectable, produce the local version of that column, assuming this + selectable ultimately should proxy this column. + + this is used to "ping" a derived selectable to add a new column + to its .c. collection when a Column has been added to one of the + Table objects it ultimtely derives from. + + If the given selectable hasn't populated its .c. collection yet, + it should at least pass on the message to the contained selectables, + but it will return None. + + This method is currently used by Declarative to allow Table + columns to be added to a partially constructed inheritance + mapping that may have already produced joins. The method + isn't public right now, as the full span of implications + and/or caveats aren't yet clear. 
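+
+        A sketch of the intended scenario (names are hypothetical and the
+        exact dispatch is internal)::
+
+            j = table_a.join(table_b, table_a.c.id == table_b.c.a_id)
+            table_a.append_column(Column('status', String))
+            # internal machinery may then ping the join, roughly:
+            # j._refresh_for_new_column(<the new column>)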
+ + It's also possible that this functionality could be invoked by + default via an event, which would require that + selectables maintain a weak referencing collection of all + derivations. + + """ + if not self._cols_populated: + return None + elif (column.key in self.columns and + self.columns[column.key] is column): + return column + else: + return None + + +class Join(FromClause): + """represent a ``JOIN`` construct between two :class:`.FromClause` + elements. + + The public constructor function for :class:`.Join` is the module-level + :func:`.join()` function, as well as the :meth:`.FromClause.join` method + of any :class:`.FromClause` (e.g. such as :class:`.Table`). + + .. seealso:: + + :func:`.join` + + :meth:`.FromClause.join` + + """ + __visit_name__ = 'join' + + _is_join = True + + def __init__(self, left, right, onclause=None, isouter=False, full=False): + """Construct a new :class:`.Join`. + + The usual entrypoint here is the :func:`~.expression.join` + function or the :meth:`.FromClause.join` method of any + :class:`.FromClause` object. + + """ + self.left = _interpret_as_from(left) + self.right = _interpret_as_from(right).self_group() + + if onclause is None: + self.onclause = self._match_primaries(self.left, self.right) + else: + self.onclause = onclause + + self.isouter = isouter + self.full = full + + @classmethod + def _create_outerjoin(cls, left, right, onclause=None, full=False): + """Return an ``OUTER JOIN`` clause element. + + The returned object is an instance of :class:`.Join`. + + Similar functionality is also available via the + :meth:`~.FromClause.outerjoin()` method on any + :class:`.FromClause`. + + :param left: The left side of the join. + + :param right: The right side of the join. + + :param onclause: Optional criterion for the ``ON`` clause, is + derived from foreign key relationships established between + left and right otherwise. + + To chain joins together, use the :meth:`.FromClause.join` or + :meth:`.FromClause.outerjoin` methods on the resulting + :class:`.Join` object. + + """ + return cls(left, right, onclause, isouter=True, full=full) + + @classmethod + def _create_join(cls, left, right, onclause=None, isouter=False, + full=False): + """Produce a :class:`.Join` object, given two :class:`.FromClause` + expressions. + + E.g.:: + + j = join(user_table, address_table, + user_table.c.id == address_table.c.user_id) + stmt = select([user_table]).select_from(j) + + would emit SQL along the lines of:: + + SELECT user.id, user.name FROM user + JOIN address ON user.id = address.user_id + + Similar functionality is available given any + :class:`.FromClause` object (e.g. such as a :class:`.Table`) using + the :meth:`.FromClause.join` method. + + :param left: The left side of the join. + + :param right: the right side of the join; this is any + :class:`.FromClause` object such as a :class:`.Table` object, and + may also be a selectable-compatible object such as an ORM-mapped + class. + + :param onclause: a SQL expression representing the ON clause of the + join. If left at ``None``, :meth:`.FromClause.join` will attempt to + join the two tables based on a foreign key relationship. + + :param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN. + + :param full: if True, render a FULL OUTER JOIN, instead of JOIN. + + .. versionadded:: 1.1 + + .. 
seealso:: + + :meth:`.FromClause.join` - method form, based on a given left side + + :class:`.Join` - the type of object produced + + """ + + return cls(left, right, onclause, isouter, full) + + @property + def description(self): + return "Join object on %s(%d) and %s(%d)" % ( + self.left.description, + id(self.left), + self.right.description, + id(self.right)) + + def is_derived_from(self, fromclause): + return fromclause is self or \ + self.left.is_derived_from(fromclause) or \ + self.right.is_derived_from(fromclause) + + def self_group(self, against=None): + return FromGrouping(self) + + @util.dependencies("sqlalchemy.sql.util") + def _populate_column_collection(self, sqlutil): + columns = [c for c in self.left.columns] + \ + [c for c in self.right.columns] + + self.primary_key.extend(sqlutil.reduce_columns( + (c for c in columns if c.primary_key), self.onclause)) + self._columns.update((col._label, col) for col in columns) + self.foreign_keys.update(itertools.chain( + *[col.foreign_keys for col in columns])) + + def _refresh_for_new_column(self, column): + col = self.left._refresh_for_new_column(column) + if col is None: + col = self.right._refresh_for_new_column(column) + if col is not None: + if self._cols_populated: + self._columns[col._label] = col + self.foreign_keys.update(col.foreign_keys) + if col.primary_key: + self.primary_key.add(col) + return col + return None + + def _copy_internals(self, clone=_clone, **kw): + self._reset_exported() + self.left = clone(self.left, **kw) + self.right = clone(self.right, **kw) + self.onclause = clone(self.onclause, **kw) + + def get_children(self, **kwargs): + return self.left, self.right, self.onclause + + def _match_primaries(self, left, right): + if isinstance(left, Join): + left_right = left.right + else: + left_right = None + return self._join_condition(left, right, a_subset=left_right) + + @classmethod + def _join_condition(cls, a, b, ignore_nonexistent_tables=False, + a_subset=None, + consider_as_foreign_keys=None): + """create a join condition between two tables or selectables. + + e.g.:: + + join_condition(tablea, tableb) + + would produce an expression along the lines of:: + + tablea.c.id==tableb.c.tablea_id + + The join is determined based on the foreign key relationships + between the two selectables. If there are multiple ways + to join, or no way to join, an error is raised. + + :param ignore_nonexistent_tables: Deprecated - this + flag is no longer used. Only resolution errors regarding + the two given tables are propagated. + + :param a_subset: An optional expression that is a sub-component + of ``a``. An attempt will be made to join to just this sub-component + first before looking at the full ``a`` construct, and if found + will be successful even if there are other ways to join to ``a``. + This allows the "right side" of a join to be passed thereby + providing a "natural join". + + """ + constraints = cls._joincond_scan_left_right( + a, a_subset, b, consider_as_foreign_keys) + + if len(constraints) > 1: + cls._joincond_trim_constraints( + a, b, constraints, consider_as_foreign_keys) + + if len(constraints) == 0: + if isinstance(b, FromGrouping): + hint = " Perhaps you meant to convert the right side to a "\ + "subquery using alias()?" 
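+                # this hint targets a right side such as select([...])
+                # that was not converted via .alias(), in the case where
+                # no usable foreign keys are located across the grouping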
+ else: + hint = "" + raise exc.NoForeignKeysError( + "Can't find any foreign key relationships " + "between '%s' and '%s'.%s" % + (a.description, b.description, hint)) + + crit = [(x == y) for x, y in list(constraints.values())[0]] + if len(crit) == 1: + return (crit[0]) + else: + return and_(*crit) + + @classmethod + def _joincond_scan_left_right( + cls, a, a_subset, b, consider_as_foreign_keys): + constraints = collections.defaultdict(list) + + for left in (a_subset, a): + if left is None: + continue + for fk in sorted( + b.foreign_keys, + key=lambda fk: fk.parent._creation_order): + if consider_as_foreign_keys is not None and \ + fk.parent not in consider_as_foreign_keys: + continue + try: + col = fk.get_referent(left) + except exc.NoReferenceError as nrte: + if nrte.table_name == left.name: + raise + else: + continue + + if col is not None: + constraints[fk.constraint].append((col, fk.parent)) + if left is not b: + for fk in sorted( + left.foreign_keys, + key=lambda fk: fk.parent._creation_order): + if consider_as_foreign_keys is not None and \ + fk.parent not in consider_as_foreign_keys: + continue + try: + col = fk.get_referent(b) + except exc.NoReferenceError as nrte: + if nrte.table_name == b.name: + raise + else: + continue + + if col is not None: + constraints[fk.constraint].append((col, fk.parent)) + if constraints: + break + return constraints + + @classmethod + def _joincond_trim_constraints( + cls, a, b, constraints, consider_as_foreign_keys): + # more than one constraint matched. narrow down the list + # to include just those FKCs that match exactly to + # "consider_as_foreign_keys". + if consider_as_foreign_keys: + for const in list(constraints): + if set(f.parent for f in const.elements) != set( + consider_as_foreign_keys): + del constraints[const] + + # if still multiple constraints, but + # they all refer to the exact same end result, use it. + if len(constraints) > 1: + dedupe = set(tuple(crit) for crit in constraints.values()) + if len(dedupe) == 1: + key = list(constraints)[0] + constraints = {key: constraints[key]} + + if len(constraints) != 1: + raise exc.AmbiguousForeignKeysError( + "Can't determine join between '%s' and '%s'; " + "tables have more than one foreign key " + "constraint relationship between them. " + "Please specify the 'onclause' of this " + "join explicitly." % (a.description, b.description)) + + def select(self, whereclause=None, **kwargs): + r"""Create a :class:`.Select` from this :class:`.Join`. + + The equivalent long-hand form, given a :class:`.Join` object + ``j``, is:: + + from sqlalchemy import select + j = select([j.left, j.right], **kw).\ + where(whereclause).\ + select_from(j) + + :param whereclause: the WHERE criterion that will be sent to + the :func:`select()` function + + :param \**kwargs: all other kwargs are sent to the + underlying :func:`select()` function. + + """ + collist = [self.left, self.right] + + return Select(collist, whereclause, from_obj=[self], **kwargs) + + @property + def bind(self): + return self.left.bind or self.right.bind + + @util.dependencies("sqlalchemy.sql.util") + def alias(self, sqlutil, name=None, flat=False): + r"""return an alias of this :class:`.Join`. + + The default behavior here is to first produce a SELECT + construct from this :class:`.Join`, then to produce an + :class:`.Alias` from that. 
So given a join of the form:: + + j = table_a.join(table_b, table_a.c.id == table_b.c.a_id) + + The JOIN by itself would look like:: + + table_a JOIN table_b ON table_a.id = table_b.a_id + + Whereas the alias of the above, ``j.alias()``, would in a + SELECT context look like:: + + (SELECT table_a.id AS table_a_id, table_b.id AS table_b_id, + table_b.a_id AS table_b_a_id + FROM table_a + JOIN table_b ON table_a.id = table_b.a_id) AS anon_1 + + The equivalent long-hand form, given a :class:`.Join` object + ``j``, is:: + + from sqlalchemy import select, alias + j = alias( + select([j.left, j.right]).\ + select_from(j).\ + with_labels(True).\ + correlate(False), + name=name + ) + + The selectable produced by :meth:`.Join.alias` features the same + columns as that of the two individual selectables presented under + a single name - the individual columns are "auto-labeled", meaning + the ``.c.`` collection of the resulting :class:`.Alias` represents + the names of the individual columns using a + ``_`` scheme:: + + j.c.table_a_id + j.c.table_b_a_id + + :meth:`.Join.alias` also features an alternate + option for aliasing joins which produces no enclosing SELECT and + does not normally apply labels to the column names. The + ``flat=True`` option will call :meth:`.FromClause.alias` + against the left and right sides individually. + Using this option, no new ``SELECT`` is produced; + we instead, from a construct as below:: + + j = table_a.join(table_b, table_a.c.id == table_b.c.a_id) + j = j.alias(flat=True) + + we get a result like this:: + + table_a AS table_a_1 JOIN table_b AS table_b_1 ON + table_a_1.id = table_b_1.a_id + + The ``flat=True`` argument is also propagated to the contained + selectables, so that a composite join such as:: + + j = table_a.join( + table_b.join(table_c, + table_b.c.id == table_c.c.b_id), + table_b.c.a_id == table_a.c.id + ).alias(flat=True) + + Will produce an expression like:: + + table_a AS table_a_1 JOIN ( + table_b AS table_b_1 JOIN table_c AS table_c_1 + ON table_b_1.id = table_c_1.b_id + ) ON table_a_1.id = table_b_1.a_id + + The standalone :func:`~.expression.alias` function as well as the + base :meth:`.FromClause.alias` method also support the ``flat=True`` + argument as a no-op, so that the argument can be passed to the + ``alias()`` method of any selectable. + + .. versionadded:: 0.9.0 Added the ``flat=True`` option to create + "aliases" of joins without enclosing inside of a SELECT + subquery. + + :param name: name given to the alias. + + :param flat: if True, produce an alias of the left and right + sides of this :class:`.Join` and return the join of those + two selectables. This produces join expression that does not + include an enclosing SELECT. + + .. versionadded:: 0.9.0 + + .. 
seealso:: + + :func:`~.expression.alias` + + """ + if flat: + assert name is None, "Can't send name argument with flat" + left_a, right_a = self.left.alias(flat=True), \ + self.right.alias(flat=True) + adapter = sqlutil.ClauseAdapter(left_a).\ + chain(sqlutil.ClauseAdapter(right_a)) + + return left_a.join(right_a, adapter.traverse(self.onclause), + isouter=self.isouter, full=self.full) + else: + return self.select(use_labels=True, correlate=False).alias(name) + + @property + def _hide_froms(self): + return itertools.chain(*[_from_objects(x.left, x.right) + for x in self._cloned_set]) + + @property + def _from_objects(self): + return [self] + \ + self.onclause._from_objects + \ + self.left._from_objects + \ + self.right._from_objects + + +class Alias(FromClause): + """Represents an table or selectable alias (AS). + + Represents an alias, as typically applied to any table or + sub-select within a SQL statement using the ``AS`` keyword (or + without the keyword on certain databases such as Oracle). + + This object is constructed from the :func:`~.expression.alias` module + level function as well as the :meth:`.FromClause.alias` method available + on all :class:`.FromClause` subclasses. + + """ + + __visit_name__ = 'alias' + named_with_column = True + + _is_from_container = True + + def __init__(self, selectable, name=None): + baseselectable = selectable + while isinstance(baseselectable, Alias): + baseselectable = baseselectable.element + self.original = baseselectable + self.supports_execution = baseselectable.supports_execution + if self.supports_execution: + self._execution_options = baseselectable._execution_options + self.element = selectable + if name is None: + if self.original.named_with_column: + name = getattr(self.original, 'name', None) + name = _anonymous_label('%%(%d %s)s' % (id(self), name + or 'anon')) + self.name = name + + def self_group(self, target=None): + if isinstance(target, CompoundSelect) and \ + isinstance(self.original, Select) and \ + self.original._needs_parens_for_grouping(): + return FromGrouping(self) + + return super(Alias, self).self_group(target) + + @property + def description(self): + if util.py3k: + return self.name + else: + return self.name.encode('ascii', 'backslashreplace') + + def as_scalar(self): + try: + return self.element.as_scalar() + except AttributeError: + raise AttributeError("Element %s does not support " + "'as_scalar()'" % self.element) + + def is_derived_from(self, fromclause): + if fromclause in self._cloned_set: + return True + return self.element.is_derived_from(fromclause) + + def _populate_column_collection(self): + for col in self.element.columns._all_columns: + col._make_proxy(self) + + def _refresh_for_new_column(self, column): + col = self.element._refresh_for_new_column(column) + if col is not None: + if not self._cols_populated: + return None + else: + return col._make_proxy(self) + else: + return None + + def _copy_internals(self, clone=_clone, **kw): + # don't apply anything to an aliased Table + # for now. May want to drive this from + # the given **kw. 
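+        # (the alias of a plain Table is left unchanged here; an
+        # Immutable TableClause has no internal state that the clone
+        # traversal below would need to rewrite)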
+ if isinstance(self.element, TableClause): + return + self._reset_exported() + self.element = clone(self.element, **kw) + baseselectable = self.element + while isinstance(baseselectable, Alias): + baseselectable = baseselectable.element + self.original = baseselectable + + def get_children(self, column_collections=True, **kw): + if column_collections: + for c in self.c: + yield c + yield self.element + + @property + def _from_objects(self): + return [self] + + @property + def bind(self): + return self.element.bind + + +class Lateral(Alias): + """Represent a LATERAL subquery. + + This object is constructed from the :func:`~.expression.lateral` module + level function as well as the :meth:`.FromClause.lateral` method available + on all :class:`.FromClause` subclasses. + + While LATERAL is part of the SQL standard, curently only more recent + PostgreSQL versions provide support for this keyword. + + .. versionadded:: 1.1 + + .. seealso:: + + :ref:`lateral_selects` - overview of usage. + + """ + + __visit_name__ = 'lateral' + + +class TableSample(Alias): + """Represent a TABLESAMPLE clause. + + This object is constructed from the :func:`~.expression.tablesample` module + level function as well as the :meth:`.FromClause.tablesample` method available + on all :class:`.FromClause` subclasses. + + .. versionadded:: 1.1 + + .. seealso:: + + :func:`~.expression.tablesample` + + """ + + __visit_name__ = 'tablesample' + + def __init__(self, selectable, sampling, + name=None, + seed=None): + self.sampling = sampling + self.seed = seed + super(TableSample, self).__init__(selectable, name=name) + + @util.dependencies("sqlalchemy.sql.functions") + def _get_method(self, functions): + if isinstance(self.sampling, functions.Function): + return self.sampling + else: + return functions.func.system(self.sampling) + + +class CTE(Generative, HasSuffixes, Alias): + """Represent a Common Table Expression. + + The :class:`.CTE` object is obtained using the + :meth:`.SelectBase.cte` method from any selectable. + See that method for complete examples. + + .. 
versionadded:: 0.7.6 + + """ + __visit_name__ = 'cte' + + def __init__(self, selectable, + name=None, + recursive=False, + _cte_alias=None, + _restates=frozenset(), + _suffixes=None): + self.recursive = recursive + self._cte_alias = _cte_alias + self._restates = _restates + if _suffixes: + self._suffixes = _suffixes + super(CTE, self).__init__(selectable, name=name) + + def _copy_internals(self, clone=_clone, **kw): + super(CTE, self)._copy_internals(clone, **kw) + if self._cte_alias is not None: + self._cte_alias = self + self._restates = frozenset([ + clone(elem, **kw) for elem in self._restates + ]) + + @util.dependencies("sqlalchemy.sql.dml") + def _populate_column_collection(self, dml): + if isinstance(self.element, dml.UpdateBase): + for col in self.element._returning: + col._make_proxy(self) + else: + for col in self.element.columns._all_columns: + col._make_proxy(self) + + def alias(self, name=None, flat=False): + return CTE( + self.original, + name=name, + recursive=self.recursive, + _cte_alias=self, + _suffixes=self._suffixes + ) + + def union(self, other): + return CTE( + self.original.union(other), + name=self.name, + recursive=self.recursive, + _restates=self._restates.union([self]), + _suffixes=self._suffixes + ) + + def union_all(self, other): + return CTE( + self.original.union_all(other), + name=self.name, + recursive=self.recursive, + _restates=self._restates.union([self]), + _suffixes=self._suffixes + ) + + +class HasCTE(object): + """Mixin that declares a class to include CTE support. + + .. versionadded:: 1.1 + + """ + + def cte(self, name=None, recursive=False): + r"""Return a new :class:`.CTE`, or Common Table Expression instance. + + Common table expressions are a SQL standard whereby SELECT + statements can draw upon secondary statements specified along + with the primary statement, using a clause called "WITH". + Special semantics regarding UNION can also be employed to + allow "recursive" queries, where a SELECT statement can draw + upon the set of rows that have previously been selected. + + CTEs can also be applied to DML constructs UPDATE, INSERT + and DELETE on some databases, both as a source of CTE rows + when combined with RETURNING, as well as a consumer of + CTE rows. + + SQLAlchemy detects :class:`.CTE` objects, which are treated + similarly to :class:`.Alias` objects, as special elements + to be delivered to the FROM clause of the statement as well + as to a WITH clause at the top of the statement. + + .. versionchanged:: 1.1 Added support for UPDATE/INSERT/DELETE as + CTE, CTEs added to UPDATE/INSERT/DELETE. + + :param name: name given to the common table expression. Like + :meth:`._FromClause.alias`, the name can be left as ``None`` + in which case an anonymous symbol will be used at query + compile time. + :param recursive: if ``True``, will render ``WITH RECURSIVE``. + A recursive common table expression is intended to be used in + conjunction with UNION ALL in order to derive rows + from those already selected. + + The following examples include two from PostgreSQL's documentation at + http://www.postgresql.org/docs/current/static/queries-with.html, + as well as additional examples. 
+ + Example 1, non recursive:: + + from sqlalchemy import (Table, Column, String, Integer, + MetaData, select, func) + + metadata = MetaData() + + orders = Table('orders', metadata, + Column('region', String), + Column('amount', Integer), + Column('product', String), + Column('quantity', Integer) + ) + + regional_sales = select([ + orders.c.region, + func.sum(orders.c.amount).label('total_sales') + ]).group_by(orders.c.region).cte("regional_sales") + + + top_regions = select([regional_sales.c.region]).\ + where( + regional_sales.c.total_sales > + select([ + func.sum(regional_sales.c.total_sales)/10 + ]) + ).cte("top_regions") + + statement = select([ + orders.c.region, + orders.c.product, + func.sum(orders.c.quantity).label("product_units"), + func.sum(orders.c.amount).label("product_sales") + ]).where(orders.c.region.in_( + select([top_regions.c.region]) + )).group_by(orders.c.region, orders.c.product) + + result = conn.execute(statement).fetchall() + + Example 2, WITH RECURSIVE:: + + from sqlalchemy import (Table, Column, String, Integer, + MetaData, select, func) + + metadata = MetaData() + + parts = Table('parts', metadata, + Column('part', String), + Column('sub_part', String), + Column('quantity', Integer), + ) + + included_parts = select([ + parts.c.sub_part, + parts.c.part, + parts.c.quantity]).\ + where(parts.c.part=='our part').\ + cte(recursive=True) + + + incl_alias = included_parts.alias() + parts_alias = parts.alias() + included_parts = included_parts.union_all( + select([ + parts_alias.c.sub_part, + parts_alias.c.part, + parts_alias.c.quantity + ]). + where(parts_alias.c.part==incl_alias.c.sub_part) + ) + + statement = select([ + included_parts.c.sub_part, + func.sum(included_parts.c.quantity). + label('total_quantity') + ]).\ + group_by(included_parts.c.sub_part) + + result = conn.execute(statement).fetchall() + + Example 3, an upsert using UPDATE and INSERT with CTEs:: + + from datetime import date + from sqlalchemy import (MetaData, Table, Column, Integer, + Date, select, literal, and_, exists) + + metadata = MetaData() + + visitors = Table('visitors', metadata, + Column('product_id', Integer, primary_key=True), + Column('date', Date, primary_key=True), + Column('count', Integer), + ) + + # add 5 visitors for the product_id == 1 + product_id = 1 + day = date.today() + count = 5 + + update_cte = ( + visitors.update() + .where(and_(visitors.c.product_id == product_id, + visitors.c.date == day)) + .values(count=visitors.c.count + count) + .returning(literal(1)) + .cte('update_cte') + ) + + upsert = visitors.insert().from_select( + [visitors.c.product_id, visitors.c.date, visitors.c.count], + select([literal(product_id), literal(day), literal(count)]) + .where(~exists(update_cte.select())) + ) + + connection.execute(upsert) + + .. seealso:: + + :meth:`.orm.query.Query.cte` - ORM version of + :meth:`.HasCTE.cte`. 
+ + """ + return CTE(self, name=name, recursive=recursive) + + +class FromGrouping(FromClause): + """Represent a grouping of a FROM clause""" + __visit_name__ = 'grouping' + + def __init__(self, element): + self.element = element + + def _init_collections(self): + pass + + @property + def columns(self): + return self.element.columns + + @property + def primary_key(self): + return self.element.primary_key + + @property + def foreign_keys(self): + return self.element.foreign_keys + + def is_derived_from(self, element): + return self.element.is_derived_from(element) + + def alias(self, **kw): + return FromGrouping(self.element.alias(**kw)) + + @property + def _hide_froms(self): + return self.element._hide_froms + + def get_children(self, **kwargs): + return self.element, + + def _copy_internals(self, clone=_clone, **kw): + self.element = clone(self.element, **kw) + + @property + def _from_objects(self): + return self.element._from_objects + + def __getattr__(self, attr): + return getattr(self.element, attr) + + def __getstate__(self): + return {'element': self.element} + + def __setstate__(self, state): + self.element = state['element'] + + +class TableClause(Immutable, FromClause): + """Represents a minimal "table" construct. + + This is a lightweight table object that has only a name and a + collection of columns, which are typically produced + by the :func:`.expression.column` function:: + + from sqlalchemy import table, column + + user = table("user", + column("id"), + column("name"), + column("description"), + ) + + The :class:`.TableClause` construct serves as the base for + the more commonly used :class:`~.schema.Table` object, providing + the usual set of :class:`~.expression.FromClause` services including + the ``.c.`` collection and statement generation methods. + + It does **not** provide all the additional schema-level services + of :class:`~.schema.Table`, including constraints, references to other + tables, or support for :class:`.MetaData`-level services. It's useful + on its own as an ad-hoc construct used to generate quick SQL + statements when a more fully fledged :class:`~.schema.Table` + is not on hand. + + """ + + __visit_name__ = 'table' + + named_with_column = True + + implicit_returning = False + """:class:`.TableClause` doesn't support having a primary key or column + -level defaults, so implicit returning doesn't apply.""" + + _autoincrement_column = None + """No PK or default support so no autoincrement column.""" + + def __init__(self, name, *columns): + """Produce a new :class:`.TableClause`. + + The object returned is an instance of :class:`.TableClause`, which + represents the "syntactical" portion of the schema-level + :class:`~.schema.Table` object. + It may be used to construct lightweight table constructs. + + .. versionchanged:: 1.0.0 :func:`.expression.table` can now + be imported from the plain ``sqlalchemy`` namespace like any + other SQL element. + + :param name: Name of the table. + + :param columns: A collection of :func:`.expression.column` constructs. 
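+
+        A sketch of ad-hoc statement generation with the result::
+
+            user = table("user", column("id"), column("name"))
+            stmt = user.update().where(user.c.id == 7).values(name='ed')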
+ + """ + + super(TableClause, self).__init__() + self.name = self.fullname = name + self._columns = ColumnCollection() + self.primary_key = ColumnSet() + self.foreign_keys = set() + for c in columns: + self.append_column(c) + + def _init_collections(self): + pass + + @util.memoized_property + def description(self): + if util.py3k: + return self.name + else: + return self.name.encode('ascii', 'backslashreplace') + + def append_column(self, c): + self._columns[c.key] = c + c.table = self + + def get_children(self, column_collections=True, **kwargs): + if column_collections: + return [c for c in self.c] + else: + return [] + + @util.dependencies("sqlalchemy.sql.dml") + def insert(self, dml, values=None, inline=False, **kwargs): + """Generate an :func:`.insert` construct against this + :class:`.TableClause`. + + E.g.:: + + table.insert().values(name='foo') + + See :func:`.insert` for argument and usage information. + + """ + + return dml.Insert(self, values=values, inline=inline, **kwargs) + + @util.dependencies("sqlalchemy.sql.dml") + def update( + self, dml, whereclause=None, values=None, inline=False, **kwargs): + """Generate an :func:`.update` construct against this + :class:`.TableClause`. + + E.g.:: + + table.update().where(table.c.id==7).values(name='foo') + + See :func:`.update` for argument and usage information. + + """ + + return dml.Update(self, whereclause=whereclause, + values=values, inline=inline, **kwargs) + + @util.dependencies("sqlalchemy.sql.dml") + def delete(self, dml, whereclause=None, **kwargs): + """Generate a :func:`.delete` construct against this + :class:`.TableClause`. + + E.g.:: + + table.delete().where(table.c.id==7) + + See :func:`.delete` for argument and usage information. + + """ + + return dml.Delete(self, whereclause, **kwargs) + + @property + def _from_objects(self): + return [self] + + +class ForUpdateArg(ClauseElement): + + @classmethod + def parse_legacy_select(self, arg): + """Parse the for_update argument of :func:`.select`. + + :param mode: Defines the lockmode to use. + + ``None`` - translates to no lockmode + + ``'update'`` - translates to ``FOR UPDATE`` + (standard SQL, supported by most dialects) + + ``'nowait'`` - translates to ``FOR UPDATE NOWAIT`` + (supported by Oracle, PostgreSQL 8.1 upwards) + + ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL), + and ``FOR SHARE`` (for PostgreSQL) + + ``'read_nowait'`` - translates to ``FOR SHARE NOWAIT`` + (supported by PostgreSQL). ``FOR SHARE`` and + ``FOR SHARE NOWAIT`` (PostgreSQL). + + """ + if arg in (None, False): + return None + + nowait = read = False + if arg == 'nowait': + nowait = True + elif arg == 'read': + read = True + elif arg == 'read_nowait': + read = nowait = True + elif arg is not True: + raise exc.ArgumentError("Unknown for_update argument: %r" % arg) + + return ForUpdateArg(read=read, nowait=nowait) + + @property + def legacy_for_update_value(self): + if self.read and not self.nowait: + return "read" + elif self.read and self.nowait: + return "read_nowait" + elif self.nowait: + return "nowait" + else: + return True + + def _copy_internals(self, clone=_clone, **kw): + if self.of is not None: + self.of = [clone(col, **kw) for col in self.of] + + def __init__( + self, nowait=False, read=False, of=None, + skip_locked=False, key_share=False): + """Represents arguments specified to :meth:`.Select.for_update`. + + .. 
versionadded:: 0.9.0 + + """ + + self.nowait = nowait + self.read = read + self.skip_locked = skip_locked + self.key_share = key_share + if of is not None: + self.of = [_interpret_as_column_or_from(elem) + for elem in util.to_list(of)] + else: + self.of = None + + +class SelectBase(HasCTE, Executable, FromClause): + """Base class for SELECT statements. + + + This includes :class:`.Select`, :class:`.CompoundSelect` and + :class:`.TextAsFrom`. + + + """ + + def as_scalar(self): + """return a 'scalar' representation of this selectable, which can be + used as a column expression. + + Typically, a select statement which has only one column in its columns + clause is eligible to be used as a scalar expression. + + The returned object is an instance of + :class:`ScalarSelect`. + + """ + return ScalarSelect(self) + + def label(self, name): + """return a 'scalar' representation of this selectable, embedded as a + subquery with a label. + + .. seealso:: + + :meth:`~.SelectBase.as_scalar`. + + """ + return self.as_scalar().label(name) + + @_generative + @util.deprecated('0.6', + message="``autocommit()`` is deprecated. Use " + ":meth:`.Executable.execution_options` with the " + "'autocommit' flag.") + def autocommit(self): + """return a new selectable with the 'autocommit' flag set to + True. + """ + + self._execution_options = \ + self._execution_options.union({'autocommit': True}) + + def _generate(self): + """Override the default _generate() method to also clear out + exported collections.""" + + s = self.__class__.__new__(self.__class__) + s.__dict__ = self.__dict__.copy() + s._reset_exported() + return s + + @property + def _from_objects(self): + return [self] + + +class GenerativeSelect(SelectBase): + """Base class for SELECT statements where additional elements can be + added. + + This serves as the base for :class:`.Select` and :class:`.CompoundSelect` + where elements such as ORDER BY, GROUP BY can be added and column + rendering can be controlled. Compare to :class:`.TextAsFrom`, which, + while it subclasses :class:`.SelectBase` and is also a SELECT construct, + represents a fixed textual string which cannot be altered at this level, + only wrapped as a subquery. + + .. versionadded:: 0.9.0 :class:`.GenerativeSelect` was added to + provide functionality specific to :class:`.Select` and + :class:`.CompoundSelect` while allowing :class:`.SelectBase` to be + used for other SELECT-like objects, e.g. :class:`.TextAsFrom`. + + """ + _order_by_clause = ClauseList() + _group_by_clause = ClauseList() + _limit_clause = None + _offset_clause = None + _for_update_arg = None + + def __init__(self, + use_labels=False, + for_update=False, + limit=None, + offset=None, + order_by=None, + group_by=None, + bind=None, + autocommit=None): + self.use_labels = use_labels + + if for_update is not False: + self._for_update_arg = (ForUpdateArg. + parse_legacy_select(for_update)) + + if autocommit is not None: + util.warn_deprecated('autocommit on select() is ' + 'deprecated. 
Use .execution_options(a' + 'utocommit=True)') + self._execution_options = \ + self._execution_options.union( + {'autocommit': autocommit}) + if limit is not None: + self._limit_clause = _offset_or_limit_clause(limit) + if offset is not None: + self._offset_clause = _offset_or_limit_clause(offset) + self._bind = bind + + if order_by is not None: + self._order_by_clause = ClauseList( + *util.to_list(order_by), + _literal_as_text=_literal_and_labels_as_label_reference) + if group_by is not None: + self._group_by_clause = ClauseList( + *util.to_list(group_by), + _literal_as_text=_literal_as_label_reference) + + @property + def for_update(self): + """Provide legacy dialect support for the ``for_update`` attribute. + """ + if self._for_update_arg is not None: + return self._for_update_arg.legacy_for_update_value + else: + return None + + @for_update.setter + def for_update(self, value): + self._for_update_arg = ForUpdateArg.parse_legacy_select(value) + + @_generative + def with_for_update(self, nowait=False, read=False, of=None, + skip_locked=False, key_share=False): + """Specify a ``FOR UPDATE`` clause for this :class:`.GenerativeSelect`. + + E.g.:: + + stmt = select([table]).with_for_update(nowait=True) + + On a database like PostgreSQL or Oracle, the above would render a + statement like:: + + SELECT table.a, table.b FROM table FOR UPDATE NOWAIT + + on other backends, the ``nowait`` option is ignored and instead + would produce:: + + SELECT table.a, table.b FROM table FOR UPDATE + + When called with no arguments, the statement will render with + the suffix ``FOR UPDATE``. Additional arguments can then be + provided which allow for common database-specific + variants. + + :param nowait: boolean; will render ``FOR UPDATE NOWAIT`` on Oracle + and PostgreSQL dialects. + + :param read: boolean; will render ``LOCK IN SHARE MODE`` on MySQL, + ``FOR SHARE`` on PostgreSQL. On PostgreSQL, when combined with + ``nowait``, will render ``FOR SHARE NOWAIT``. + + :param of: SQL expression or list of SQL expression elements + (typically :class:`.Column` objects or a compatible expression) which + will render into a ``FOR UPDATE OF`` clause; supported by PostgreSQL + and Oracle. May render as a table or as a column depending on + backend. + + :param skip_locked: boolean, will render ``FOR UPDATE SKIP LOCKED`` + on Oracle and PostgreSQL dialects or ``FOR SHARE SKIP LOCKED`` if + ``read=True`` is also specified. + + .. versionadded:: 1.1.0 + + :param key_share: boolean, will render ``FOR NO KEY UPDATE``, + or if combined with ``read=True`` will render ``FOR KEY SHARE``, + on the PostgreSQL dialect. + + .. versionadded:: 1.1.0 + + """ + self._for_update_arg = ForUpdateArg(nowait=nowait, read=read, of=of, + skip_locked=skip_locked, + key_share=key_share) + + @_generative + def apply_labels(self): + """return a new selectable with the 'use_labels' flag set to True. + + This will result in column expressions being generated using labels + against their table name, such as "SELECT somecolumn AS + tablename_somecolumn". This allows selectables which contain multiple + FROM clauses to produce a unique set of column names regardless of + name conflicts among the individual FROM clauses. + + """ + self.use_labels = True + + @property + def _limit(self): + """Get an integer value for the limit. This should only be used + by code that cannot support a limit as a BindParameter or + other custom clause as it will throw an exception if the limit + isn't currently set to an integer. 
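+
+        A sketch of both outcomes (``t`` is hypothetical)::
+
+            select([t]).limit(5)._limit               # -> 5
+            select([t]).limit(bindparam('n'))._limit  # -> CompileError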
+ + """ + return _offset_or_limit_clause_asint(self._limit_clause, "limit") + + @property + def _simple_int_limit(self): + """True if the LIMIT clause is a simple integer, False + if it is not present or is a SQL expression. + """ + return isinstance(self._limit_clause, _OffsetLimitParam) + + @property + def _simple_int_offset(self): + """True if the OFFSET clause is a simple integer, False + if it is not present or is a SQL expression. + """ + return isinstance(self._offset_clause, _OffsetLimitParam) + + @property + def _offset(self): + """Get an integer value for the offset. This should only be used + by code that cannot support an offset as a BindParameter or + other custom clause as it will throw an exception if the + offset isn't currently set to an integer. + + """ + return _offset_or_limit_clause_asint(self._offset_clause, "offset") + + @_generative + def limit(self, limit): + """return a new selectable with the given LIMIT criterion + applied. + + This is a numerical value which usually renders as a ``LIMIT`` + expression in the resulting select. Backends that don't + support ``LIMIT`` will attempt to provide similar + functionality. + + .. versionchanged:: 1.0.0 - :meth:`.Select.limit` can now + accept arbitrary SQL expressions as well as integer values. + + :param limit: an integer LIMIT parameter, or a SQL expression + that provides an integer result. + + """ + + self._limit_clause = _offset_or_limit_clause(limit) + + @_generative + def offset(self, offset): + """return a new selectable with the given OFFSET criterion + applied. + + + This is a numeric value which usually renders as an ``OFFSET`` + expression in the resulting select. Backends that don't + support ``OFFSET`` will attempt to provide similar + functionality. + + + .. versionchanged:: 1.0.0 - :meth:`.Select.offset` can now + accept arbitrary SQL expressions as well as integer values. + + :param offset: an integer OFFSET parameter, or a SQL expression + that provides an integer result. + + """ + + self._offset_clause = _offset_or_limit_clause(offset) + + @_generative + def order_by(self, *clauses): + """return a new selectable with the given list of ORDER BY + criterion applied. + + The criterion will be appended to any pre-existing ORDER BY + criterion. + + """ + + self.append_order_by(*clauses) + + @_generative + def group_by(self, *clauses): + """return a new selectable with the given list of GROUP BY + criterion applied. + + The criterion will be appended to any pre-existing GROUP BY + criterion. + + """ + + self.append_group_by(*clauses) + + def append_order_by(self, *clauses): + """Append the given ORDER BY criterion applied to this selectable. + + The criterion will be appended to any pre-existing ORDER BY criterion. + + This is an **in-place** mutation method; the + :meth:`~.GenerativeSelect.order_by` method is preferred, as it + provides standard :term:`method chaining`. + + """ + if len(clauses) == 1 and clauses[0] is None: + self._order_by_clause = ClauseList() + else: + if getattr(self, '_order_by_clause', None) is not None: + clauses = list(self._order_by_clause) + list(clauses) + self._order_by_clause = ClauseList( + *clauses, + _literal_as_text=_literal_and_labels_as_label_reference) + + def append_group_by(self, *clauses): + """Append the given GROUP BY criterion applied to this selectable. + + The criterion will be appended to any pre-existing GROUP BY criterion. 
+ + This is an **in-place** mutation method; the + :meth:`~.GenerativeSelect.group_by` method is preferred, as it + provides standard :term:`method chaining`. + + """ + if len(clauses) == 1 and clauses[0] is None: + self._group_by_clause = ClauseList() + else: + if getattr(self, '_group_by_clause', None) is not None: + clauses = list(self._group_by_clause) + list(clauses) + self._group_by_clause = ClauseList( + *clauses, _literal_as_text=_literal_as_label_reference) + + @property + def _label_resolve_dict(self): + raise NotImplementedError() + + def _copy_internals(self, clone=_clone, **kw): + if self._limit_clause is not None: + self._limit_clause = clone(self._limit_clause, **kw) + if self._offset_clause is not None: + self._offset_clause = clone(self._offset_clause, **kw) + + +class CompoundSelect(GenerativeSelect): + """Forms the basis of ``UNION``, ``UNION ALL``, and other + SELECT-based set operations. + + + .. seealso:: + + :func:`.union` + + :func:`.union_all` + + :func:`.intersect` + + :func:`.intersect_all` + + :func:`.except` + + :func:`.except_all` + + """ + + __visit_name__ = 'compound_select' + + UNION = util.symbol('UNION') + UNION_ALL = util.symbol('UNION ALL') + EXCEPT = util.symbol('EXCEPT') + EXCEPT_ALL = util.symbol('EXCEPT ALL') + INTERSECT = util.symbol('INTERSECT') + INTERSECT_ALL = util.symbol('INTERSECT ALL') + + _is_from_container = True + + def __init__(self, keyword, *selects, **kwargs): + self._auto_correlate = kwargs.pop('correlate', False) + self.keyword = keyword + self.selects = [] + + numcols = None + + # some DBs do not like ORDER BY in the inner queries of a UNION, etc. + for n, s in enumerate(selects): + s = _clause_element_as_expr(s) + + if not numcols: + numcols = len(s.c._all_columns) + elif len(s.c._all_columns) != numcols: + raise exc.ArgumentError( + 'All selectables passed to ' + 'CompoundSelect must have identical numbers of ' + 'columns; select #%d has %d columns, select ' + '#%d has %d' % + (1, len(self.selects[0].c._all_columns), + n + 1, len(s.c._all_columns)) + ) + + self.selects.append(s.self_group(self)) + + GenerativeSelect.__init__(self, **kwargs) + + @property + def _label_resolve_dict(self): + d = dict( + (c.key, c) for c in self.c + ) + return d, d, d + + @classmethod + def _create_union(cls, *selects, **kwargs): + r"""Return a ``UNION`` of multiple selectables. + + The returned object is an instance of + :class:`.CompoundSelect`. + + A similar :func:`union()` method is available on all + :class:`.FromClause` subclasses. + + \*selects + a list of :class:`.Select` instances. + + \**kwargs + available keyword arguments are the same as those of + :func:`select`. + + """ + return CompoundSelect(CompoundSelect.UNION, *selects, **kwargs) + + @classmethod + def _create_union_all(cls, *selects, **kwargs): + r"""Return a ``UNION ALL`` of multiple selectables. + + The returned object is an instance of + :class:`.CompoundSelect`. + + A similar :func:`union_all()` method is available on all + :class:`.FromClause` subclasses. + + \*selects + a list of :class:`.Select` instances. + + \**kwargs + available keyword arguments are the same as those of + :func:`select`. + + """ + return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs) + + @classmethod + def _create_except(cls, *selects, **kwargs): + r"""Return an ``EXCEPT`` of multiple selectables. + + The returned object is an instance of + :class:`.CompoundSelect`. + + \*selects + a list of :class:`.Select` instances. 
+ + \**kwargs + available keyword arguments are the same as those of + :func:`select`. + + """ + return CompoundSelect(CompoundSelect.EXCEPT, *selects, **kwargs) + + @classmethod + def _create_except_all(cls, *selects, **kwargs): + r"""Return an ``EXCEPT ALL`` of multiple selectables. + + The returned object is an instance of + :class:`.CompoundSelect`. + + \*selects + a list of :class:`.Select` instances. + + \**kwargs + available keyword arguments are the same as those of + :func:`select`. + + """ + return CompoundSelect(CompoundSelect.EXCEPT_ALL, *selects, **kwargs) + + @classmethod + def _create_intersect(cls, *selects, **kwargs): + r"""Return an ``INTERSECT`` of multiple selectables. + + The returned object is an instance of + :class:`.CompoundSelect`. + + \*selects + a list of :class:`.Select` instances. + + \**kwargs + available keyword arguments are the same as those of + :func:`select`. + + """ + return CompoundSelect(CompoundSelect.INTERSECT, *selects, **kwargs) + + @classmethod + def _create_intersect_all(cls, *selects, **kwargs): + r"""Return an ``INTERSECT ALL`` of multiple selectables. + + The returned object is an instance of + :class:`.CompoundSelect`. + + \*selects + a list of :class:`.Select` instances. + + \**kwargs + available keyword arguments are the same as those of + :func:`select`. + + """ + return CompoundSelect( + CompoundSelect.INTERSECT_ALL, *selects, **kwargs) + + def _scalar_type(self): + return self.selects[0]._scalar_type() + + def self_group(self, against=None): + return FromGrouping(self) + + def is_derived_from(self, fromclause): + for s in self.selects: + if s.is_derived_from(fromclause): + return True + return False + + def _populate_column_collection(self): + for cols in zip(*[s.c._all_columns for s in self.selects]): + + # this is a slightly hacky thing - the union exports a + # column that resembles just that of the *first* selectable. + # to get at a "composite" column, particularly foreign keys, + # you have to dig through the proxies collection which we + # generate below. We may want to improve upon this, such as + # perhaps _make_proxy can accept a list of other columns + # that are "shared" - schema.column can then copy all the + # ForeignKeys in. this would allow the union() to have all + # those fks too. 
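+            # e.g. given union(select([t1.c.id]), select([t2.c.id]))
+            # (a hypothetical pair of selects), the exported column
+            # proxies t1.c.id only; t2.c.id remains reachable via the
+            # _proxies list assembled below.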
+ + proxy = cols[0]._make_proxy( + self, name=cols[0]._label if self.use_labels else None, + key=cols[0]._key_label if self.use_labels else None) + + # hand-construct the "_proxies" collection to include all + # derived columns place a 'weight' annotation corresponding + # to how low in the list of select()s the column occurs, so + # that the corresponding_column() operation can resolve + # conflicts + + proxy._proxies = [ + c._annotate({'weight': i + 1}) for (i, c) in enumerate(cols)] + + def _refresh_for_new_column(self, column): + for s in self.selects: + s._refresh_for_new_column(column) + + if not self._cols_populated: + return None + + raise NotImplementedError("CompoundSelect constructs don't support " + "addition of columns to underlying " + "selectables") + + def _copy_internals(self, clone=_clone, **kw): + super(CompoundSelect, self)._copy_internals(clone, **kw) + self._reset_exported() + self.selects = [clone(s, **kw) for s in self.selects] + if hasattr(self, '_col_map'): + del self._col_map + for attr in ( + '_order_by_clause', '_group_by_clause', '_for_update_arg'): + if getattr(self, attr) is not None: + setattr(self, attr, clone(getattr(self, attr), **kw)) + + def get_children(self, column_collections=True, **kwargs): + return (column_collections and list(self.c) or []) \ + + [self._order_by_clause, self._group_by_clause] \ + + list(self.selects) + + def bind(self): + if self._bind: + return self._bind + for s in self.selects: + e = s.bind + if e: + return e + else: + return None + + def _set_bind(self, bind): + self._bind = bind + bind = property(bind, _set_bind) + + +class Select(HasPrefixes, HasSuffixes, GenerativeSelect): + """Represents a ``SELECT`` statement. + + """ + + __visit_name__ = 'select' + + _prefixes = () + _suffixes = () + _hints = util.immutabledict() + _statement_hints = () + _distinct = False + _from_cloned = None + _correlate = () + _correlate_except = None + _memoized_property = SelectBase._memoized_property + _is_select = True + + def __init__(self, + columns=None, + whereclause=None, + from_obj=None, + distinct=False, + having=None, + correlate=True, + prefixes=None, + suffixes=None, + **kwargs): + """Construct a new :class:`.Select`. + + Similar functionality is also available via the + :meth:`.FromClause.select` method on any :class:`.FromClause`. + + All arguments which accept :class:`.ClauseElement` arguments also + accept string arguments, which will be converted as appropriate into + either :func:`text()` or :func:`literal_column()` constructs. + + .. seealso:: + + :ref:`coretutorial_selecting` - Core Tutorial description of + :func:`.select`. + + :param columns: + A list of :class:`.ColumnElement` or :class:`.FromClause` + objects which will form the columns clause of the resulting + statement. For those objects that are instances of + :class:`.FromClause` (typically :class:`.Table` or :class:`.Alias` + objects), the :attr:`.FromClause.c` collection is extracted + to form a collection of :class:`.ColumnElement` objects. + + This parameter will also accept :class:`.Text` constructs as + given, as well as ORM-mapped classes. + + .. note:: + + The :paramref:`.select.columns` parameter is not available + in the method form of :func:`.select`, e.g. + :meth:`.FromClause.select`. + + .. seealso:: + + :meth:`.Select.column` + + :meth:`.Select.with_only_columns` + + :param whereclause: + A :class:`.ClauseElement` expression which will be used to form the + ``WHERE`` clause. 
It is typically preferable to add WHERE + criterion to an existing :class:`.Select` using method chaining + with :meth:`.Select.where`. + + .. seealso:: + + :meth:`.Select.where` + + :param from_obj: + A list of :class:`.ClauseElement` objects which will be added to the + ``FROM`` clause of the resulting statement. This is equivalent + to calling :meth:`.Select.select_from` using method chaining on + an existing :class:`.Select` object. + + .. seealso:: + + :meth:`.Select.select_from` - full description of explicit + FROM clause specification. + + :param autocommit: + Deprecated. Use ``.execution_options(autocommit=)`` + to set the autocommit option. + + .. seealso:: + + :meth:`.Executable.execution_options` + + :param bind=None: + an :class:`~.Engine` or :class:`~.Connection` instance + to which the + resulting :class:`.Select` object will be bound. The + :class:`.Select` object will otherwise automatically bind to + whatever :class:`~.base.Connectable` instances can be located within + its contained :class:`.ClauseElement` members. + + :param correlate=True: + indicates that this :class:`.Select` object should have its + contained :class:`.FromClause` elements "correlated" to an enclosing + :class:`.Select` object. It is typically preferable to specify + correlations on an existing :class:`.Select` construct using + :meth:`.Select.correlate`. + + .. seealso:: + + :meth:`.Select.correlate` - full description of correlation. + + :param distinct=False: + when ``True``, applies a ``DISTINCT`` qualifier to the columns + clause of the resulting statement. + + The boolean argument may also be a column expression or list + of column expressions - this is a special calling form which + is understood by the PostgreSQL dialect to render the + ``DISTINCT ON ()`` syntax. + + ``distinct`` is also available on an existing :class:`.Select` + object via the :meth:`~.Select.distinct` method. + + .. seealso:: + + :meth:`.Select.distinct` + + :param for_update=False: + when ``True``, applies ``FOR UPDATE`` to the end of the + resulting statement. + + .. deprecated:: 0.9.0 - use + :meth:`.Select.with_for_update` to specify the + structure of the ``FOR UPDATE`` clause. + + ``for_update`` accepts various string values interpreted by + specific backends, including: + + * ``"read"`` - on MySQL, translates to ``LOCK IN SHARE MODE``; + on PostgreSQL, translates to ``FOR SHARE``. + * ``"nowait"`` - on PostgreSQL and Oracle, translates to + ``FOR UPDATE NOWAIT``. + * ``"read_nowait"`` - on PostgreSQL, translates to + ``FOR SHARE NOWAIT``. + + .. seealso:: + + :meth:`.Select.with_for_update` - improved API for + specifying the ``FOR UPDATE`` clause. + + :param group_by: + a list of :class:`.ClauseElement` objects which will comprise the + ``GROUP BY`` clause of the resulting select. This parameter + is typically specified more naturally using the + :meth:`.Select.group_by` method on an existing :class:`.Select`. + + .. seealso:: + + :meth:`.Select.group_by` + + :param having: + a :class:`.ClauseElement` that will comprise the ``HAVING`` clause + of the resulting select when ``GROUP BY`` is used. This parameter + is typically specified more naturally using the + :meth:`.Select.having` method on an existing :class:`.Select`. + + .. seealso:: + + :meth:`.Select.having` + + :param limit=None: + a numerical value which usually renders as a ``LIMIT`` + expression in the resulting select. Backends that don't + support ``LIMIT`` will attempt to provide similar + functionality. 
This parameter is typically specified more naturally + using the :meth:`.Select.limit` method on an existing + :class:`.Select`. + + .. seealso:: + + :meth:`.Select.limit` + + :param offset=None: + a numeric value which usually renders as an ``OFFSET`` + expression in the resulting select. Backends that don't + support ``OFFSET`` will attempt to provide similar + functionality. This parameter is typically specified more naturally + using the :meth:`.Select.offset` method on an existing + :class:`.Select`. + + .. seealso:: + + :meth:`.Select.offset` + + :param order_by: + a scalar or list of :class:`.ClauseElement` objects which will + comprise the ``ORDER BY`` clause of the resulting select. + This parameter is typically specified more naturally using the + :meth:`.Select.order_by` method on an existing :class:`.Select`. + + .. seealso:: + + :meth:`.Select.order_by` + + :param use_labels=False: + when ``True``, the statement will be generated using labels + for each column in the columns clause, which qualify each + column with its parent table's (or aliases) name so that name + conflicts between columns in different tables don't occur. + The format of the label is _. The "c" + collection of the resulting :class:`.Select` object will use these + names as well for targeting column members. + + This parameter can also be specified on an existing + :class:`.Select` object using the :meth:`.Select.apply_labels` + method. + + .. seealso:: + + :meth:`.Select.apply_labels` + + """ + self._auto_correlate = correlate + if distinct is not False: + if distinct is True: + self._distinct = True + else: + self._distinct = [ + _literal_as_text(e) + for e in util.to_list(distinct) + ] + + if from_obj is not None: + self._from_obj = util.OrderedSet( + _interpret_as_from(f) + for f in util.to_list(from_obj)) + else: + self._from_obj = util.OrderedSet() + + try: + cols_present = bool(columns) + except TypeError: + raise exc.ArgumentError("columns argument to select() must " + "be a Python list or other iterable") + + if cols_present: + self._raw_columns = [] + for c in columns: + c = _interpret_as_column_or_from(c) + if isinstance(c, ScalarSelect): + c = c.self_group(against=operators.comma_op) + self._raw_columns.append(c) + else: + self._raw_columns = [] + + if whereclause is not None: + self._whereclause = _literal_as_text( + whereclause).self_group(against=operators._asbool) + else: + self._whereclause = None + + if having is not None: + self._having = _literal_as_text( + having).self_group(against=operators._asbool) + else: + self._having = None + + if prefixes: + self._setup_prefixes(prefixes) + + if suffixes: + self._setup_suffixes(suffixes) + + GenerativeSelect.__init__(self, **kwargs) + + @property + def _froms(self): + # would love to cache this, + # but there's just enough edge cases, particularly now that + # declarative encourages construction of SQL expressions + # without tables present, to just regen this each time. 
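+        # below: gather FROMs from the raw columns, the WHERE clause
+        # and the explicit _from_obj set, consulting the _from_cloned
+        # translation map and de-duplicating on each element's
+        # _cloned_set so a clone and its original never both appear.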
+ froms = [] + seen = set() + translate = self._from_cloned + + for item in itertools.chain( + _from_objects(*self._raw_columns), + _from_objects(self._whereclause) + if self._whereclause is not None else (), + self._from_obj + ): + if item is self: + raise exc.InvalidRequestError( + "select() construct refers to itself as a FROM") + if translate and item in translate: + item = translate[item] + if not seen.intersection(item._cloned_set): + froms.append(item) + seen.update(item._cloned_set) + + return froms + + def _get_display_froms(self, explicit_correlate_froms=None, + implicit_correlate_froms=None): + """Return the full list of 'from' clauses to be displayed. + + Takes into account a set of existing froms which may be + rendered in the FROM clause of enclosing selects; this Select + may want to leave those absent if it is automatically + correlating. + + """ + froms = self._froms + + toremove = set(itertools.chain(*[ + _expand_cloned(f._hide_froms) + for f in froms])) + if toremove: + # if we're maintaining clones of froms, + # add the copies out to the toremove list. only include + # clones that are lexical equivalents. + if self._from_cloned: + toremove.update( + self._from_cloned[f] for f in + toremove.intersection(self._from_cloned) + if self._from_cloned[f]._is_lexical_equivalent(f) + ) + # filter out to FROM clauses not in the list, + # using a list to maintain ordering + froms = [f for f in froms if f not in toremove] + + if self._correlate: + to_correlate = self._correlate + if to_correlate: + froms = [ + f for f in froms if f not in + _cloned_intersection( + _cloned_intersection( + froms, explicit_correlate_froms or ()), + to_correlate + ) + ] + + if self._correlate_except is not None: + + froms = [ + f for f in froms if f not in + _cloned_difference( + _cloned_intersection( + froms, explicit_correlate_froms or ()), + self._correlate_except + ) + ] + + if self._auto_correlate and \ + implicit_correlate_froms and \ + len(froms) > 1: + + froms = [ + f for f in froms if f not in + _cloned_intersection(froms, implicit_correlate_froms) + ] + + if not len(froms): + raise exc.InvalidRequestError("Select statement '%s" + "' returned no FROM clauses " + "due to auto-correlation; " + "specify correlate() " + "to control correlation " + "manually." % self) + + return froms + + def _scalar_type(self): + elem = self._raw_columns[0] + cols = list(elem._select_iterable) + return cols[0].type + + @property + def froms(self): + """Return the displayed list of FromClause elements.""" + + return self._get_display_froms() + + def with_statement_hint(self, text, dialect_name='*'): + """add a statement hint to this :class:`.Select`. + + This method is similar to :meth:`.Select.with_hint` except that + it does not require an individual table, and instead applies to the + statement as a whole. + + Hints here are specific to the backend database and may include + directives such as isolation levels, file directives, fetch directives, + etc. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :meth:`.Select.with_hint` + + """ + return self.with_hint(None, text, dialect_name) + + @_generative + def with_hint(self, selectable, text, dialect_name='*'): + r"""Add an indexing or other executional context hint for the given + selectable to this :class:`.Select`. + + The text of the hint is rendered in the appropriate + location for the database backend in use, relative + to the given :class:`.Table` or :class:`.Alias` passed as the + ``selectable`` argument. 
The dialect implementation + typically uses Python string substitution syntax + with the token ``%(name)s`` to render the name of + the table or alias. E.g. when using Oracle, the + following:: + + select([mytable]).\ + with_hint(mytable, "index(%(name)s ix_mytable)") + + Would render SQL as:: + + select /*+ index(mytable ix_mytable) */ ... from mytable + + The ``dialect_name`` option will limit the rendering of a particular + hint to a particular backend. Such as, to add hints for both Oracle + and Sybase simultaneously:: + + select([mytable]).\ + with_hint(mytable, "index(%(name)s ix_mytable)", 'oracle').\ + with_hint(mytable, "WITH INDEX ix_mytable", 'sybase') + + .. seealso:: + + :meth:`.Select.with_statement_hint` + + """ + if selectable is None: + self._statement_hints += ((dialect_name, text), ) + else: + self._hints = self._hints.union( + {(selectable, dialect_name): text}) + + @property + def type(self): + raise exc.InvalidRequestError("Select objects don't have a type. " + "Call as_scalar() on this Select " + "object to return a 'scalar' version " + "of this Select.") + + @_memoized_property.method + def locate_all_froms(self): + """return a Set of all FromClause elements referenced by this Select. + + This set is a superset of that returned by the ``froms`` property, + which is specifically for those FromClause elements that would + actually be rendered. + + """ + froms = self._froms + return froms + list(_from_objects(*froms)) + + @property + def inner_columns(self): + """an iterator of all ColumnElement expressions which would + be rendered into the columns clause of the resulting SELECT statement. + + """ + return _select_iterables(self._raw_columns) + + @_memoized_property + def _label_resolve_dict(self): + with_cols = dict( + (c._resolve_label or c._label or c.key, c) + for c in _select_iterables(self._raw_columns) + if c._allow_label_resolve) + only_froms = dict( + (c.key, c) for c in + _select_iterables(self.froms) if c._allow_label_resolve) + only_cols = with_cols.copy() + for key, value in only_froms.items(): + with_cols.setdefault(key, value) + + return with_cols, only_froms, only_cols + + def is_derived_from(self, fromclause): + if self in fromclause._cloned_set: + return True + + for f in self.locate_all_froms(): + if f.is_derived_from(fromclause): + return True + return False + + def _copy_internals(self, clone=_clone, **kw): + super(Select, self)._copy_internals(clone, **kw) + + # Select() object has been cloned and probably adapted by the + # given clone function. Apply the cloning function to internal + # objects + + # 1. keep a dictionary of the froms we've cloned, and what + # they've become. This is consulted later when we derive + # additional froms from "whereclause" and the columns clause, + # which may still reference the uncloned parent table. + # as of 0.7.4 we also put the current version of _froms, which + # gets cleared on each generation. previously we were "baking" + # _froms into self._from_obj. + self._from_cloned = from_cloned = dict( + (f, clone(f, **kw)) for f in self._from_obj.union(self._froms)) + + # 3. update persistent _from_obj with the cloned versions. + self._from_obj = util.OrderedSet(from_cloned[f] for f in + self._from_obj) + + # the _correlate collection is done separately, what can happen + # here is the same item is _correlate as in _from_obj but the + # _correlate version has an annotation on it - (specifically + # RelationshipProperty.Comparator._criterion_exists() does + # this). 
Also keep _correlate liberally open with its previous + # contents, as this set is used for matching, not rendering. + self._correlate = set(clone(f) for f in + self._correlate).union(self._correlate) + + # 4. clone other things. The difficulty here is that Column + # objects are not actually cloned, and refer to their original + # .table, resulting in the wrong "from" parent after a clone + # operation. Hence _from_cloned and _from_obj supersede what is + # present here. + self._raw_columns = [clone(c, **kw) for c in self._raw_columns] + for attr in '_whereclause', '_having', '_order_by_clause', \ + '_group_by_clause', '_for_update_arg': + if getattr(self, attr) is not None: + setattr(self, attr, clone(getattr(self, attr), **kw)) + + # erase exported column list, _froms collection, + # etc. + self._reset_exported() + + def get_children(self, column_collections=True, **kwargs): + """return child elements as per the ClauseElement specification.""" + + return (column_collections and list(self.columns) or []) + \ + self._raw_columns + list(self._froms) + \ + [x for x in + (self._whereclause, self._having, + self._order_by_clause, self._group_by_clause) + if x is not None] + + @_generative + def column(self, column): + """return a new select() construct with the given column expression + added to its columns clause. + + """ + self.append_column(column) + + @util.dependencies("sqlalchemy.sql.util") + def reduce_columns(self, sqlutil, only_synonyms=True): + """Return a new :func`.select` construct with redundantly + named, equivalently-valued columns removed from the columns clause. + + "Redundant" here means two columns where one refers to the + other either based on foreign key, or via a simple equality + comparison in the WHERE clause of the statement. The primary purpose + of this method is to automatically construct a select statement + with all uniquely-named columns, without the need to use + table-qualified labels as :meth:`.apply_labels` does. + + When columns are omitted based on foreign key, the referred-to + column is the one that's kept. When columns are omitted based on + WHERE eqivalence, the first column in the columns clause is the + one that's kept. + + :param only_synonyms: when True, limit the removal of columns + to those which have the same name as the equivalent. Otherwise, + all columns that are equivalent to another are removed. + + .. versionadded:: 0.8 + + """ + return self.with_only_columns( + sqlutil.reduce_columns( + self.inner_columns, + only_synonyms=only_synonyms, + *(self._whereclause, ) + tuple(self._from_obj) + ) + ) + + @_generative + def with_only_columns(self, columns): + r"""Return a new :func:`.select` construct with its columns + clause replaced with the given columns. + + .. versionchanged:: 0.7.3 + Due to a bug fix, this method has a slight + behavioral change as of version 0.7.3. + Prior to version 0.7.3, the FROM clause of + a :func:`.select` was calculated upfront and as new columns + were added; in 0.7.3 and later it's calculated + at compile time, fixing an issue regarding late binding + of columns to parent tables. This changes the behavior of + :meth:`.Select.with_only_columns` in that FROM clauses no + longer represented in the new list are dropped, + but this behavior is more consistent in + that the FROM clauses are consistently derived from the + current columns clause. 
The original intent of this method + is to allow trimming of the existing columns list to be fewer + columns than originally present; the use case of replacing + the columns list with an entirely different one hadn't + been anticipated until 0.7.3 was released; the usage + guidelines below illustrate how this should be done. + + This method is exactly equivalent to as if the original + :func:`.select` had been called with the given columns + clause. I.e. a statement:: + + s = select([table1.c.a, table1.c.b]) + s = s.with_only_columns([table1.c.b]) + + should be exactly equivalent to:: + + s = select([table1.c.b]) + + This means that FROM clauses which are only derived + from the column list will be discarded if the new column + list no longer contains that FROM:: + + >>> table1 = table('t1', column('a'), column('b')) + >>> table2 = table('t2', column('a'), column('b')) + >>> s1 = select([table1.c.a, table2.c.b]) + >>> print s1 + SELECT t1.a, t2.b FROM t1, t2 + >>> s2 = s1.with_only_columns([table2.c.b]) + >>> print s2 + SELECT t2.b FROM t1 + + The preferred way to maintain a specific FROM clause + in the construct, assuming it won't be represented anywhere + else (i.e. not in the WHERE clause, etc.) is to set it using + :meth:`.Select.select_from`:: + + >>> s1 = select([table1.c.a, table2.c.b]).\ + ... select_from(table1.join(table2, + ... table1.c.a==table2.c.a)) + >>> s2 = s1.with_only_columns([table2.c.b]) + >>> print s2 + SELECT t2.b FROM t1 JOIN t2 ON t1.a=t2.a + + Care should also be taken to use the correct + set of column objects passed to :meth:`.Select.with_only_columns`. + Since the method is essentially equivalent to calling the + :func:`.select` construct in the first place with the given + columns, the columns passed to :meth:`.Select.with_only_columns` + should usually be a subset of those which were passed + to the :func:`.select` construct, not those which are available + from the ``.c`` collection of that :func:`.select`. That + is:: + + s = select([table1.c.a, table1.c.b]).select_from(table1) + s = s.with_only_columns([table1.c.b]) + + and **not**:: + + # usually incorrect + s = s.with_only_columns([s.c.b]) + + The latter would produce the SQL:: + + SELECT b + FROM (SELECT t1.a AS a, t1.b AS b + FROM t1), t1 + + Since the :func:`.select` construct is essentially being + asked to select both from ``table1`` as well as itself. + + """ + self._reset_exported() + rc = [] + for c in columns: + c = _interpret_as_column_or_from(c) + if isinstance(c, ScalarSelect): + c = c.self_group(against=operators.comma_op) + rc.append(c) + self._raw_columns = rc + + @_generative + def where(self, whereclause): + """return a new select() construct with the given expression added to + its WHERE clause, joined to the existing clause via AND, if any. + + """ + + self.append_whereclause(whereclause) + + @_generative + def having(self, having): + """return a new select() construct with the given expression added to + its HAVING clause, joined to the existing clause via AND, if any. + + """ + self.append_having(having) + + @_generative + def distinct(self, *expr): + r"""Return a new select() construct which will apply DISTINCT to its + columns clause. + + :param \*expr: optional column expressions. When present, + the PostgreSQL dialect will render a ``DISTINCT ON (>)`` + construct. 
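+
+        E.g., a minimal sketch (``users`` being a hypothetical table)::
+
+            stmt = select([users.c.name]).distinct()
+
+            # PostgreSQL only: renders DISTINCT ON (users.name)
+            stmt = select([users]).distinct(users.c.name)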
+ + """ + if expr: + expr = [_literal_as_label_reference(e) for e in expr] + if isinstance(self._distinct, list): + self._distinct = self._distinct + expr + else: + self._distinct = expr + else: + self._distinct = True + + @_generative + def select_from(self, fromclause): + r"""return a new :func:`.select` construct with the + given FROM expression + merged into its list of FROM objects. + + E.g.:: + + table1 = table('t1', column('a')) + table2 = table('t2', column('b')) + s = select([table1.c.a]).\ + select_from( + table1.join(table2, table1.c.a==table2.c.b) + ) + + The "from" list is a unique set on the identity of each element, + so adding an already present :class:`.Table` or other selectable + will have no effect. Passing a :class:`.Join` that refers + to an already present :class:`.Table` or other selectable will have + the effect of concealing the presence of that selectable as + an individual element in the rendered FROM list, instead + rendering it into a JOIN clause. + + While the typical purpose of :meth:`.Select.select_from` is to + replace the default, derived FROM clause with a join, it can + also be called with individual table elements, multiple times + if desired, in the case that the FROM clause cannot be fully + derived from the columns clause:: + + select([func.count('*')]).select_from(table1) + + """ + self.append_from(fromclause) + + @_generative + def correlate(self, *fromclauses): + r"""return a new :class:`.Select` which will correlate the given FROM + clauses to that of an enclosing :class:`.Select`. + + Calling this method turns off the :class:`.Select` object's + default behavior of "auto-correlation". Normally, FROM elements + which appear in a :class:`.Select` that encloses this one via + its :term:`WHERE clause`, ORDER BY, HAVING or + :term:`columns clause` will be omitted from this :class:`.Select` + object's :term:`FROM clause`. + Setting an explicit correlation collection using the + :meth:`.Select.correlate` method provides a fixed list of FROM objects + that can potentially take place in this process. + + When :meth:`.Select.correlate` is used to apply specific FROM clauses + for correlation, the FROM elements become candidates for + correlation regardless of how deeply nested this :class:`.Select` + object is, relative to an enclosing :class:`.Select` which refers to + the same FROM object. This is in contrast to the behavior of + "auto-correlation" which only correlates to an immediate enclosing + :class:`.Select`. Multi-level correlation ensures that the link + between enclosed and enclosing :class:`.Select` is always via + at least one WHERE/ORDER BY/HAVING/columns clause in order for + correlation to take place. + + If ``None`` is passed, the :class:`.Select` object will correlate + none of its FROM entries, and all will render unconditionally + in the local FROM clause. + + :param \*fromclauses: a list of one or more :class:`.FromClause` + constructs, or other compatible constructs (i.e. ORM-mapped + classes) to become part of the correlate collection. + + .. versionchanged:: 0.8.0 ORM-mapped classes are accepted by + :meth:`.Select.correlate`. + + .. versionchanged:: 0.8.0 The :meth:`.Select.correlate` method no + longer unconditionally removes entries from the FROM clause; + instead, the candidate FROM entries must also be matched by a FROM + entry located in an enclosing :class:`.Select`, which ultimately + encloses this one as present in the WHERE clause, ORDER BY clause, + HAVING clause, or columns clause of an enclosing :meth:`.Select`. 
+ + .. versionchanged:: 0.8.2 explicit correlation takes place + via any level of nesting of :class:`.Select` objects; in previous + 0.8 versions, correlation would only occur relative to the + immediate enclosing :class:`.Select` construct. + + .. seealso:: + + :meth:`.Select.correlate_except` + + :ref:`correlated_subqueries` + + """ + self._auto_correlate = False + if fromclauses and fromclauses[0] is None: + self._correlate = () + else: + self._correlate = set(self._correlate).union( + _interpret_as_from(f) for f in fromclauses) + + @_generative + def correlate_except(self, *fromclauses): + r"""return a new :class:`.Select` which will omit the given FROM + clauses from the auto-correlation process. + + Calling :meth:`.Select.correlate_except` turns off the + :class:`.Select` object's default behavior of + "auto-correlation" for the given FROM elements. An element + specified here will unconditionally appear in the FROM list, while + all other FROM elements remain subject to normal auto-correlation + behaviors. + + .. versionchanged:: 0.8.2 The :meth:`.Select.correlate_except` + method was improved to fully prevent FROM clauses specified here + from being omitted from the immediate FROM clause of this + :class:`.Select`. + + If ``None`` is passed, the :class:`.Select` object will correlate + all of its FROM entries. + + .. versionchanged:: 0.8.2 calling ``correlate_except(None)`` will + correctly auto-correlate all FROM clauses. + + :param \*fromclauses: a list of one or more :class:`.FromClause` + constructs, or other compatible constructs (i.e. ORM-mapped + classes) to become part of the correlate-exception collection. + + .. seealso:: + + :meth:`.Select.correlate` + + :ref:`correlated_subqueries` + + """ + + self._auto_correlate = False + if fromclauses and fromclauses[0] is None: + self._correlate_except = () + else: + self._correlate_except = set(self._correlate_except or ()).union( + _interpret_as_from(f) for f in fromclauses) + + def append_correlation(self, fromclause): + """append the given correlation expression to this select() + construct. + + This is an **in-place** mutation method; the + :meth:`~.Select.correlate` method is preferred, as it provides + standard :term:`method chaining`. + + """ + + self._auto_correlate = False + self._correlate = set(self._correlate).union( + _interpret_as_from(f) for f in fromclause) + + def append_column(self, column): + """append the given column expression to the columns clause of this + select() construct. + + This is an **in-place** mutation method; the + :meth:`~.Select.column` method is preferred, as it provides standard + :term:`method chaining`. + + """ + self._reset_exported() + column = _interpret_as_column_or_from(column) + + if isinstance(column, ScalarSelect): + column = column.self_group(against=operators.comma_op) + + self._raw_columns = self._raw_columns + [column] + + def append_prefix(self, clause): + """append the given columns clause prefix expression to this select() + construct. + + This is an **in-place** mutation method; the + :meth:`~.Select.prefix_with` method is preferred, as it provides + standard :term:`method chaining`. + + """ + clause = _literal_as_text(clause) + self._prefixes = self._prefixes + (clause,) + + def append_whereclause(self, whereclause): + """append the given expression to this select() construct's WHERE + criterion. + + The expression will be joined to existing WHERE criterion via AND. 
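+
+        E.g., a minimal sketch (``t`` being a hypothetical table)::
+
+            stmt = select([t]).where(t.c.x == 5)
+            stmt.append_whereclause(t.c.y > 10)
+            # renders roughly: SELECT ... FROM t
+            # WHERE t.x = :x_1 AND t.y > :y_1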
+ + This is an **in-place** mutation method; the + :meth:`~.Select.where` method is preferred, as it provides standard + :term:`method chaining`. + + """ + + self._reset_exported() + self._whereclause = and_( + True_._ifnone(self._whereclause), whereclause) + + def append_having(self, having): + """append the given expression to this select() construct's HAVING + criterion. + + The expression will be joined to existing HAVING criterion via AND. + + This is an **in-place** mutation method; the + :meth:`~.Select.having` method is preferred, as it provides standard + :term:`method chaining`. + + """ + self._reset_exported() + self._having = and_(True_._ifnone(self._having), having) + + def append_from(self, fromclause): + """append the given FromClause expression to this select() construct's + FROM clause. + + This is an **in-place** mutation method; the + :meth:`~.Select.select_from` method is preferred, as it provides + standard :term:`method chaining`. + + """ + self._reset_exported() + fromclause = _interpret_as_from(fromclause) + self._from_obj = self._from_obj.union([fromclause]) + + @_memoized_property + def _columns_plus_names(self): + if self.use_labels: + names = set() + + def name_for_col(c): + if c._label is None or not c._render_label_in_columns_clause: + return (None, c) + + name = c._label + if name in names: + name = c.anon_label + else: + names.add(name) + return name, c + + return [ + name_for_col(c) + for c in util.unique_list( + _select_iterables(self._raw_columns)) + ] + else: + return [ + (None, c) + for c in util.unique_list( + _select_iterables(self._raw_columns)) + ] + + def _populate_column_collection(self): + for name, c in self._columns_plus_names: + if not hasattr(c, '_make_proxy'): + continue + if name is None: + key = None + elif self.use_labels: + key = c._key_label + if key is not None and key in self.c: + key = c.anon_label + else: + key = None + + c._make_proxy(self, key=key, + name=name, + name_is_truncatable=True) + + def _refresh_for_new_column(self, column): + for fromclause in self._froms: + col = fromclause._refresh_for_new_column(column) + if col is not None: + if col in self.inner_columns and self._cols_populated: + our_label = col._key_label if self.use_labels else col.key + if our_label not in self.c: + return col._make_proxy( + self, + name=col._label if self.use_labels else None, + key=col._key_label if self.use_labels else None, + name_is_truncatable=True) + return None + return None + + def _needs_parens_for_grouping(self): + return ( + self._limit_clause is not None or + self._offset_clause is not None or + bool(self._order_by_clause.clauses) + ) + + def self_group(self, against=None): + """return a 'grouping' construct as per the ClauseElement + specification. + + This produces an element that can be embedded in an expression. Note + that this method is called automatically as needed when constructing + expressions and should not require explicit use. + + """ + if isinstance(against, CompoundSelect) and \ + not self._needs_parens_for_grouping(): + return self + return FromGrouping(self) + + def union(self, other, **kwargs): + """return a SQL UNION of this select() construct against the given + selectable.""" + + return CompoundSelect._create_union(self, other, **kwargs) + + def union_all(self, other, **kwargs): + """return a SQL UNION ALL of this select() construct against the given + selectable. 
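+
+        E.g., a minimal sketch (``t1`` and ``t2`` being hypothetical
+        tables with compatible columns)::
+
+            stmt = select([t1.c.name]).union_all(select([t2.c.name]))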
+ + """ + return CompoundSelect._create_union_all(self, other, **kwargs) + + def except_(self, other, **kwargs): + """return a SQL EXCEPT of this select() construct against the given + selectable.""" + + return CompoundSelect._create_except(self, other, **kwargs) + + def except_all(self, other, **kwargs): + """return a SQL EXCEPT ALL of this select() construct against the + given selectable. + + """ + return CompoundSelect._create_except_all(self, other, **kwargs) + + def intersect(self, other, **kwargs): + """return a SQL INTERSECT of this select() construct against the given + selectable. + + """ + return CompoundSelect._create_intersect(self, other, **kwargs) + + def intersect_all(self, other, **kwargs): + """return a SQL INTERSECT ALL of this select() construct against the + given selectable. + + """ + return CompoundSelect._create_intersect_all(self, other, **kwargs) + + def bind(self): + if self._bind: + return self._bind + froms = self._froms + if not froms: + for c in self._raw_columns: + e = c.bind + if e: + self._bind = e + return e + else: + e = list(froms)[0].bind + if e: + self._bind = e + return e + + return None + + def _set_bind(self, bind): + self._bind = bind + bind = property(bind, _set_bind) + + +class ScalarSelect(Generative, Grouping): + _from_objects = [] + _is_from_container = True + + def __init__(self, element): + self.element = element + self.type = element._scalar_type() + + @property + def columns(self): + raise exc.InvalidRequestError('Scalar Select expression has no ' + 'columns; use this object directly ' + 'within a column-level expression.') + c = columns + + @_generative + def where(self, crit): + """Apply a WHERE clause to the SELECT statement referred to + by this :class:`.ScalarSelect`. + + """ + self.element = self.element.where(crit) + + def self_group(self, **kwargs): + return self + + +class Exists(UnaryExpression): + """Represent an ``EXISTS`` clause. + + """ + __visit_name__ = UnaryExpression.__visit_name__ + _from_objects = [] + + def __init__(self, *args, **kwargs): + """Construct a new :class:`.Exists` against an existing + :class:`.Select` object. + + Calling styles are of the following forms:: + + # use on an existing select() + s = select([table.c.col1]).where(table.c.col2==5) + s = exists(s) + + # construct a select() at once + exists(['*'], **select_arguments).where(criterion) + + # columns argument is optional, generates "EXISTS (SELECT *)" + # by default. + exists().where(table.c.col2==5) + + """ + if args and isinstance(args[0], (SelectBase, ScalarSelect)): + s = args[0] + else: + if not args: + args = ([literal_column('*')],) + s = Select(*args, **kwargs).as_scalar().self_group() + + UnaryExpression.__init__(self, s, operator=operators.exists, + type_=type_api.BOOLEANTYPE, + wraps_column_expression=True) + + def select(self, whereclause=None, **params): + return Select([self], whereclause, **params) + + def correlate(self, *fromclause): + e = self._clone() + e.element = self.element.correlate(*fromclause).self_group() + return e + + def correlate_except(self, *fromclause): + e = self._clone() + e.element = self.element.correlate_except(*fromclause).self_group() + return e + + def select_from(self, clause): + """return a new :class:`.Exists` construct, applying the given + expression to the :meth:`.Select.select_from` method of the select + statement contained. 
+ + """ + e = self._clone() + e.element = self.element.select_from(clause).self_group() + return e + + def where(self, clause): + """return a new exists() construct with the given expression added to + its WHERE clause, joined to the existing clause via AND, if any. + + """ + e = self._clone() + e.element = self.element.where(clause).self_group() + return e + + +class TextAsFrom(SelectBase): + """Wrap a :class:`.TextClause` construct within a :class:`.SelectBase` + interface. + + This allows the :class:`.TextClause` object to gain a ``.c`` collection + and other FROM-like capabilities such as :meth:`.FromClause.alias`, + :meth:`.SelectBase.cte`, etc. + + The :class:`.TextAsFrom` construct is produced via the + :meth:`.TextClause.columns` method - see that method for details. + + .. versionadded:: 0.9.0 + + .. seealso:: + + :func:`.text` + + :meth:`.TextClause.columns` + + """ + __visit_name__ = "text_as_from" + + _textual = True + + def __init__(self, text, columns, positional=False): + self.element = text + self.column_args = columns + self.positional = positional + + @property + def _bind(self): + return self.element._bind + + @_generative + def bindparams(self, *binds, **bind_as_values): + self.element = self.element.bindparams(*binds, **bind_as_values) + + def _populate_column_collection(self): + for c in self.column_args: + c._make_proxy(self) + + def _copy_internals(self, clone=_clone, **kw): + self._reset_exported() + self.element = clone(self.element, **kw) + + def _scalar_type(self): + return self.column_args[0].type + + +class AnnotatedFromClause(Annotated): + def __init__(self, element, values): + # force FromClause to generate their internal + # collections into __dict__ + element.c + Annotated.__init__(self, element, values) diff --git a/app/lib/sqlalchemy/sql/sqltypes.py b/app/lib/sqlalchemy/sql/sqltypes.py new file mode 100644 index 0000000..8a114ec --- /dev/null +++ b/app/lib/sqlalchemy/sql/sqltypes.py @@ -0,0 +1,2619 @@ +# sql/sqltypes.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""SQL specific types. + +""" + +import datetime as dt +import codecs +import collections +import json + +from . import elements +from .type_api import TypeEngine, TypeDecorator, to_instance, Variant +from .elements import quoted_name, TypeCoerce as type_coerce, _defer_name, \ + Slice, _literal_as_binds +from .. import exc, util, processors +from .base import _bind_or_error, SchemaEventTarget +from . import operators +from .. import inspection +from .. import event +from ..util import pickle +from ..util import compat +import decimal + +if util.jython: + import array + + +class _DateAffinity(object): + + """Mixin date/time specific expression adaptations. + + Rules are implemented within Date,Time,Interval,DateTime, Numeric, + Integer. Based on http://www.postgresql.org/docs/current/static + /functions-datetime.html. + + """ + + @property + def _expression_adaptations(self): + raise NotImplementedError() + + class Comparator(TypeEngine.Comparator): + _blank_dict = util.immutabledict() + + def _adapt_expression(self, op, other_comparator): + othertype = other_comparator.type._type_affinity + return ( + op, to_instance( + self.type._expression_adaptations. + get(op, self._blank_dict). 
+ get(othertype, NULLTYPE)) + ) + comparator_factory = Comparator + + +class Concatenable(object): + + """A mixin that marks a type as supporting 'concatenation', + typically strings.""" + + class Comparator(TypeEngine.Comparator): + + def _adapt_expression(self, op, other_comparator): + if (op is operators.add and + isinstance( + other_comparator, + (Concatenable.Comparator, NullType.Comparator) + )): + return operators.concat_op, self.expr.type + else: + return super(Concatenable.Comparator, self)._adapt_expression( + op, other_comparator) + + comparator_factory = Comparator + + +class Indexable(object): + """A mixin that marks a type as supporting indexing operations, + such as array or JSON structures. + + + .. versionadded:: 1.1.0 + + + """ + + class Comparator(TypeEngine.Comparator): + + def _setup_getitem(self, index): + raise NotImplementedError() + + def __getitem__(self, index): + adjusted_op, adjusted_right_expr, result_type = \ + self._setup_getitem(index) + return self.operate( + adjusted_op, + adjusted_right_expr, + result_type=result_type + ) + + comparator_factory = Comparator + + +class String(Concatenable, TypeEngine): + + """The base for all string and character types. + + In SQL, corresponds to VARCHAR. Can also take Python unicode objects + and encode to the database's encoding in bind params (and the reverse for + result sets.) + + The `length` field is usually required when the `String` type is + used within a CREATE TABLE statement, as VARCHAR requires a length + on most databases. + + """ + + __visit_name__ = 'string' + + def __init__(self, length=None, collation=None, + convert_unicode=False, + unicode_error=None, + _warn_on_bytestring=False + ): + """ + Create a string-holding type. + + :param length: optional, a length for the column for use in + DDL and CAST expressions. May be safely omitted if no ``CREATE + TABLE`` will be issued. Certain databases may require a + ``length`` for use in DDL, and will raise an exception when + the ``CREATE TABLE`` DDL is issued if a ``VARCHAR`` + with no length is included. Whether the value is + interpreted as bytes or characters is database specific. + + :param collation: Optional, a column-level collation for + use in DDL and CAST expressions. Renders using the + COLLATE keyword supported by SQLite, MySQL, and PostgreSQL. + E.g.:: + + >>> from sqlalchemy import cast, select, String + >>> print select([cast('some string', String(collation='utf8'))]) + SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1 + + .. versionadded:: 0.8 Added support for COLLATE to all + string types. + + :param convert_unicode: When set to ``True``, the + :class:`.String` type will assume that + input is to be passed as Python ``unicode`` objects, + and results returned as Python ``unicode`` objects. + If the DBAPI in use does not support Python unicode + (which is fewer and fewer these days), SQLAlchemy + will encode/decode the value, using the + value of the ``encoding`` parameter passed to + :func:`.create_engine` as the encoding. + + When using a DBAPI that natively supports Python + unicode objects, this flag generally does not + need to be set. For columns that are explicitly + intended to store non-ASCII data, the :class:`.Unicode` + or :class:`.UnicodeText` + types should be used regardless, which feature + the same behavior of ``convert_unicode`` but + also indicate an underlying column type that + directly supports unicode, such as ``NVARCHAR``. 
+ + For the extremely rare case that Python ``unicode`` + is to be encoded/decoded by SQLAlchemy on a backend + that does natively support Python ``unicode``, + the value ``force`` can be passed here which will + cause SQLAlchemy's encode/decode services to be + used unconditionally. + + :param unicode_error: Optional, a method to use to handle Unicode + conversion errors. Behaves like the ``errors`` keyword argument to + the standard library's ``string.decode()`` functions. This flag + requires that ``convert_unicode`` is set to ``force`` - otherwise, + SQLAlchemy is not guaranteed to handle the task of unicode + conversion. Note that this flag adds significant performance + overhead to row-fetching operations for backends that already + return unicode objects natively (which most DBAPIs do). This + flag should only be used as a last resort for reading + strings from a column with varied or corrupted encodings. + + """ + if unicode_error is not None and convert_unicode != 'force': + raise exc.ArgumentError("convert_unicode must be 'force' " + "when unicode_error is set.") + + self.length = length + self.collation = collation + self.convert_unicode = convert_unicode + self.unicode_error = unicode_error + self._warn_on_bytestring = _warn_on_bytestring + + def literal_processor(self, dialect): + def process(value): + value = value.replace("'", "''") + return "'%s'" % value + return process + + def bind_processor(self, dialect): + if self.convert_unicode or dialect.convert_unicode: + if dialect.supports_unicode_binds and \ + self.convert_unicode != 'force': + if self._warn_on_bytestring: + def process(value): + if isinstance(value, util.binary_type): + util.warn_limited( + "Unicode type received non-unicode " + "bind param value %r.", + (util.ellipses_string(value),)) + return value + return process + else: + return None + else: + encoder = codecs.getencoder(dialect.encoding) + warn_on_bytestring = self._warn_on_bytestring + + def process(value): + if isinstance(value, util.text_type): + return encoder(value, self.unicode_error)[0] + elif warn_on_bytestring and value is not None: + util.warn_limited( + "Unicode type received non-unicode bind " + "param value %r.", + (util.ellipses_string(value),)) + return value + return process + else: + return None + + def result_processor(self, dialect, coltype): + wants_unicode = self.convert_unicode or dialect.convert_unicode + needs_convert = wants_unicode and \ + (dialect.returns_unicode_strings is not True or + self.convert_unicode in ('force', 'force_nocheck')) + needs_isinstance = ( + needs_convert and + dialect.returns_unicode_strings and + self.convert_unicode != 'force_nocheck' + ) + if needs_convert: + if needs_isinstance: + return processors.to_conditional_unicode_processor_factory( + dialect.encoding, self.unicode_error) + else: + return processors.to_unicode_processor_factory( + dialect.encoding, self.unicode_error) + else: + return None + + @property + def python_type(self): + if self.convert_unicode: + return util.text_type + else: + return str + + def get_dbapi_type(self, dbapi): + return dbapi.STRING + + +class Text(String): + + """A variably sized string type. + + In SQL, usually corresponds to CLOB or TEXT. Can also take Python + unicode objects and encode to the database's encoding in bind + params (and the reverse for result sets.) In general, TEXT objects + do not have a length; while some databases will accept a length + argument here, it will be rejected by others. 
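+
+    For example, a minimal sketch of a column declared with this type
+    (the table and column names are illustrative)::
+
+        from sqlalchemy import Column, Integer, MetaData, Table, Text
+
+        metadata = MetaData()
+        documents = Table('documents', metadata,
+                          Column('id', Integer, primary_key=True),
+                          Column('body', Text))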
+
+    """
+    __visit_name__ = 'text'
+
+
+class Unicode(String):
+
+    """A variable length Unicode string type.
+
+    The :class:`.Unicode` type is a :class:`.String` subclass
+    that assumes input and output as Python ``unicode`` data,
+    and in that regard is equivalent to the usage of the
+    ``convert_unicode`` flag with the :class:`.String` type.
+    However, unlike plain :class:`.String`, it also implies an
+    underlying column type that explicitly supports non-ASCII
+    data, such as ``NVARCHAR`` on Oracle and SQL Server.
+    This can impact the output of ``CREATE TABLE`` statements
+    and ``CAST`` functions at the dialect level, and can
+    also affect the handling of bound parameters in some
+    specific DBAPI scenarios.
+
+    The encoding used by the :class:`.Unicode` type is usually
+    determined by the DBAPI itself; most modern DBAPIs
+    feature support for Python ``unicode`` objects as bound
+    values and result set values, and the encoding should
+    be configured as detailed in the notes for the target
+    DBAPI in the :ref:`dialect_toplevel` section.
+
+    For those DBAPIs which do not support, or are not configured
+    to accommodate Python ``unicode`` objects
+    directly, SQLAlchemy does the encoding and decoding
+    outside of the DBAPI.  The encoding in this scenario
+    is determined by the ``encoding`` flag passed to
+    :func:`.create_engine`.
+
+    When using the :class:`.Unicode` type, it is only appropriate
+    to pass Python ``unicode`` objects, and not plain ``str``.
+    If a plain ``str`` is passed under Python 2, a warning
+    is emitted.  If you notice your application emitting these warnings but
+    you're not sure of the source of them, the Python
+    ``warnings`` filter, documented at
+    http://docs.python.org/library/warnings.html,
+    can be used to turn these warnings into exceptions
+    which will illustrate a stack trace::
+
+      import warnings
+      warnings.simplefilter('error')
+
+    For an application that wishes to pass plain bytestrings
+    and Python ``unicode`` objects to the ``Unicode`` type
+    equally, the bytestrings must first be decoded into
+    unicode.  The recipe at :ref:`coerce_to_unicode` illustrates
+    how this is done.
+
+    See also:
+
+        :class:`.UnicodeText` - unlengthed textual counterpart
+        to :class:`.Unicode`.
+
+    """
+
+    __visit_name__ = 'unicode'
+
+    def __init__(self, length=None, **kwargs):
+        """
+        Create a :class:`.Unicode` object.
+
+        Parameters are the same as that of :class:`.String`,
+        with the exception that ``convert_unicode``
+        defaults to ``True``.
+
+        """
+        kwargs.setdefault('convert_unicode', True)
+        kwargs.setdefault('_warn_on_bytestring', True)
+        super(Unicode, self).__init__(length=length, **kwargs)
+
+
+class UnicodeText(Text):
+
+    """An unbounded-length Unicode string type.
+
+    See :class:`.Unicode` for details on the unicode
+    behavior of this object.
+
+    Like :class:`.Unicode`, usage of the :class:`.UnicodeText` type implies a
+    unicode-capable type being used on the backend, such as
+    ``NCLOB``, ``NTEXT``.
+
+    """
+
+    __visit_name__ = 'unicode_text'
+
+    def __init__(self, length=None, **kwargs):
+        """
+        Create a Unicode-converting Text type.
+
+        Parameters are the same as that of :class:`.Text`,
+        with the exception that ``convert_unicode``
+        defaults to ``True``.
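+
+        A minimal usage sketch (the column name is hypothetical)::
+
+            from sqlalchemy import Column, UnicodeText
+
+            # implies an unlengthed, unicode-capable type such as NCLOB
+            comments = Column('comments', UnicodeText())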
+
+        """
+        kwargs.setdefault('convert_unicode', True)
+        kwargs.setdefault('_warn_on_bytestring', True)
+        super(UnicodeText, self).__init__(length=length, **kwargs)
+
+
+class Integer(_DateAffinity, TypeEngine):
+
+    """A type for ``int`` integers."""
+
+    __visit_name__ = 'integer'
+
+    def get_dbapi_type(self, dbapi):
+        return dbapi.NUMBER
+
+    @property
+    def python_type(self):
+        return int
+
+    def literal_processor(self, dialect):
+        def process(value):
+            return str(value)
+        return process
+
+    @util.memoized_property
+    def _expression_adaptations(self):
+        # TODO: need a dictionary object that will
+        # handle operators generically here, this is incomplete
+        return {
+            operators.add: {
+                Date: Date,
+                Integer: self.__class__,
+                Numeric: Numeric,
+            },
+            operators.mul: {
+                Interval: Interval,
+                Integer: self.__class__,
+                Numeric: Numeric,
+            },
+            operators.div: {
+                Integer: self.__class__,
+                Numeric: Numeric,
+            },
+            operators.truediv: {
+                Integer: self.__class__,
+                Numeric: Numeric,
+            },
+            operators.sub: {
+                Integer: self.__class__,
+                Numeric: Numeric,
+            },
+        }
+
+
+class SmallInteger(Integer):
+
+    """A type for smaller ``int`` integers.
+
+    Typically generates a ``SMALLINT`` in DDL, and otherwise acts like
+    a normal :class:`.Integer` on the Python side.
+
+    """
+
+    __visit_name__ = 'small_integer'
+
+
+class BigInteger(Integer):
+
+    """A type for bigger ``int`` integers.
+
+    Typically generates a ``BIGINT`` in DDL, and otherwise acts like
+    a normal :class:`.Integer` on the Python side.
+
+    """
+
+    __visit_name__ = 'big_integer'
+
+
+class Numeric(_DateAffinity, TypeEngine):
+
+    """A type for fixed precision numbers, such as ``NUMERIC`` or ``DECIMAL``.
+
+    This type returns Python ``decimal.Decimal`` objects by default, unless
+    the :paramref:`.Numeric.asdecimal` flag is set to False, in which case
+    they are coerced to Python ``float`` objects.
+
+    .. note::
+
+       The :class:`.Numeric` type is designed to receive data from a database
+       type that is explicitly known to be a decimal type
+       (e.g. ``DECIMAL``, ``NUMERIC``, others) and not a floating point
+       type (e.g. ``FLOAT``, ``REAL``, others).
+       If the database column on the server is in fact a floating-point
+       type, such as ``FLOAT`` or ``REAL``, use the :class:`.Float`
+       type or a subclass, otherwise numeric coercion between
+       ``float``/``Decimal`` may or may not function as expected.
+
+    .. note::
+
+       The Python ``decimal.Decimal`` class is generally slow
+       performing; CPython 3.3 has now switched to use the `cdecimal
+       <http://pypi.python.org/pypi/cdecimal/>`_ library natively.  For
+       older Python versions, the ``cdecimal`` library can be patched
+       into any application where it will replace the ``decimal``
+       library fully, however this needs to be applied globally and
+       before any other modules have been imported, as follows::
+
+           import sys
+           import cdecimal
+           sys.modules["decimal"] = cdecimal
+
+       Note that the ``cdecimal`` and ``decimal`` libraries are **not
+       compatible with each other**, so patching ``cdecimal`` at the
+       global level is the only way it can be used effectively with
+       various DBAPIs that hardcode to import the ``decimal`` library.
+
+    """
+
+    __visit_name__ = 'numeric'
+
+    _default_decimal_return_scale = 10
+
+    def __init__(self, precision=None, scale=None,
+                 decimal_return_scale=None, asdecimal=True):
+        """
+        Construct a Numeric.
+
+        :param precision: the numeric precision for use in DDL ``CREATE
+          TABLE``.
+
+        :param scale: the numeric scale for use in DDL ``CREATE TABLE``.
+
+        :param asdecimal: default True.  Return whether or not
+          values should be sent as Python Decimal objects, or
+          as floats.  Different DBAPIs send one or the other based on
+          datatypes - the Numeric type will ensure that return values
+          are one or the other across DBAPIs consistently.
+
+        :param decimal_return_scale: Default scale to use when converting
+         from floats to Python decimals.  Floating point values will typically
+         be much longer due to decimal inaccuracy, and most floating point
+         database types don't have a notion of "scale", so by default the
+         float type looks for the first ten decimal places when converting.
+         Specifying this value will override that length.  Types which
+         do include an explicit ".scale" value, such as the base
+         :class:`.Numeric` as well as the MySQL float types, will use the
+         value of ".scale" as the default for decimal_return_scale, if not
+         otherwise specified.
+
+         .. versionadded:: 0.9.0
+
+        When using the ``Numeric`` type, care should be taken to ensure
+        that the asdecimal setting is appropriate for the DBAPI in use -
+        when Numeric applies a conversion from Decimal->float or float->
+        Decimal, this conversion incurs an additional performance overhead
+        for all result columns received.
+
+        DBAPIs that return Decimal natively (e.g. psycopg2) will have
+        better accuracy and higher performance with a setting of ``True``,
+        as the native translation to Decimal reduces the amount of floating-
+        point issues at play, and the Numeric type itself doesn't need
+        to apply any further conversions.  However, another DBAPI which
+        returns floats natively *will* incur an additional conversion
+        overhead, and is still subject to floating point data loss - in
+        which case ``asdecimal=False`` will at least remove the extra
+        conversion overhead.
+
+        """
+        self.precision = precision
+        self.scale = scale
+        self.decimal_return_scale = decimal_return_scale
+        self.asdecimal = asdecimal
+
+    @property
+    def _effective_decimal_return_scale(self):
+        if self.decimal_return_scale is not None:
+            return self.decimal_return_scale
+        elif getattr(self, "scale", None) is not None:
+            return self.scale
+        else:
+            return self._default_decimal_return_scale
+
+    def get_dbapi_type(self, dbapi):
+        return dbapi.NUMBER
+
+    def literal_processor(self, dialect):
+        def process(value):
+            return str(value)
+        return process
+
+    @property
+    def python_type(self):
+        if self.asdecimal:
+            return decimal.Decimal
+        else:
+            return float
+
+    def bind_processor(self, dialect):
+        if dialect.supports_native_decimal:
+            return None
+        else:
+            return processors.to_float
+
+    def result_processor(self, dialect, coltype):
+        if self.asdecimal:
+            if dialect.supports_native_decimal:
+                # we're a "numeric", DBAPI will give us Decimal directly
+                return None
+            else:
+                util.warn('Dialect %s+%s does *not* support Decimal '
+                          'objects natively, and SQLAlchemy must '
+                          'convert from floating point - rounding '
+                          'errors and other issues may occur. Please '
+                          'consider storing Decimal numbers as strings '
+                          'or integers on this platform for lossless '
+                          'storage.' % (dialect.name, dialect.driver))
+
+                # we're a "numeric", DBAPI returns floats, convert.
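+                # (assuming the usual string-formatting implementation of
+                # to_decimal_processor_factory, a float such as 1.1 comes
+                # back as Decimal('1.1000000000') at the default scale of 10)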
+                return processors.to_decimal_processor_factory(
+                    decimal.Decimal,
+                    self.scale if self.scale is not None
+                    else self._default_decimal_return_scale)
+        else:
+            if dialect.supports_native_decimal:
+                return processors.to_float
+            else:
+                return None
+
+    @util.memoized_property
+    def _expression_adaptations(self):
+        return {
+            operators.mul: {
+                Interval: Interval,
+                Numeric: self.__class__,
+                Integer: self.__class__,
+            },
+            operators.div: {
+                Numeric: self.__class__,
+                Integer: self.__class__,
+            },
+            operators.truediv: {
+                Numeric: self.__class__,
+                Integer: self.__class__,
+            },
+            operators.add: {
+                Numeric: self.__class__,
+                Integer: self.__class__,
+            },
+            operators.sub: {
+                Numeric: self.__class__,
+                Integer: self.__class__,
+            }
+        }
+
+
+class Float(Numeric):
+
+    """Type representing floating point types, such as ``FLOAT`` or ``REAL``.
+
+    This type returns Python ``float`` objects by default, unless the
+    :paramref:`.Float.asdecimal` flag is set to True, in which case they
+    are coerced to ``decimal.Decimal`` objects.
+
+    .. note::
+
+       The :class:`.Float` type is designed to receive data from a database
+       type that is explicitly known to be a floating point type
+       (e.g. ``FLOAT``, ``REAL``, others)
+       and not a decimal type (e.g. ``DECIMAL``, ``NUMERIC``, others).
+       If the database column on the server is in fact a Numeric
+       type, such as ``DECIMAL`` or ``NUMERIC``, use the :class:`.Numeric`
+       type or a subclass, otherwise numeric coercion between
+       ``float``/``Decimal`` may or may not function as expected.
+
+    """
+
+    __visit_name__ = 'float'
+
+    scale = None
+
+    def __init__(self, precision=None, asdecimal=False,
+                 decimal_return_scale=None, **kwargs):
+        r"""
+        Construct a Float.
+
+        :param precision: the numeric precision for use in DDL ``CREATE
+           TABLE``.
+
+        :param asdecimal: the same flag as that of :class:`.Numeric`, but
+          defaults to ``False``.  Note that setting this flag to ``True``
+          results in floating point conversion.
+
+        :param decimal_return_scale: Default scale to use when converting
+         from floats to Python decimals.  Floating point values will typically
+         be much longer due to decimal inaccuracy, and most floating point
+         database types don't have a notion of "scale", so by default the
+         float type looks for the first ten decimal places when converting.
+         Specifying this value will override that length.  Note that the
+         MySQL float types, which do include "scale", will use "scale"
+         as the default for decimal_return_scale, if not otherwise specified.
+
+         .. versionadded:: 0.9.0
+
+        :param \**kwargs: deprecated.  Additional arguments here are ignored
+         by the default :class:`.Float` type.  For database specific
+         floats that support additional arguments, see that dialect's
+         documentation for details, such as
+         :class:`sqlalchemy.dialects.mysql.FLOAT`.
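+
+        A minimal usage sketch (the column name is hypothetical)::
+
+            from sqlalchemy import Column, Float
+
+            # stored as FLOAT/REAL; returned as a Python float by default
+            price = Column('price', Float(precision=10))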
+ + """ + self.precision = precision + self.asdecimal = asdecimal + self.decimal_return_scale = decimal_return_scale + if kwargs: + util.warn_deprecated("Additional keyword arguments " + "passed to Float ignored.") + + def result_processor(self, dialect, coltype): + if self.asdecimal: + return processors.to_decimal_processor_factory( + decimal.Decimal, + self._effective_decimal_return_scale) + else: + return None + + @util.memoized_property + def _expression_adaptations(self): + return { + operators.mul: { + Interval: Interval, + Numeric: self.__class__, + }, + operators.div: { + Numeric: self.__class__, + }, + operators.truediv: { + Numeric: self.__class__, + }, + operators.add: { + Numeric: self.__class__, + }, + operators.sub: { + Numeric: self.__class__, + } + } + + +class DateTime(_DateAffinity, TypeEngine): + + """A type for ``datetime.datetime()`` objects. + + Date and time types return objects from the Python ``datetime`` + module. Most DBAPIs have built in support for the datetime + module, with the noted exception of SQLite. In the case of + SQLite, date and time types are stored as strings which are then + converted back to datetime objects when rows are returned. + + For the time representation within the datetime type, some + backends include additional options, such as timezone support and + fractional seconds support. For fractional seconds, use the + dialect-specific datatype, such as :class:`.mysql.TIME`. For + timezone support, use at least the :class:`~.types.TIMESTAMP` datatype, + if not the dialect-specific datatype object. + + """ + + __visit_name__ = 'datetime' + + def __init__(self, timezone=False): + """Construct a new :class:`.DateTime`. + + :param timezone: boolean. Indicates that the datetime type should + enable timezone support, if available on the + **base date/time-holding type only**. It is recommended + to make use of the :class:`~.types.TIMESTAMP` datatype directly when + using this flag, as some databases include separate generic + date/time-holding types distinct from the timezone-capable + TIMESTAMP datatype, such as Oracle. + + + """ + self.timezone = timezone + + def get_dbapi_type(self, dbapi): + return dbapi.DATETIME + + @property + def python_type(self): + return dt.datetime + + @util.memoized_property + def _expression_adaptations(self): + return { + operators.add: { + Interval: self.__class__, + }, + operators.sub: { + Interval: self.__class__, + DateTime: Interval, + }, + } + + +class Date(_DateAffinity, TypeEngine): + + """A type for ``datetime.date()`` objects.""" + + __visit_name__ = 'date' + + def get_dbapi_type(self, dbapi): + return dbapi.DATETIME + + @property + def python_type(self): + return dt.date + + @util.memoized_property + def _expression_adaptations(self): + return { + operators.add: { + Integer: self.__class__, + Interval: DateTime, + Time: DateTime, + }, + operators.sub: { + # date - integer = date + Integer: self.__class__, + + # date - date = integer. 
+                Date: Integer,
+
+                Interval: DateTime,
+
+                # date - datetime = interval,
+                # this one is not in the PG docs
+                # but works
+                DateTime: Interval,
+            },
+        }
+
+
+class Time(_DateAffinity, TypeEngine):
+
+    """A type for ``datetime.time()`` objects."""
+
+    __visit_name__ = 'time'
+
+    def __init__(self, timezone=False):
+        self.timezone = timezone
+
+    def get_dbapi_type(self, dbapi):
+        return dbapi.DATETIME
+
+    @property
+    def python_type(self):
+        return dt.time
+
+    @util.memoized_property
+    def _expression_adaptations(self):
+        return {
+            operators.add: {
+                Date: DateTime,
+                Interval: self.__class__
+            },
+            operators.sub: {
+                Time: Interval,
+                Interval: self.__class__,
+            },
+        }
+
+
+class _Binary(TypeEngine):
+
+    """Define base behavior for binary types."""
+
+    def __init__(self, length=None):
+        self.length = length
+
+    def literal_processor(self, dialect):
+        def process(value):
+            value = value.decode(dialect.encoding).replace("'", "''")
+            return "'%s'" % value
+        return process
+
+    @property
+    def python_type(self):
+        return util.binary_type
+
+    # Python 3 - sqlite3 doesn't need the `Binary` conversion
+    # here, though pg8000 does to indicate "bytea"
+    def bind_processor(self, dialect):
+        if dialect.dbapi is None:
+            return None
+
+        DBAPIBinary = dialect.dbapi.Binary
+
+        def process(value):
+            if value is not None:
+                return DBAPIBinary(value)
+            else:
+                return None
+        return process
+
+    # Python 3 has native bytes() type
+    # both sqlite3 and pg8000 seem to return it,
+    # psycopg2 as of 2.5 returns 'memoryview'
+    if util.py2k:
+        def result_processor(self, dialect, coltype):
+            if util.jython:
+                def process(value):
+                    if value is not None:
+                        if isinstance(value, array.array):
+                            return value.tostring()
+                        return str(value)
+                    else:
+                        return None
+            else:
+                process = processors.to_str
+            return process
+    else:
+        def result_processor(self, dialect, coltype):
+            def process(value):
+                if value is not None:
+                    value = bytes(value)
+                return value
+            return process
+
+    def coerce_compared_value(self, op, value):
+        """See :meth:`.TypeEngine.coerce_compared_value` for a description."""
+
+        if isinstance(value, util.string_types):
+            return self
+        else:
+            return super(_Binary, self).coerce_compared_value(op, value)
+
+    def get_dbapi_type(self, dbapi):
+        return dbapi.BINARY
+
+
+class LargeBinary(_Binary):
+
+    """A type for large binary byte data.
+
+    The :class:`.LargeBinary` type corresponds to a large and/or unlengthed
+    binary type for the target platform, such as BLOB on MySQL and BYTEA for
+    PostgreSQL.  It also handles the necessary conversions for the DBAPI.
+
+    """
+
+    __visit_name__ = 'large_binary'
+
+    def __init__(self, length=None):
+        """
+        Construct a LargeBinary type.
+
+        :param length: optional, a length for the column for use in
+          DDL statements, for those binary types that accept a length,
+          such as the MySQL BLOB type.
+
+        """
+        _Binary.__init__(self, length=length)
+
+
+class Binary(LargeBinary):
+
+    """Deprecated.  Renamed to LargeBinary."""
+
+    def __init__(self, *arg, **kw):
+        util.warn_deprecated('The Binary type has been renamed to '
+                             'LargeBinary.')
+        LargeBinary.__init__(self, *arg, **kw)
+
+
+class SchemaType(SchemaEventTarget):
+
+    """Mark a type as possibly requiring schema-level DDL for usage.
+
+    Supports types that must be explicitly created/dropped (i.e. PG ENUM type)
+    as well as types that are complemented by table or schema level
+    constraints, triggers, and other rules.
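+
+    For example, :class:`.Enum` is a :class:`.SchemaType`; on PostgreSQL,
+    creating the owning table also emits ``CREATE TYPE`` for the enum
+    (an illustrative sketch; the type and table names are hypothetical)::
+
+        from sqlalchemy import Column, Enum, Integer, MetaData, Table
+
+        metadata = MetaData()
+        status_type = Enum('draft', 'published', name='status_enum',
+                           metadata=metadata)
+        documents = Table('documents', metadata,
+            Column('id', Integer, primary_key=True),
+            Column('status', status_type)
+        )
+        # metadata.create_all(engine) on PostgreSQL emits
+        # CREATE TYPE status_enum ... before CREATE TABLE documents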
+ + :class:`.SchemaType` classes can also be targets for the + :meth:`.DDLEvents.before_parent_attach` and + :meth:`.DDLEvents.after_parent_attach` events, where the events fire off + surrounding the association of the type object with a parent + :class:`.Column`. + + .. seealso:: + + :class:`.Enum` + + :class:`.Boolean` + + + """ + + def __init__(self, name=None, schema=None, metadata=None, + inherit_schema=False, quote=None, _create_events=True): + if name is not None: + self.name = quoted_name(name, quote) + else: + self.name = None + self.schema = schema + self.metadata = metadata + self.inherit_schema = inherit_schema + self._create_events = _create_events + + if _create_events and self.metadata: + event.listen( + self.metadata, + "before_create", + util.portable_instancemethod(self._on_metadata_create) + ) + event.listen( + self.metadata, + "after_drop", + util.portable_instancemethod(self._on_metadata_drop) + ) + + def _translate_schema(self, effective_schema, map_): + return map_.get(effective_schema, effective_schema) + + def _set_parent(self, column): + column._on_table_attach(util.portable_instancemethod(self._set_table)) + + def _variant_mapping_for_set_table(self, column): + if isinstance(column.type, Variant): + variant_mapping = column.type.mapping.copy() + variant_mapping['_default'] = column.type.impl + else: + variant_mapping = None + return variant_mapping + + def _set_table(self, column, table): + if self.inherit_schema: + self.schema = table.schema + + if not self._create_events: + return + + variant_mapping = self._variant_mapping_for_set_table(column) + + event.listen( + table, + "before_create", + util.portable_instancemethod( + self._on_table_create, + {"variant_mapping": variant_mapping}) + ) + event.listen( + table, + "after_drop", + util.portable_instancemethod( + self._on_table_drop, + {"variant_mapping": variant_mapping}) + ) + if self.metadata is None: + # TODO: what's the difference between self.metadata + # and table.metadata here ? 
+ event.listen( + table.metadata, + "before_create", + util.portable_instancemethod( + self._on_metadata_create, + {"variant_mapping": variant_mapping}) + ) + event.listen( + table.metadata, + "after_drop", + util.portable_instancemethod( + self._on_metadata_drop, + {"variant_mapping": variant_mapping}) + ) + + def copy(self, **kw): + return self.adapt(self.__class__, _create_events=True) + + def adapt(self, impltype, **kw): + schema = kw.pop('schema', self.schema) + metadata = kw.pop('metadata', self.metadata) + _create_events = kw.pop('_create_events', False) + + return impltype(name=self.name, + schema=schema, + inherit_schema=self.inherit_schema, + metadata=metadata, + _create_events=_create_events, + **kw) + + @property + def bind(self): + return self.metadata and self.metadata.bind or None + + def create(self, bind=None, checkfirst=False): + """Issue CREATE ddl for this type, if applicable.""" + + if bind is None: + bind = _bind_or_error(self) + t = self.dialect_impl(bind.dialect) + if t.__class__ is not self.__class__ and isinstance(t, SchemaType): + t.create(bind=bind, checkfirst=checkfirst) + + def drop(self, bind=None, checkfirst=False): + """Issue DROP ddl for this type, if applicable.""" + + if bind is None: + bind = _bind_or_error(self) + t = self.dialect_impl(bind.dialect) + if t.__class__ is not self.__class__ and isinstance(t, SchemaType): + t.drop(bind=bind, checkfirst=checkfirst) + + def _on_table_create(self, target, bind, **kw): + if not self._is_impl_for_variant(bind.dialect, kw): + return + + t = self.dialect_impl(bind.dialect) + if t.__class__ is not self.__class__ and isinstance(t, SchemaType): + t._on_table_create(target, bind, **kw) + + def _on_table_drop(self, target, bind, **kw): + if not self._is_impl_for_variant(bind.dialect, kw): + return + + t = self.dialect_impl(bind.dialect) + if t.__class__ is not self.__class__ and isinstance(t, SchemaType): + t._on_table_drop(target, bind, **kw) + + def _on_metadata_create(self, target, bind, **kw): + if not self._is_impl_for_variant(bind.dialect, kw): + return + + t = self.dialect_impl(bind.dialect) + if t.__class__ is not self.__class__ and isinstance(t, SchemaType): + t._on_metadata_create(target, bind, **kw) + + def _on_metadata_drop(self, target, bind, **kw): + if not self._is_impl_for_variant(bind.dialect, kw): + return + + t = self.dialect_impl(bind.dialect) + if t.__class__ is not self.__class__ and isinstance(t, SchemaType): + t._on_metadata_drop(target, bind, **kw) + + def _is_impl_for_variant(self, dialect, kw): + variant_mapping = kw.pop('variant_mapping', None) + if variant_mapping is None: + return True + + if dialect.name in variant_mapping and \ + variant_mapping[dialect.name] is self: + return True + elif dialect.name not in variant_mapping: + return variant_mapping['_default'] is self + + +class Enum(String, SchemaType): + + """Generic Enum Type. + + The :class:`.Enum` type provides a set of possible string values + which the column is constrained towards. + + The :class:`.Enum` type will make use of the backend's native "ENUM" + type if one is available; otherwise, it uses a VARCHAR datatype and + produces a CHECK constraint. Use of the backend-native enum type + can be disabled using the :paramref:`.Enum.native_enum` flag, and + the production of the CHECK constraint is configurable using the + :paramref:`.Enum.create_constraint` flag. + + The :class:`.Enum` type also provides in-Python validation of string + values during both read and write operations. 
When reading a value + from the database in a result set, the string value is always checked + against the list of possible values and a ``LookupError`` is raised + if no match is found. When passing a value to the database as a + plain string within a SQL statement, if the + :paramref:`.Enum.validate_strings` parameter is + set to True, a ``LookupError`` is raised for any string value that's + not located in the given list of possible values; note that this + impacts usage of LIKE expressions with enumerated values (an unusual + use case). + + .. versionchanged:: 1.1 the :class:`.Enum` type now provides in-Python + validation of input values as well as on data being returned by + the database. + + The source of enumerated values may be a list of string values, or + alternatively a PEP-435-compliant enumerated class. For the purposes + of the :class:`.Enum` datatype, this class need only provide a + ``__members__`` method. + + When using an enumerated class, the enumerated objects are used + both for input and output, rather than strings as is the case with + a plain-string enumerated type:: + + import enum + class MyEnum(enum.Enum): + one = 1 + two = 2 + three = 3 + + + t = Table( + 'data', MetaData(), + Column('value', Enum(MyEnum)) + ) + + connection.execute(t.insert(), {"value": MyEnum.two}) + assert connection.scalar(t.select()) is MyEnum.two + + Above, the string names of each element, e.g. "one", "two", "three", + are persisted to the database; the values of the Python Enum, here + indicated as integers, are **not** used; the value of each enum can + therefore be any kind of Python object whether or not it is persistable. + + .. versionadded:: 1.1 - support for PEP-435-style enumerated + classes. + + + .. seealso:: + + :class:`~.postgresql.ENUM` - PostgreSQL-specific type, + which has additional functionality. + + """ + + __visit_name__ = 'enum' + + def __init__(self, *enums, **kw): + r"""Construct an enum. + + Keyword arguments which don't apply to a specific backend are ignored + by that backend. + + :param \*enums: either exactly one PEP-435 compliant enumerated type + or one or more string or unicode enumeration labels. If unicode + labels are present, the `convert_unicode` flag is auto-enabled. + + .. versionadded:: 1.1 a PEP-435 style enumerated class may be + passed. + + :param convert_unicode: Enable unicode-aware bind parameter and + result-set processing for this Enum's data. This is set + automatically based on the presence of unicode label strings. + + :param create_constraint: defaults to True. When creating a non-native + enumerated type, also build a CHECK constraint on the database + against the valid values. + + .. versionadded:: 1.1 - added :paramref:`.Enum.create_constraint` + which provides the option to disable the production of the + CHECK constraint for a non-native enumerated type. + + :param metadata: Associate this type directly with a ``MetaData`` + object. For types that exist on the target database as an + independent schema construct (PostgreSQL), this type will be + created and dropped within ``create_all()`` and ``drop_all()`` + operations. If the type is not associated with any ``MetaData`` + object, it will associate itself with each ``Table`` in which it is + used, and will be created when any of those individual tables are + created, after a check is performed for its existence. The type is + only dropped when ``drop_all()`` is called for that ``Table`` + object's metadata, however. + + :param name: The name of this type. 
This is required for PostgreSQL + and any future supported database which requires an explicitly + named type, or an explicitly named constraint in order to generate + the type and/or a table that uses it. If a PEP-435 enumerated + class was used, its name (converted to lower case) is used by + default. + + :param native_enum: Use the database's native ENUM type when + available. Defaults to True. When False, uses VARCHAR + check + constraint for all backends. + + :param schema: Schema name of this type. For types that exist on the + target database as an independent schema construct (PostgreSQL), + this parameter specifies the named schema in which the type is + present. + + .. note:: + + The ``schema`` of the :class:`.Enum` type does not + by default make use of the ``schema`` established on the + owning :class:`.Table`. If this behavior is desired, + set the ``inherit_schema`` flag to ``True``. + + :param quote: Set explicit quoting preferences for the type's name. + + :param inherit_schema: When ``True``, the "schema" from the owning + :class:`.Table` will be copied to the "schema" attribute of this + :class:`.Enum`, replacing whatever value was passed for the + ``schema`` attribute. This also takes effect when using the + :meth:`.Table.tometadata` operation. + + :param validate_strings: when True, string values that are being + passed to the database in a SQL statement will be checked + for validity against the list of enumerated values. Unrecognized + values will result in a ``LookupError`` being raised. + + .. versionadded:: 1.1.0b2 + + """ + + values, objects = self._parse_into_values(enums, kw) + self._setup_for_values(values, objects, kw) + + self.native_enum = kw.pop('native_enum', True) + convert_unicode = kw.pop('convert_unicode', None) + self.create_constraint = kw.pop('create_constraint', True) + self.validate_strings = kw.pop('validate_strings', False) + + if convert_unicode is None: + for e in self.enums: + if isinstance(e, util.text_type): + convert_unicode = True + break + else: + convert_unicode = False + + if self.enums: + length = max(len(x) for x in self.enums) + else: + length = 0 + self._valid_lookup[None] = self._object_lookup[None] = None + + String.__init__(self, + length=length, + convert_unicode=convert_unicode, + ) + SchemaType.__init__(self, **kw) + + def _parse_into_values(self, enums, kw): + if len(enums) == 1 and hasattr(enums[0], '__members__'): + self.enum_class = enums[0] + values = list(self.enum_class.__members__) + objects = [self.enum_class.__members__[k] for k in values] + kw.setdefault('name', self.enum_class.__name__.lower()) + + return values, objects + else: + self.enum_class = None + return enums, enums + + def _setup_for_values(self, values, objects, kw): + self.enums = list(values) + + self._valid_lookup = dict( + zip(objects, values) + ) + self._object_lookup = dict( + (value, key) for key, value in self._valid_lookup.items() + ) + self._valid_lookup.update( + [(value, value) for value in self._valid_lookup.values()] + ) + + def _db_value_for_elem(self, elem): + try: + return self._valid_lookup[elem] + except KeyError: + # for unknown string values, we return as is. While we can + # validate these if we wanted, that does not allow for lesser-used + # end-user use cases, such as using a LIKE comparison with an enum, + # or for an application that wishes to apply string tests to an + # ENUM (see [ticket:3725]). 
While we can decide to differentiate + # here between an INSERT statement and a criteria used in a SELECT, + # for now we're staying conservative w/ behavioral changes (perhaps + # someone has a trigger that handles strings on INSERT) + if not self.validate_strings and \ + isinstance(elem, compat.string_types): + return elem + else: + raise LookupError( + '"%s" is not among the defined enum values' % elem) + + class Comparator(String.Comparator): + + def _adapt_expression(self, op, other_comparator): + op, typ = super(Enum.Comparator, self)._adapt_expression( + op, other_comparator) + if op is operators.concat_op: + typ = String( + self.type.length, + convert_unicode=self.type.convert_unicode) + return op, typ + + comparator_factory = Comparator + + def _object_value_for_elem(self, elem): + try: + return self._object_lookup[elem] + except KeyError: + raise LookupError( + '"%s" is not among the defined enum values' % elem) + + def __repr__(self): + return util.generic_repr(self, + additional_kw=[('native_enum', True)], + to_inspect=[Enum, SchemaType], + ) + + def _should_create_constraint(self, compiler, **kw): + if not self._is_impl_for_variant(compiler.dialect, kw): + return False + return not self.native_enum or \ + not compiler.dialect.supports_native_enum + + @util.dependencies("sqlalchemy.sql.schema") + def _set_table(self, schema, column, table): + if self.native_enum: + SchemaType._set_table(self, column, table) + + if not self.create_constraint: + return + + variant_mapping = self._variant_mapping_for_set_table(column) + + e = schema.CheckConstraint( + type_coerce(column, self).in_(self.enums), + name=_defer_name(self.name), + _create_rule=util.portable_instancemethod( + self._should_create_constraint, + {"variant_mapping": variant_mapping}), + _type_bound=True + ) + assert e.table is table + + def copy(self, **kw): + return SchemaType.copy(self, **kw) + + def adapt(self, impltype, **kw): + schema = kw.pop('schema', self.schema) + metadata = kw.pop('metadata', self.metadata) + _create_events = kw.pop('_create_events', False) + if issubclass(impltype, Enum): + if self.enum_class is not None: + args = [self.enum_class] + else: + args = self.enums + return impltype(name=self.name, + schema=schema, + metadata=metadata, + convert_unicode=self.convert_unicode, + native_enum=self.native_enum, + inherit_schema=self.inherit_schema, + validate_strings=self.validate_strings, + _create_events=_create_events, + *args, + **kw) + else: + # TODO: why would we be here? 
+            return super(Enum, self).adapt(impltype, **kw)
+
+    def literal_processor(self, dialect):
+        parent_processor = super(Enum, self).literal_processor(dialect)
+
+        def process(value):
+            value = self._db_value_for_elem(value)
+            if parent_processor:
+                value = parent_processor(value)
+            return value
+        return process
+
+    def bind_processor(self, dialect):
+        def process(value):
+            value = self._db_value_for_elem(value)
+            if parent_processor:
+                value = parent_processor(value)
+            return value
+
+        parent_processor = super(Enum, self).bind_processor(dialect)
+        return process
+
+    def result_processor(self, dialect, coltype):
+        parent_processor = super(Enum, self).result_processor(
+            dialect, coltype)
+
+        def process(value):
+            if parent_processor:
+                value = parent_processor(value)
+
+            value = self._object_value_for_elem(value)
+            return value
+
+        return process
+
+    @property
+    def python_type(self):
+        if self.enum_class:
+            return self.enum_class
+        else:
+            return super(Enum, self).python_type
+
+
+class PickleType(TypeDecorator):
+    """Holds Python objects, which are serialized using pickle.
+
+    PickleType builds upon the Binary type to apply Python's
+    ``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on
+    the way out, allowing any pickleable Python object to be stored as
+    a serialized binary field.
+
+    To allow ORM change events to propagate for elements associated
+    with :class:`.PickleType`, see :ref:`mutable_toplevel`.
+
+    """
+
+    impl = LargeBinary
+
+    def __init__(self, protocol=pickle.HIGHEST_PROTOCOL,
+                 pickler=None, comparator=None):
+        """
+        Construct a PickleType.
+
+        :param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``.
+
+        :param pickler: defaults to ``cPickle`` or ``pickle`` if
+          ``cPickle`` is not available.  May be any object with
+          pickle-compatible ``dumps`` and ``loads`` methods.
+
+        :param comparator: a 2-arg callable predicate used
+          to compare values of this type.  If left as ``None``,
+          the Python "equals" operator is used to compare values.
+
+        """
+        self.protocol = protocol
+        self.pickler = pickler or pickle
+        self.comparator = comparator
+        super(PickleType, self).__init__()
+
+    def __reduce__(self):
+        return PickleType, (self.protocol,
+                            None,
+                            self.comparator)
+
+    def bind_processor(self, dialect):
+        impl_processor = self.impl.bind_processor(dialect)
+        dumps = self.pickler.dumps
+        protocol = self.protocol
+        if impl_processor:
+            def process(value):
+                if value is not None:
+                    value = dumps(value, protocol)
+                return impl_processor(value)
+        else:
+            def process(value):
+                if value is not None:
+                    value = dumps(value, protocol)
+                return value
+        return process
+
+    def result_processor(self, dialect, coltype):
+        impl_processor = self.impl.result_processor(dialect, coltype)
+        loads = self.pickler.loads
+        if impl_processor:
+            def process(value):
+                value = impl_processor(value)
+                if value is None:
+                    return None
+                return loads(value)
+        else:
+            def process(value):
+                if value is None:
+                    return None
+                return loads(value)
+        return process
+
+    def compare_values(self, x, y):
+        if self.comparator:
+            return self.comparator(x, y)
+        else:
+            return x == y
+
+
+class Boolean(TypeEngine, SchemaType):
+
+    """A bool datatype.
+
+    Boolean typically uses BOOLEAN or SMALLINT on the DDL side, and on
+    the Python side deals in ``True`` or ``False``.
+
+    """
+
+    __visit_name__ = 'boolean'
+
+    def __init__(
+            self, create_constraint=True, name=None, _create_events=True):
+        """Construct a Boolean.
+
+        :param create_constraint: defaults to True.
If the boolean + is generated as an int/smallint, also create a CHECK constraint + on the table that ensures 1 or 0 as a value. + + :param name: if a CHECK constraint is generated, specify + the name of the constraint. + + """ + self.create_constraint = create_constraint + self.name = name + self._create_events = _create_events + + def _should_create_constraint(self, compiler, **kw): + if not self._is_impl_for_variant(compiler.dialect, kw): + return False + return not compiler.dialect.supports_native_boolean + + @util.dependencies("sqlalchemy.sql.schema") + def _set_table(self, schema, column, table): + if not self.create_constraint: + return + + variant_mapping = self._variant_mapping_for_set_table(column) + + e = schema.CheckConstraint( + type_coerce(column, self).in_([0, 1]), + name=_defer_name(self.name), + _create_rule=util.portable_instancemethod( + self._should_create_constraint, + {"variant_mapping": variant_mapping}), + _type_bound=True + ) + assert e.table is table + + @property + def python_type(self): + return bool + + def literal_processor(self, dialect): + if dialect.supports_native_boolean: + def process(value): + return "true" if value else "false" + else: + def process(value): + return str(1 if value else 0) + return process + + def bind_processor(self, dialect): + if dialect.supports_native_boolean: + return None + else: + return processors.boolean_to_int + + def result_processor(self, dialect, coltype): + if dialect.supports_native_boolean: + return None + else: + return processors.int_to_boolean + + +class Interval(_DateAffinity, TypeDecorator): + + """A type for ``datetime.timedelta()`` objects. + + The Interval type deals with ``datetime.timedelta`` objects. In + PostgreSQL, the native ``INTERVAL`` type is used; for others, the + value is stored as a date which is relative to the "epoch" + (Jan. 1, 1970). + + Note that the ``Interval`` type does not currently provide date arithmetic + operations on platforms which do not support interval types natively. Such + operations usually require transformation of both sides of the expression + (such as, conversion of both sides into integer epoch values first) which + currently is a manual procedure (such as via + :attr:`~sqlalchemy.sql.expression.func`). + + """ + + impl = DateTime + epoch = dt.datetime.utcfromtimestamp(0) + + def __init__(self, native=True, + second_precision=None, + day_precision=None): + """Construct an Interval object. + + :param native: when True, use the actual + INTERVAL type provided by the database, if + supported (currently PostgreSQL, Oracle). + Otherwise, represent the interval data as + an epoch value regardless. + + :param second_precision: For native interval types + which support a "fractional seconds precision" parameter, + i.e. Oracle and PostgreSQL + + :param day_precision: for native interval types which + support a "day precision" parameter, i.e. Oracle. 
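+
+        A minimal usage sketch (the table and column names are
+        hypothetical)::
+
+            from datetime import timedelta
+
+            from sqlalchemy import Column, Integer, Interval, MetaData, Table
+
+            jobs = Table('jobs', MetaData(),
+                Column('id', Integer, primary_key=True),
+                # INTERVAL on PostgreSQL/Oracle; otherwise stored as a
+                # datetime relative to the epoch
+                Column('duration', Interval)
+            )
+            # e.g. conn.execute(jobs.insert(), duration=timedelta(hours=2))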
+
+        """
+        super(Interval, self).__init__()
+        self.native = native
+        self.second_precision = second_precision
+        self.day_precision = day_precision
+
+    def adapt(self, cls, **kw):
+        if self.native and hasattr(cls, '_adapt_from_generic_interval'):
+            return cls._adapt_from_generic_interval(self, **kw)
+        else:
+            return self.__class__(
+                native=self.native,
+                second_precision=self.second_precision,
+                day_precision=self.day_precision,
+                **kw)
+
+    @property
+    def python_type(self):
+        return dt.timedelta
+
+    def bind_processor(self, dialect):
+        impl_processor = self.impl.bind_processor(dialect)
+        epoch = self.epoch
+        if impl_processor:
+            def process(value):
+                if value is not None:
+                    value = epoch + value
+                return impl_processor(value)
+        else:
+            def process(value):
+                if value is not None:
+                    value = epoch + value
+                return value
+        return process
+
+    def result_processor(self, dialect, coltype):
+        impl_processor = self.impl.result_processor(dialect, coltype)
+        epoch = self.epoch
+        if impl_processor:
+            def process(value):
+                value = impl_processor(value)
+                if value is None:
+                    return None
+                return value - epoch
+        else:
+            def process(value):
+                if value is None:
+                    return None
+                return value - epoch
+        return process
+
+    @util.memoized_property
+    def _expression_adaptations(self):
+        return {
+            operators.add: {
+                Date: DateTime,
+                Interval: self.__class__,
+                DateTime: DateTime,
+                Time: Time,
+            },
+            operators.sub: {
+                Interval: self.__class__
+            },
+            operators.mul: {
+                Numeric: self.__class__
+            },
+            operators.truediv: {
+                Numeric: self.__class__
+            },
+            operators.div: {
+                Numeric: self.__class__
+            }
+        }
+
+    @property
+    def _type_affinity(self):
+        return Interval
+
+    def coerce_compared_value(self, op, value):
+        """See :meth:`.TypeEngine.coerce_compared_value` for a description."""
+
+        return self.impl.coerce_compared_value(op, value)
+
+
+class JSON(Indexable, TypeEngine):
+    """Represent a SQL JSON type.
+
+    .. note::  :class:`.types.JSON` is provided as a facade for vendor-specific
+       JSON types.  Since it supports JSON SQL operations, it only
+       works on backends that have an actual JSON type, currently
+       PostgreSQL as well as certain versions of MySQL.
+
+    :class:`.types.JSON` is part of the Core in support of the growing
+    popularity of native JSON datatypes.
+
+    The :class:`.types.JSON` type stores arbitrary JSON format data, e.g.::
+
+        data_table = Table('data_table', metadata,
+            Column('id', Integer, primary_key=True),
+            Column('data', JSON)
+        )
+
+        with engine.connect() as conn:
+            conn.execute(
+                data_table.insert(),
+                data = {"key1": "value1", "key2": "value2"}
+            )
+
+    The base :class:`.types.JSON` provides these operations:
+
+    * Keyed index operations::
+
+        data_table.c.data['some key']
+
+    * Integer index operations::
+
+        data_table.c.data[3]
+
+    * Path index operations::
+
+        data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
+
+    Additional operations are available from the dialect-specific versions
+    of :class:`.types.JSON`, such as :class:`.postgresql.JSON` and
+    :class:`.postgresql.JSONB`, each of which offer more operators than
+    just the basic type.
+
+    Index operations return an expression object whose type defaults to
+    :class:`.JSON`, so that further JSON-oriented instructions
+    may be called upon the result type.  Note that there are backend-specific
+    idiosyncrasies here, including that the PostgreSQL database does not
+    generally compare a "json" to a "json" structure without type casts.
+    These idiosyncrasies
+    can be accommodated in a backend-neutral way by making explicit use
+    of the :func:`.cast` and :func:`.type_coerce` constructs.
+    Comparison of specific index elements of a :class:`.JSON` object
+    to other objects works best if the **left hand side is CAST to a string**
+    and the **right hand side is rendered as a json string**; a future
+    SQLAlchemy feature such as a generic "astext" modifier may simplify
+    this at some point:
+
+    * **Compare an element of a JSON structure to a string**::
+
+        from sqlalchemy import cast, type_coerce
+        from sqlalchemy import String, JSON
+
+        cast(
+            data_table.c.data['some_key'], String
+        ) == '"some_value"'
+
+        cast(
+            data_table.c.data['some_key'], String
+        ) == type_coerce("some_value", JSON)
+
+    * **Compare an element of a JSON structure to an integer**::
+
+        from sqlalchemy import cast, type_coerce
+        from sqlalchemy import String, JSON
+
+        cast(data_table.c.data['some_key'], String) == '55'
+
+        cast(
+            data_table.c.data['some_key'], String
+        ) == type_coerce(55, JSON)
+
+    * **Compare an element of a JSON structure to some other JSON
+      structure** - note that Python dictionaries are typically not ordered
+      so care should be taken here to assert that the JSON structures are
+      identical::
+
+        from sqlalchemy import cast, type_coerce
+        from sqlalchemy import String, JSON
+        import json
+
+        cast(
+            data_table.c.data['some_key'], String
+        ) == json.dumps({"foo": "bar"})
+
+        cast(
+            data_table.c.data['some_key'], String
+        ) == type_coerce({"foo": "bar"}, JSON)
+
+    The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not
+    detect in-place mutations to the structure.  In order to detect these, the
+    :mod:`sqlalchemy.ext.mutable` extension must be used.  This extension will
+    allow "in-place" changes to the datastructure to produce events which
+    will be detected by the unit of work.  See the example at :class:`.HSTORE`
+    for a simple example involving a dictionary.
+
+    When working with NULL values, the :class:`.JSON` type recommends the
+    use of two specific constants in order to differentiate between a column
+    that evaluates to SQL NULL, e.g. no value, vs. the JSON-encoded string
+    of ``"null"``.  To insert or select against a value that is SQL NULL,
+    use the constant :func:`.null`::
+
+        from sqlalchemy import null
+        conn.execute(table.insert(), json_value=null())
+
+    To insert or select against a value that is JSON ``"null"``, use the
+    constant :attr:`.JSON.NULL`::
+
+        conn.execute(table.insert(), json_value=JSON.NULL)
+
+    The :class:`.JSON` type supports a flag
+    :paramref:`.JSON.none_as_null` which when set to True will result
+    in the Python constant ``None`` evaluating to the value of SQL
+    NULL, and when set to False results in the Python constant
+    ``None`` evaluating to the value of JSON ``"null"``.  The Python
+    value ``None`` may be used in conjunction with either
+    :attr:`.JSON.NULL` or :func:`.null` in order to indicate NULL
+    values, but care must be taken as to the value of the
+    :paramref:`.JSON.none_as_null` flag in these cases.
+
+    .. seealso::
+
+        :class:`.postgresql.JSON`
+
+        :class:`.postgresql.JSONB`
+
+        :class:`.mysql.JSON`
+
+    .. versionadded:: 1.1
+
+
+    """
+    __visit_name__ = 'JSON'
+
+    hashable = False
+    NULL = util.symbol('JSON_NULL')
+    """Describe the json value of NULL.
+
+    This value is used to force the JSON value of ``"null"`` to be
+    used as the value.
A value of Python ``None`` will be recognized + either as SQL NULL or JSON ``"null"``, based on the setting + of the :paramref:`.JSON.none_as_null` flag; the :attr:`.JSON.NULL` + constant can be used to always resolve to JSON ``"null"`` regardless + of this setting. This is in contrast to the :func:`.sql.null` construct, + which always resolves to SQL NULL. E.g.:: + + from sqlalchemy import null + from sqlalchemy.dialects.postgresql import JSON + + obj1 = MyObject(json_value=null()) # will *always* insert SQL NULL + obj2 = MyObject(json_value=JSON.NULL) # will *always* insert JSON string "null" + + session.add_all([obj1, obj2]) + session.commit() + + """ + + def __init__(self, none_as_null=False): + """Construct a :class:`.types.JSON` type. + + :param none_as_null=False: if True, persist the value ``None`` as a + SQL NULL value, not the JSON encoding of ``null``. Note that + when this flag is False, the :func:`.null` construct can still + be used to persist a NULL value:: + + from sqlalchemy import null + conn.execute(table.insert(), data=null()) + + .. note:: + + :paramref:`.JSON.none_as_null` does **not** apply to the + values passed to :paramref:`.Column.default` and + :paramref:`.Column.server_default`; a value of ``None`` passed for + these parameters means "no default present". + + .. seealso:: + + :attr:`.types.JSON.NULL` + + """ + self.none_as_null = none_as_null + + class JSONElementType(TypeEngine): + """common function for index / path elements in a JSON expression.""" + + _integer = Integer() + _string = String() + + def string_bind_processor(self, dialect): + return self._string._cached_bind_processor(dialect) + + def string_literal_processor(self, dialect): + return self._string._cached_literal_processor(dialect) + + def bind_processor(self, dialect): + int_processor = self._integer._cached_bind_processor(dialect) + string_processor = self.string_bind_processor(dialect) + + def process(value): + if int_processor and isinstance(value, int): + value = int_processor(value) + elif string_processor and isinstance(value, util.string_types): + value = string_processor(value) + return value + + return process + + def literal_processor(self, dialect): + int_processor = self._integer._cached_literal_processor(dialect) + string_processor = self.string_literal_processor(dialect) + + def process(value): + if int_processor and isinstance(value, int): + value = int_processor(value) + elif string_processor and isinstance(value, util.string_types): + value = string_processor(value) + return value + + return process + + class JSONIndexType(JSONElementType): + """Placeholder for the datatype of a JSON index value. + + This allows execution-time processing of JSON index values + for special syntaxes. + + """ + + class JSONPathType(JSONElementType): + """Placeholder type for JSON path operations. + + This allows execution-time processing of a path-based + index value into a specific SQL syntax. 
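+
+        For example, a tuple index such as the following is rendered as a
+        path expression on backends that support one (``data_table`` is
+        the hypothetical table used in the examples above)::
+
+            data_table.c.data[('key_1', 'key_2', 5)]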
+ + """ + + class Comparator(Indexable.Comparator, Concatenable.Comparator): + """Define comparison operations for :class:`.types.JSON`.""" + + @util.dependencies('sqlalchemy.sql.default_comparator') + def _setup_getitem(self, default_comparator, index): + if not isinstance(index, util.string_types) and \ + isinstance(index, collections.Sequence): + index = default_comparator._check_literal( + self.expr, operators.json_path_getitem_op, + index, bindparam_type=JSON.JSONPathType + ) + + operator = operators.json_path_getitem_op + else: + index = default_comparator._check_literal( + self.expr, operators.json_getitem_op, + index, bindparam_type=JSON.JSONIndexType + ) + operator = operators.json_getitem_op + + return operator, index, self.type + + comparator_factory = Comparator + + @property + def python_type(self): + return dict + + @property + def should_evaluate_none(self): + return not self.none_as_null + + @util.memoized_property + def _str_impl(self): + return String(convert_unicode=True) + + def bind_processor(self, dialect): + string_process = self._str_impl.bind_processor(dialect) + + json_serializer = dialect._json_serializer or json.dumps + + def process(value): + if value is self.NULL: + value = None + elif isinstance(value, elements.Null) or ( + value is None and self.none_as_null + ): + return None + + serialized = json_serializer(value) + if string_process: + serialized = string_process(serialized) + return serialized + + return process + + def result_processor(self, dialect, coltype): + string_process = self._str_impl.result_processor(dialect, coltype) + json_deserializer = dialect._json_deserializer or json.loads + + def process(value): + if value is None: + return None + if string_process: + value = string_process(value) + return json_deserializer(value) + return process + + +class ARRAY(Indexable, Concatenable, TypeEngine): + """Represent a SQL Array type. + + .. note:: This type serves as the basis for all ARRAY operations. + However, currently **only the PostgreSQL backend has support + for SQL arrays in SQLAlchemy**. It is recommended to use the + :class:`.postgresql.ARRAY` type directly when using ARRAY types + with PostgreSQL, as it provides additional operators specific + to that backend. + + :class:`.types.ARRAY` is part of the Core in support of various SQL standard + functions such as :class:`.array_agg` which explicitly involve arrays; + however, with the exception of the PostgreSQL backend and possibly + some third-party dialects, no other SQLAlchemy built-in dialect has + support for this type. + + An :class:`.types.ARRAY` type is constructed given the "type" + of element:: + + mytable = Table("mytable", metadata, + Column("data", ARRAY(Integer)) + ) + + The above type represents an N-dimensional array, + meaning a supporting backend such as PostgreSQL will interpret values + with any number of dimensions automatically. To produce an INSERT + construct that passes in a 1-dimensional array of integers:: + + connection.execute( + mytable.insert(), + data=[1,2,3] + ) + + The :class:`.types.ARRAY` type can be constructed given a fixed number + of dimensions:: + + mytable = Table("mytable", metadata, + Column("data", ARRAY(Integer, dimensions=2)) + ) + + Sending a number of dimensions is optional, but recommended if the + datatype is to represent arrays of more than one dimension. This number + is used: + + * When emitting the type declaration itself to the database, e.g. + ``INTEGER[][]`` + + * When translating Python values to database values, and vice versa, e.g. 
+ an ARRAY of :class:`.Unicode` objects uses this number to efficiently + access the string values inside of array structures without resorting + to per-row type inspection + + * When used with the Python ``getitem`` accessor, the number of dimensions + serves to define the kind of type that the ``[]`` operator should + return, e.g. for an ARRAY of INTEGER with two dimensions:: + + >>> expr = table.c.column[5] # returns ARRAY(Integer, dimensions=1) + >>> expr = expr[6] # returns Integer + + For 1-dimensional arrays, an :class:`.types.ARRAY` instance with no + dimension parameter will generally assume single-dimensional behaviors. + + SQL expressions of type :class:`.types.ARRAY` have support for "index" and + "slice" behavior. The Python ``[]`` operator works normally here, given + integer indexes or slices. Arrays default to 1-based indexing. + The operator produces binary expression + constructs which will produce the appropriate SQL, both for + SELECT statements:: + + select([mytable.c.data[5], mytable.c.data[2:7]]) + + as well as UPDATE statements when the :meth:`.Update.values` method + is used:: + + mytable.update().values({ + mytable.c.data[5]: 7, + mytable.c.data[2:7]: [1, 2, 3] + }) + + The :class:`.types.ARRAY` type also provides for the operators + :meth:`.types.ARRAY.Comparator.any` and :meth:`.types.ARRAY.Comparator.all`. + The PostgreSQL-specific version of :class:`.types.ARRAY` also provides additional + operators. + + .. versionadded:: 1.1.0 + + .. seealso:: + + :class:`.postgresql.ARRAY` + + """ + __visit_name__ = 'ARRAY' + + zero_indexes = False + """if True, Python zero-based indexes should be interpreted as one-based + on the SQL expression side.""" + + class Comparator(Indexable.Comparator, Concatenable.Comparator): + + """Define comparison operations for :class:`.types.ARRAY`. + + More operators are available on the dialect-specific form + of this type. See :class:`.postgresql.ARRAY.Comparator`. + + """ + + def _setup_getitem(self, index): + if isinstance(index, slice): + return_type = self.type + if self.type.zero_indexes: + index = slice( + index.start + 1, + index.stop + 1, + index.step + ) + index = Slice( + _literal_as_binds( + index.start, name=self.expr.key, + type_=type_api.INTEGERTYPE), + _literal_as_binds( + index.stop, name=self.expr.key, + type_=type_api.INTEGERTYPE), + _literal_as_binds( + index.step, name=self.expr.key, + type_=type_api.INTEGERTYPE) + ) + else: + if self.type.zero_indexes: + index += 1 + if self.type.dimensions is None or self.type.dimensions == 1: + return_type = self.type.item_type + else: + adapt_kw = {'dimensions': self.type.dimensions - 1} + return_type = self.type.adapt( + self.type.__class__, **adapt_kw) + + return operators.getitem, index, return_type + + @util.dependencies("sqlalchemy.sql.elements") + def any(self, elements, other, operator=None): + """Return ``other operator ANY (array)`` clause. + + Argument places are switched, because ANY requires array + expression to be on the right hand-side. + + E.g.:: + + from sqlalchemy.sql import operators + + conn.execute( + select([table.c.data]).where( + table.c.data.any(7, operator=operators.lt) + ) + ) + + :param other: expression to be compared + :param operator: an operator object from the + :mod:`sqlalchemy.sql.operators` + package, defaults to :func:`.operators.eq`. + + .. 
seealso:: + + :func:`.sql.expression.any_` + + :meth:`.types.ARRAY.Comparator.all` + + """ + operator = operator if operator else operators.eq + return operator( + elements._literal_as_binds(other), + elements.CollectionAggregate._create_any(self.expr) + ) + + @util.dependencies("sqlalchemy.sql.elements") + def all(self, elements, other, operator=None): + """Return ``other operator ALL (array)`` clause. + + Argument places are switched, because ALL requires array + expression to be on the right hand-side. + + E.g.:: + + from sqlalchemy.sql import operators + + conn.execute( + select([table.c.data]).where( + table.c.data.all(7, operator=operators.lt) + ) + ) + + :param other: expression to be compared + :param operator: an operator object from the + :mod:`sqlalchemy.sql.operators` + package, defaults to :func:`.operators.eq`. + + .. seealso:: + + :func:`.sql.expression.all_` + + :meth:`.types.ARRAY.Comparator.any` + + """ + operator = operator if operator else operators.eq + return operator( + elements._literal_as_binds(other), + elements.CollectionAggregate._create_all(self.expr) + ) + + comparator_factory = Comparator + + def __init__(self, item_type, as_tuple=False, dimensions=None, + zero_indexes=False): + """Construct an :class:`.types.ARRAY`. + + E.g.:: + + Column('myarray', ARRAY(Integer)) + + Arguments are: + + :param item_type: The data type of items of this array. Note that + dimensionality is irrelevant here, so multi-dimensional arrays like + ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as + ``ARRAY(ARRAY(Integer))`` or such. + + :param as_tuple=False: Specify whether return results + should be converted to tuples from lists. This parameter is + not generally needed as a Python list corresponds well + to a SQL array. + + :param dimensions: if non-None, the ARRAY will assume a fixed + number of dimensions. This impacts how the array is declared + on the database, how it goes about interpreting Python and + result values, as well as how expression behavior in conjunction + with the "getitem" operator works. See the description at + :class:`.types.ARRAY` for additional detail. + + :param zero_indexes=False: when True, index values will be converted + between Python zero-based and SQL one-based indexes, e.g. + a value of one will be added to all index values before passing + to the database. 
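(Illustrative sketch with hypothetical names: per the note above a PostgreSQL backend is assumed, and the ``scores`` table is invented to show the constructor arguments just described.)

.. code:: python

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.types import ARRAY

    metadata = MetaData()

    # zero_indexes=True converts Python 0-based index values to the
    # SQL 1-based convention described above
    scores = Table('scores', metadata,
                   Column('id', Integer, primary_key=True),
                   Column('values', ARRAY(Integer, dimensions=1,
                                          zero_indexes=True)))

    # index and slice access render as SQL array subscript expressions
    stmt = select([scores.c.values[0], scores.c.values[0:3]])

    # ANY comparison; the comparison operator defaults to equality
    stmt = select([scores.c.id]).where(scores.c.values.any(7))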
+ + """ + if isinstance(item_type, ARRAY): + raise ValueError("Do not nest ARRAY types; ARRAY(basetype) " + "handles multi-dimensional arrays of basetype") + if isinstance(item_type, type): + item_type = item_type() + self.item_type = item_type + self.as_tuple = as_tuple + self.dimensions = dimensions + self.zero_indexes = zero_indexes + + @property + def hashable(self): + return self.as_tuple + + @property + def python_type(self): + return list + + def compare_values(self, x, y): + return x == y + + +class REAL(Float): + + """The SQL REAL type.""" + + __visit_name__ = 'REAL' + + +class FLOAT(Float): + + """The SQL FLOAT type.""" + + __visit_name__ = 'FLOAT' + + +class NUMERIC(Numeric): + + """The SQL NUMERIC type.""" + + __visit_name__ = 'NUMERIC' + + +class DECIMAL(Numeric): + + """The SQL DECIMAL type.""" + + __visit_name__ = 'DECIMAL' + + +class INTEGER(Integer): + + """The SQL INT or INTEGER type.""" + + __visit_name__ = 'INTEGER' +INT = INTEGER + + +class SMALLINT(SmallInteger): + + """The SQL SMALLINT type.""" + + __visit_name__ = 'SMALLINT' + + +class BIGINT(BigInteger): + + """The SQL BIGINT type.""" + + __visit_name__ = 'BIGINT' + + +class TIMESTAMP(DateTime): + + """The SQL TIMESTAMP type. + + :class:`~.types.TIMESTAMP` datatypes have support for timezone + storage on some backends, such as PostgreSQL and Oracle. Use the + :paramref:`~types.TIMESTAMP.timezone` argument in order to enable + "TIMESTAMP WITH TIMEZONE" for these backends. + + """ + + __visit_name__ = 'TIMESTAMP' + + def __init__(self, timezone=False): + """Construct a new :class:`.TIMESTAMP`. + + :param timezone: boolean. Indicates that the TIMESTAMP type should + enable timezone support, if available on the target database. + On a per-dialect basis is similar to "TIMESTAMP WITH TIMEZONE". + If the target database does not support timezones, this flag is + ignored. + + + """ + super(TIMESTAMP, self).__init__(timezone=timezone) + + def get_dbapi_type(self, dbapi): + return dbapi.TIMESTAMP + + +class DATETIME(DateTime): + + """The SQL DATETIME type.""" + + __visit_name__ = 'DATETIME' + + +class DATE(Date): + + """The SQL DATE type.""" + + __visit_name__ = 'DATE' + + +class TIME(Time): + + """The SQL TIME type.""" + + __visit_name__ = 'TIME' + + +class TEXT(Text): + + """The SQL TEXT type.""" + + __visit_name__ = 'TEXT' + + +class CLOB(Text): + + """The CLOB type. + + This type is found in Oracle and Informix. + """ + + __visit_name__ = 'CLOB' + + +class VARCHAR(String): + + """The SQL VARCHAR type.""" + + __visit_name__ = 'VARCHAR' + + +class NVARCHAR(Unicode): + + """The SQL NVARCHAR type.""" + + __visit_name__ = 'NVARCHAR' + + +class CHAR(String): + + """The SQL CHAR type.""" + + __visit_name__ = 'CHAR' + + +class NCHAR(Unicode): + + """The SQL NCHAR type.""" + + __visit_name__ = 'NCHAR' + + +class BLOB(LargeBinary): + + """The SQL BLOB type.""" + + __visit_name__ = 'BLOB' + + +class BINARY(_Binary): + + """The SQL BINARY type.""" + + __visit_name__ = 'BINARY' + + +class VARBINARY(_Binary): + + """The SQL VARBINARY type.""" + + __visit_name__ = 'VARBINARY' + + +class BOOLEAN(Boolean): + + """The SQL BOOLEAN type.""" + + __visit_name__ = 'BOOLEAN' + + +class NullType(TypeEngine): + + """An unknown type. + + :class:`.NullType` is used as a default type for those cases where + a type cannot be determined, including: + + * During table reflection, when the type of a column is not recognized + by the :class:`.Dialect` + * When constructing SQL expressions using plain Python objects of + unknown types (e.g. 
``somecolumn == my_special_object``) + * When a new :class:`.Column` is created, and the given type is passed + as ``None`` or is not passed at all. + + The :class:`.NullType` can be used within SQL expression invocation + without issue, it just has no behavior either at the expression + construction level or at the bind-parameter/result processing level. + :class:`.NullType` will result in a :exc:`.CompileError` if the compiler + is asked to render the type itself, such as if it is used in a + :func:`.cast` operation or within a schema creation operation such as that + invoked by :meth:`.MetaData.create_all` or the :class:`.CreateTable` + construct. + + """ + __visit_name__ = 'null' + + _isnull = True + + hashable = False + + def literal_processor(self, dialect): + def process(value): + return "NULL" + return process + + class Comparator(TypeEngine.Comparator): + + def _adapt_expression(self, op, other_comparator): + if isinstance(other_comparator, NullType.Comparator) or \ + not operators.is_commutative(op): + return op, self.expr.type + else: + return other_comparator._adapt_expression(op, self) + comparator_factory = Comparator + + +class MatchType(Boolean): + """Refers to the return type of the MATCH operator. + + As the :meth:`.ColumnOperators.match` is probably the most open-ended + operator in generic SQLAlchemy Core, we can't assume the return type + at SQL evaluation time, as MySQL returns a floating point, not a boolean, + and other backends might do something different. So this type + acts as a placeholder, currently subclassing :class:`.Boolean`. + The type allows dialects to inject result-processing functionality + if needed, and on MySQL will return floating-point values. + + .. versionadded:: 1.0.0 + + """ + +NULLTYPE = NullType() +BOOLEANTYPE = Boolean() +STRINGTYPE = String() +INTEGERTYPE = Integer() +MATCHTYPE = MatchType() + +_type_map = { + int: Integer(), + float: Numeric(), + bool: BOOLEANTYPE, + decimal.Decimal: Numeric(), + dt.date: Date(), + dt.datetime: DateTime(), + dt.time: Time(), + dt.timedelta: Interval(), + util.NoneType: NULLTYPE +} + +if util.py3k: + _type_map[bytes] = LargeBinary() + _type_map[str] = Unicode() +else: + _type_map[unicode] = Unicode() + _type_map[str] = String() + +_type_map_get = _type_map.get + + +def _resolve_value_to_type(value): + _result_type = _type_map_get(type(value), False) + if _result_type is False: + # use inspect() to detect SQLAlchemy built-in + # objects. + insp = inspection.inspect(value, False) + if ( + insp is not None and + # foil mock.Mock() and other impostors by ensuring + # the inspection target itself self-inspects + insp.__class__ in inspection._registrars + ): + raise exc.ArgumentError( + "Object %r is not legal as a SQL literal value" % value) + return NULLTYPE + else: + return _result_type + +# back-assign to type_api +from . 
import type_api +type_api.BOOLEANTYPE = BOOLEANTYPE +type_api.STRINGTYPE = STRINGTYPE +type_api.INTEGERTYPE = INTEGERTYPE +type_api.NULLTYPE = NULLTYPE +type_api.MATCHTYPE = MATCHTYPE +type_api.INDEXABLE = Indexable +type_api._resolve_value_to_type = _resolve_value_to_type +TypeEngine.Comparator.BOOLEANTYPE = BOOLEANTYPE diff --git a/app/lib/sqlalchemy/sql/type_api.py b/app/lib/sqlalchemy/sql/type_api.py new file mode 100644 index 0000000..4b561a7 --- /dev/null +++ b/app/lib/sqlalchemy/sql/type_api.py @@ -0,0 +1,1307 @@ +# sql/type_api.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# <see AUTHORS file> +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Base types API. + +""" + + +from .. import exc, util +from . import operators +from .visitors import Visitable, VisitableType +from .base import SchemaEventTarget + +# these are back-assigned by sqltypes. +BOOLEANTYPE = None +INTEGERTYPE = None +NULLTYPE = None +STRINGTYPE = None +MATCHTYPE = None +INDEXABLE = None +_resolve_value_to_type = None + + +class TypeEngine(Visitable): + """The ultimate base class for all SQL datatypes. + + Common subclasses of :class:`.TypeEngine` include + :class:`.String`, :class:`.Integer`, and :class:`.Boolean`. + + For an overview of the SQLAlchemy typing system, see + :ref:`types_toplevel`. + + .. seealso:: + + :ref:`types_toplevel` + + """ + + _sqla_type = True + _isnull = False + + class Comparator(operators.ColumnOperators): + """Base class for custom comparison operations defined at the + type level. See :attr:`.TypeEngine.comparator_factory`. + + + """ + __slots__ = 'expr', 'type' + + default_comparator = None + + def __init__(self, expr): + self.expr = expr + self.type = expr.type + + @util.dependencies('sqlalchemy.sql.default_comparator') + def operate(self, default_comparator, op, *other, **kwargs): + o = default_comparator.operator_lookup[op.__name__] + return o[0](self.expr, op, *(other + o[1:]), **kwargs) + + @util.dependencies('sqlalchemy.sql.default_comparator') + def reverse_operate(self, default_comparator, op, other, **kwargs): + o = default_comparator.operator_lookup[op.__name__] + return o[0](self.expr, op, other, + reverse=True, *o[1:], **kwargs) + + def _adapt_expression(self, op, other_comparator): + """evaluate the return type of <self> <op> <othertype>, + and apply any adaptations to the given operator. + + This method determines the type of a resulting binary expression + given two source types and an operator. For example, two + :class:`.Column` objects, both of the type :class:`.Integer`, will + produce a :class:`.BinaryExpression` that also has the type + :class:`.Integer` when compared via the addition (``+``) operator. + However, using the addition operator with an :class:`.Integer` + and a :class:`.Date` object will produce a :class:`.Date`, assuming + "days delta" behavior by the database (in reality, most databases + other than PostgreSQL don't accept this particular operation). + + The method returns a tuple of the form <operator>, <type>. + The resulting operator and type will be those applied to the + resulting :class:`.BinaryExpression` as the final operator and the + right-hand side of the expression. + + Note that only a subset of operators make usage of + :meth:`._adapt_expression`, + including math operators and user-defined operators, but not + boolean comparison or special SQL keywords like MATCH or BETWEEN.
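(Type-level operator customization along these lines is normally done through the ``comparator_factory`` hook documented below; as a brief sketch following the pattern shown in the SQLAlchemy documentation, where the "goofy" operator is purely invented:)

.. code:: python

    from sqlalchemy import Integer

    class MyInt(Integer):
        # redefine "+" for expressions of this type so that it emits
        # a custom "goofy" operator instead of addition
        class comparator_factory(Integer.Comparator):
            def __add__(self, other):
                return self.op("goofy")(other)

With the above, an expression such as ``column('x', MyInt()) + 5`` compiles to ``x goofy :x_1`` rather than ``x + :x_1``.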
+ + """ + return op, self.type + + def __reduce__(self): + return _reconstitute_comparator, (self.expr, ) + + hashable = True + """Flag, if False, means values from this type aren't hashable. + + Used by the ORM when uniquing result lists. + + """ + + comparator_factory = Comparator + """A :class:`.TypeEngine.Comparator` class which will apply + to operations performed by owning :class:`.ColumnElement` objects. + + The :attr:`.comparator_factory` attribute is a hook consulted by + the core expression system when column and SQL expression operations + are performed. When a :class:`.TypeEngine.Comparator` class is + associated with this attribute, it allows custom re-definition of + all existing operators, as well as definition of new operators. + Existing operators include those provided by Python operator overloading + such as :meth:`.operators.ColumnOperators.__add__` and + :meth:`.operators.ColumnOperators.__eq__`, + those provided as standard + attributes of :class:`.operators.ColumnOperators` such as + :meth:`.operators.ColumnOperators.like` + and :meth:`.operators.ColumnOperators.in_`. + + Rudimentary usage of this hook is allowed through simple subclassing + of existing types, or alternatively by using :class:`.TypeDecorator`. + See the documentation section :ref:`types_operators` for examples. + + .. versionadded:: 0.8 The expression system was enhanced to support + customization of operators on a per-type level. + + """ + + should_evaluate_none = False + """If True, the Python constant ``None`` is considered to be handled + explicitly by this type. + + The ORM uses this flag to indicate that a positive value of ``None`` + is passed to the column in an INSERT statement, rather than omitting + the column from the INSERT statement which has the effect of firing + off column-level defaults. It also allows types which have special + behavior for Python None, such as a JSON type, to indicate that + they'd like to handle the None value explicitly. + + To set this flag on an existing type, use the + :meth:`.TypeEngine.evaluates_none` method. + + .. seealso:: + + :meth:`.TypeEngine.evaluates_none` + + .. versionadded:: 1.1 + + + """ + + def evaluates_none(self): + """Return a copy of this type which has the :attr:`.should_evaluate_none` + flag set to True. + + E.g.:: + + Table( + 'some_table', metadata, + Column( + 'data', + String(50).evaluates_none(), + nullable=True, + server_default='no value') + ) + + The ORM uses this flag to indicate that a positive value of ``None`` + is passed to the column in an INSERT statement, rather than omitting + the column from the INSERT statement which has the effect of firing + off column-level defaults. It also allows for types which have + special behavior associated with the Python None value to indicate + that the value doesn't necessarily translate into SQL NULL; a + prime example of this is a JSON type which may wish to persist the + JSON value ``'null'``. + + In all cases, the actual NULL SQL value can always be + persisted in any column by using + the :obj:`~.expression.null` SQL construct in an INSERT statement + or associated with an ORM-mapped attribute. + + .. note:: + + The "evaluates none" flag does **not** apply to a value + of ``None`` passed to :paramref:`.Column.default` or + :paramref:`.Column.server_default`; in these cases, ``None`` + still means "no default". + + .. versionadded:: 1.1 + + ..
seealso:: + + :ref:`session_forcing_null` - in the ORM documentation + + :paramref:`.postgresql.JSON.none_as_null` - PostgreSQL JSON + interaction with this flag. + + :attr:`.TypeEngine.should_evaluate_none` - class-level flag + + """ + typ = self.copy() + typ.should_evaluate_none = True + return typ + + def copy(self, **kw): + return self.adapt(self.__class__) + + def compare_against_backend(self, dialect, conn_type): + """Compare this type against the given backend type. + + This function is currently not implemented for SQLAlchemy + types, and for all built-in types will return ``None``. However, + it can be implemented by a user-defined type + where it can be consumed by schema comparison tools such as + Alembic autogenerate. + + A future release of SQLAlchemy will potentially implement this method + for built-in types as well. + + The function should return True if this type is equivalent to the + given type; the type is typically reflected from the database + so should be database specific. The dialect in use is also + passed. It can also return False to assert that the type is + not equivalent. + + :param dialect: a :class:`.Dialect` that is involved in the comparison. + + :param conn_type: the type object reflected from the backend. + + .. versionadded:: 1.0.3 + + """ + return None + + def copy_value(self, value): + return value + + def literal_processor(self, dialect): + """Return a conversion function for processing literal values that are + to be rendered directly without using binds. + + This function is used when the compiler makes use of the + "literal_binds" flag, typically used in DDL generation as well + as in certain scenarios where backends don't accept bound parameters. + + .. versionadded:: 0.9.0 + + """ + return None + + def bind_processor(self, dialect): + """Return a conversion function for processing bind values. + + Returns a callable which will receive a bind parameter value + as the sole positional argument and will return a value to + send to the DB-API. + + If processing is not necessary, the method should return ``None``. + + :param dialect: Dialect instance in use. + + """ + return None + + def result_processor(self, dialect, coltype): + """Return a conversion function for processing result row values. + + Returns a callable which will receive a result row column + value as the sole positional argument and will return a value + to return to the user. + + If processing is not necessary, the method should return ``None``. + + :param dialect: Dialect instance in use. + + :param coltype: DBAPI coltype argument received in cursor.description. + + """ + return None + + def column_expression(self, colexpr): + """Given a SELECT column expression, return a wrapping SQL expression. + + This is typically a SQL function that wraps a column expression + as rendered in the columns clause of a SELECT statement. + It is used for special data types that require + columns to be wrapped in some special database function in order + to coerce the value before being sent back to the application. + It is the SQL analogue of the :meth:`.TypeEngine.result_processor` + method. + + The method is evaluated at statement compile time, as opposed + to statement construction time. + + See also: + + :ref:`types_sql_value_processing` + + """ + + return None + + @util.memoized_property + def _has_column_expression(self): + """memoized boolean, check if column_expression is implemented. + + Allows the method to be skipped for the vast majority of expression + types that don't use this feature.
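(The classic application of ``column_expression``, together with the ``bind_expression`` hook documented next, is a spatial type; the sketch below follows the pattern in the SQLAlchemy documentation, and the ``GEOMETRY`` column specification plus the PostGIS function names are assumptions about the target backend.)

.. code:: python

    from sqlalchemy import func
    from sqlalchemy.types import UserDefinedType

    class Geometry(UserDefinedType):
        def get_col_spec(self):
            return "GEOMETRY"

        def column_expression(self, col):
            # wrap the column in ST_AsText() within the SELECT list
            return func.ST_AsText(col, type_=self)

        def bind_expression(self, bindvalue):
            # wrap bound parameters in ST_GeomFromText()
            return func.ST_GeomFromText(bindvalue, type_=self)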
+ + """ + + return self.__class__.column_expression.__code__ \ + is not TypeEngine.column_expression.__code__ + + def bind_expression(self, bindvalue): + """Given a bind value (i.e. a :class:`.BindParameter` instance), + return a SQL expression in its place. + + This is typically a SQL function that wraps the existing bound + parameter within the statement. It is used for special data types + that require literals being wrapped in some special database function + in order to coerce an application-level value into a database-specific + format. It is the SQL analogue of the + :meth:`.TypeEngine.bind_processor` method. + + The method is evaluated at statement compile time, as opposed + to statement construction time. + + Note that this method, when implemented, should always return + the exact same structure, without any conditional logic, as it + may be used in an executemany() call against an arbitrary number + of bound parameter sets. + + See also: + + :ref:`types_sql_value_processing` + + """ + return None + + @util.memoized_property + def _has_bind_expression(self): + """memoized boolean, check if bind_expression is implemented. + + Allows the method to be skipped for the vast majority of expression + types that don't use this feature. + + """ + + return self.__class__.bind_expression.__code__ \ + is not TypeEngine.bind_expression.__code__ + + def compare_values(self, x, y): + """Compare two values for equality.""" + + return x == y + + def get_dbapi_type(self, dbapi): + """Return the corresponding type object from the underlying DB-API, if + any. + + This can be useful for calling ``setinputsizes()``, for example. + + """ + return None + + @property + def python_type(self): + """Return the Python type object expected to be returned + by instances of this type, if known. + + Basically, for those types which enforce a return type, + or are known across the board to do such for all common + DBAPIs (like ``int`` for example), this will return that type. + + If a return type is not defined, raises + ``NotImplementedError``. + + Note that any type also accommodates NULL in SQL which + means you can also get back ``None`` from any type + in practice. + + """ + raise NotImplementedError() + + def with_variant(self, type_, dialect_name): + """Produce a new type object that will utilize the given + type when applied to the dialect of the given name. + + e.g.:: + + from sqlalchemy.types import String + from sqlalchemy.dialects import mysql + + s = String() + + s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql') + + The construction of :meth:`.TypeEngine.with_variant` is always + from the "fallback" type to that which is dialect specific. + The returned type is an instance of :class:`.Variant`, which + itself provides a :meth:`.Variant.with_variant` + that can be called repeatedly. + + :param type_: a :class:`.TypeEngine` that will be selected + as a variant from the originating type, when a dialect + of the given name is in use. + :param dialect_name: base name of the dialect which uses + this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.) + + ..
versionadded:: 0.7.2 + + """ + return Variant(self, {dialect_name: to_instance(type_)}) + + @util.memoized_property + def _type_affinity(self): + """Return a rudimental 'affinity' value expressing the general class + of type.""" + + typ = None + for t in self.__class__.__mro__: + if t in (TypeEngine, UserDefinedType): + return typ + elif issubclass(t, (TypeEngine, UserDefinedType)): + typ = t + else: + return self.__class__ + + def dialect_impl(self, dialect): + """Return a dialect-specific implementation for this + :class:`.TypeEngine`. + + """ + try: + return dialect._type_memos[self]['impl'] + except KeyError: + return self._dialect_info(dialect)['impl'] + + def _cached_literal_processor(self, dialect): + """Return a dialect-specific literal processor for this type.""" + try: + return dialect._type_memos[self]['literal'] + except KeyError: + d = self._dialect_info(dialect) + d['literal'] = lp = d['impl'].literal_processor(dialect) + return lp + + def _cached_bind_processor(self, dialect): + """Return a dialect-specific bind processor for this type.""" + + try: + return dialect._type_memos[self]['bind'] + except KeyError: + d = self._dialect_info(dialect) + d['bind'] = bp = d['impl'].bind_processor(dialect) + return bp + + def _cached_result_processor(self, dialect, coltype): + """Return a dialect-specific result processor for this type.""" + + try: + return dialect._type_memos[self][coltype] + except KeyError: + d = self._dialect_info(dialect) + # key assumption: DBAPI type codes are + # constants. Else this dictionary would + # grow unbounded. + d[coltype] = rp = d['impl'].result_processor(dialect, coltype) + return rp + + def _dialect_info(self, dialect): + """Return a dialect-specific registry which + caches a dialect-specific implementation, bind processing + function, and one or more result processing functions.""" + + if self in dialect._type_memos: + return dialect._type_memos[self] + else: + impl = self._gen_dialect_impl(dialect) + if impl is self: + impl = self.adapt(type(self)) + # this can't be self, else we create a cycle + assert impl is not self + dialect._type_memos[self] = d = {'impl': impl} + return d + + def _gen_dialect_impl(self, dialect): + return dialect.type_descriptor(self) + + def adapt(self, cls, **kw): + """Produce an "adapted" form of this type, given an "impl" class + to work with. + + This method is used internally to associate generic + types with "implementation" types that are specific to a particular + dialect. + """ + return util.constructor_copy(self, cls, **kw) + + def coerce_compared_value(self, op, value): + """Suggest a type for a 'coerced' Python value in an expression. + + Given an operator and value, gives the type a chance + to return a type which the value should be coerced into. + + The default behavior here is conservative; if the right-hand + side is already coerced into a SQL type based on its + Python type, it is usually left alone. + + End-user functionality extension here should generally be via + :class:`.TypeDecorator`, which provides more liberal behavior in that + it defaults to coercing the other side of the expression into this + type, thus applying special Python conversions above and beyond those + needed by the DBAPI to both sides. It also provides the public method + :meth:`.TypeDecorator.coerce_compared_value` which is intended for + end-user customization of this behavior.
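(A small sketch of this default coercion; the table ``t`` is hypothetical.)

.. code:: python

    from sqlalchemy import Column, Integer, MetaData, Table

    t = Table('t', MetaData(), Column('x', Integer))

    # the plain Python value 5 is coerced via coerce_compared_value;
    # since its affinity matches, the bound parameter adopts the
    # column's Integer type
    expr = t.c.x == 5
    assert isinstance(expr.right.type, Integer)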
+ + """ + _coerced_type = _resolve_value_to_type(value) + if _coerced_type is NULLTYPE or _coerced_type._type_affinity \ + is self._type_affinity: + return self + else: + return _coerced_type + + def _compare_type_affinity(self, other): + return self._type_affinity is other._type_affinity + + def compile(self, dialect=None): + """Produce a string-compiled form of this :class:`.TypeEngine`. + + When called with no arguments, uses a "default" dialect + to produce a string result. + + :param dialect: a :class:`.Dialect` instance. + + """ + # arg, return value is inconsistent with + # ClauseElement.compile()....this is a mistake. + + if not dialect: + dialect = self._default_dialect() + + return dialect.type_compiler.process(self) + + @util.dependencies("sqlalchemy.engine.default") + def _default_dialect(self, default): + if self.__class__.__module__.startswith("sqlalchemy.dialects"): + tokens = self.__class__.__module__.split(".")[0:3] + mod = ".".join(tokens) + return getattr(__import__(mod).dialects, tokens[-1]).dialect() + else: + return default.DefaultDialect() + + def __str__(self): + if util.py2k: + return unicode(self.compile()).\ + encode('ascii', 'backslashreplace') + else: + return str(self.compile()) + + def __repr__(self): + return util.generic_repr(self) + + +class VisitableCheckKWArg(util.EnsureKWArgType, VisitableType): + pass + + +class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)): + """Base for user defined types. + + This should be the base of new types. Note that + for most cases, :class:`.TypeDecorator` is probably + more appropriate:: + + import sqlalchemy.types as types + + class MyType(types.UserDefinedType): + def __init__(self, precision = 8): + self.precision = precision + + def get_col_spec(self, **kw): + return "MYTYPE(%s)" % self.precision + + def bind_processor(self, dialect): + def process(value): + return value + return process + + def result_processor(self, dialect, coltype): + def process(value): + return value + return process + + Once the type is made, it's immediately usable:: + + table = Table('foo', meta, + Column('id', Integer, primary_key=True), + Column('data', MyType(16)) + ) + + The ``get_col_spec()`` method will in most cases receive a keyword + argument ``type_expression`` which refers to the owning expression + of the type as being compiled, such as a :class:`.Column` or + :func:`.cast` construct. This keyword is only sent if the method + accepts keyword arguments (e.g. ``**kw``) in its argument signature; + introspection is used to check for this in order to support legacy + forms of this function. + + .. versionadded:: 1.0.0 the owning expression is passed to + the ``get_col_spec()`` method via the keyword argument + ``type_expression``, if it receives ``**kw`` in its signature. + + """ + __visit_name__ = "user_defined" + + ensure_kwarg = 'get_col_spec' + + class Comparator(TypeEngine.Comparator): + __slots__ = () + + def _adapt_expression(self, op, other_comparator): + if hasattr(self.type, 'adapt_operator'): + util.warn_deprecated( + "UserDefinedType.adapt_operator is deprecated. Create " + "a UserDefinedType.Comparator subclass instead which " + "generates the desired expression constructs, given a " + "particular operator." + ) + return self.type.adapt_operator(op), self.type + else: + return op, self.type + + comparator_factory = Comparator + + def coerce_compared_value(self, op, value): + """Suggest a type for a 'coerced' Python value in an expression. 
+ + Default behavior for :class:`.UserDefinedType` is the + same as that of :class:`.TypeDecorator`; by default it returns + ``self``, assuming the compared value should be coerced into + the same type as this one. See + :meth:`.TypeDecorator.coerce_compared_value` for more detail. + + .. versionchanged:: 0.8 :meth:`.UserDefinedType.coerce_compared_value` + now returns ``self`` by default, rather than falling onto the + more fundamental behavior of + :meth:`.TypeEngine.coerce_compared_value`. + + """ + + return self + + +class TypeDecorator(SchemaEventTarget, TypeEngine): + """Allows the creation of types which add additional functionality + to an existing type. + + This method is preferred to direct subclassing of SQLAlchemy's + built-in types as it ensures that all required functionality of + the underlying type is kept in place. + + Typical usage:: + + import sqlalchemy.types as types + + class MyType(types.TypeDecorator): + '''Prefixes Unicode values with "PREFIX:" on the way in and + strips it off on the way out. + ''' + + impl = types.Unicode + + def process_bind_param(self, value, dialect): + return "PREFIX:" + value + + def process_result_value(self, value, dialect): + return value[7:] + + def copy(self, **kw): + return MyType(self.impl.length) + + The class-level "impl" attribute is required, and can reference any + TypeEngine class. Alternatively, the load_dialect_impl() method + can be used to provide different type classes based on the dialect + given; in this case, the "impl" variable can reference + ``TypeEngine`` as a placeholder. + + Types that receive a Python type that isn't similar to the ultimate type + used may want to define the :meth:`TypeDecorator.coerce_compared_value` + method. This is used to give the expression system a hint when coercing + Python objects into bind parameters within expressions. Consider this + expression:: + + mytable.c.somecol + datetime.date(2009, 5, 15) + + Above, if "somecol" is an ``Integer`` variant, it makes sense that + we're doing date arithmetic, where above is usually interpreted + by databases as adding a number of days to the given date. + The expression system does the right thing by not attempting to + coerce the "date()" value into an integer-oriented bind parameter. + + However, in the case of ``TypeDecorator``, we are usually changing an + incoming Python type to something new - ``TypeDecorator`` by default will + "coerce" the non-typed side to be the same type as itself. Such as below, + we define an "epoch" type that stores a date value as an integer:: + + class MyEpochType(types.TypeDecorator): + impl = types.Integer + + epoch = datetime.date(1970, 1, 1) + + def process_bind_param(self, value, dialect): + return (value - self.epoch).days + + def process_result_value(self, value, dialect): + return self.epoch + timedelta(days=value) + + Our expression of ``somecol + date`` with the above type will coerce the + "date" on the right side to also be treated as ``MyEpochType``. + + This behavior can be overridden via the + :meth:`~TypeDecorator.coerce_compared_value` method, which returns a type + that should be used for the value of the expression. Below we set it such + that an integer value will be treated as an ``Integer``, and any other + value is assumed to be a date and will be treated as a ``MyEpochType``:: + + def coerce_compared_value(self, op, value): + if isinstance(value, int): + return Integer() + else: + return self + + .. 
warning:: + + Note that the **behavior of coerce_compared_value is not inherited + by default from that of the base type**. + If the :class:`.TypeDecorator` is augmenting a + type that requires special logic for certain types of operators, + this method **must** be overridden. A key example is when decorating + the :class:`.postgresql.JSON` and :class:`.postgresql.JSONB` types; + the default rules of :meth:`.TypeEngine.coerce_compared_value` should + be used in order to deal with operators like index operations:: + + class MyJsonType(TypeDecorator): + impl = postgresql.JSON + + def coerce_compared_value(self, op, value): + return self.impl.coerce_compared_value(op, value) + + Without the above step, index operations such as ``mycol['foo']`` + will cause the index value ``'foo'`` to be JSON encoded. + + """ + + __visit_name__ = "type_decorator" + + def __init__(self, *args, **kwargs): + """Construct a :class:`.TypeDecorator`. + + Arguments sent here are passed to the constructor + of the class assigned to the ``impl`` class level attribute, + assuming the ``impl`` is a callable, and the resulting + object is assigned to the ``self.impl`` instance attribute + (thus overriding the class attribute of the same name). + + If the class level ``impl`` is not a callable (the unusual case), + it will be assigned to the same instance attribute 'as-is', + ignoring those arguments passed to the constructor. + + Subclasses can override this to customize the generation + of ``self.impl`` entirely. + + """ + + if not hasattr(self.__class__, 'impl'): + raise AssertionError("TypeDecorator implementations " + "require a class-level variable " + "'impl' which refers to the class of " + "type being decorated") + self.impl = to_instance(self.__class__.impl, *args, **kwargs) + + coerce_to_is_types = (util.NoneType, ) + """Specify those Python types which should be coerced at the expression + level to "IS <constant>" when compared using ``==`` (and same for + ``IS NOT`` in conjunction with ``!=``). + + For most SQLAlchemy types, this includes ``NoneType``, as well as + ``bool``. + + :class:`.TypeDecorator` modifies this list to only include ``NoneType``, + as typedecorator implementations that deal with boolean types are common. + + Custom :class:`.TypeDecorator` classes can override this attribute to + return an empty tuple, in which case no values will be coerced to + constants. + + .. versionadded:: 0.8.2 + Added :attr:`.TypeDecorator.coerce_to_is_types` to allow for easier + control of ``__eq__()`` ``__ne__()`` operations. + + """ + + class Comparator(TypeEngine.Comparator): + __slots__ = () + + def operate(self, op, *other, **kwargs): + kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types + return super(TypeDecorator.Comparator, self).operate( + op, *other, **kwargs) + + def reverse_operate(self, op, other, **kwargs): + kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types + return super(TypeDecorator.Comparator, self).reverse_operate( + op, other, **kwargs) + + @property + def comparator_factory(self): + if TypeDecorator.Comparator in self.impl.comparator_factory.__mro__: + return self.impl.comparator_factory + else: + return type("TDComparator", + (TypeDecorator.Comparator, + self.impl.comparator_factory), + {}) + + def _gen_dialect_impl(self, dialect): + """ + #todo + """ + adapted = dialect.type_descriptor(self) + if adapted is not self: + return adapted + + # otherwise adapt the impl type, link + # to a copy of this TypeDecorator and return + # that.
+ typedesc = self.load_dialect_impl(dialect).dialect_impl(dialect) + tt = self.copy() + if not isinstance(tt, self.__class__): + raise AssertionError('Type object %s does not properly ' + 'implement the copy() method, it must ' + 'return an object of type %s' % + (self, self.__class__)) + tt.impl = typedesc + return tt + + @property + def _type_affinity(self): + """ + #todo + """ + return self.impl._type_affinity + + def _set_parent(self, column): + """Support SchemaEventTarget""" + + super(TypeDecorator, self)._set_parent(column) + + if isinstance(self.impl, SchemaEventTarget): + self.impl._set_parent(column) + + def _set_parent_with_dispatch(self, parent): + """Support SchemaEventTarget""" + + super(TypeDecorator, self)._set_parent_with_dispatch(parent) + + if isinstance(self.impl, SchemaEventTarget): + self.impl._set_parent_with_dispatch(parent) + + def type_engine(self, dialect): + """Return a dialect-specific :class:`.TypeEngine` instance + for this :class:`.TypeDecorator`. + + In most cases this returns a dialect-adapted form of + the :class:`.TypeEngine` type represented by ``self.impl``. + Makes usage of :meth:`dialect_impl` but also traverses + into wrapped :class:`.TypeDecorator` instances. + Behavior can be customized here by overriding + :meth:`load_dialect_impl`. + + """ + adapted = dialect.type_descriptor(self) + if not isinstance(adapted, type(self)): + return adapted + elif isinstance(self.impl, TypeDecorator): + return self.impl.type_engine(dialect) + else: + return self.load_dialect_impl(dialect) + + def load_dialect_impl(self, dialect): + """Return a :class:`.TypeEngine` object corresponding to a dialect. + + This is an end-user override hook that can be used to provide + differing types depending on the given dialect. It is used + by the :class:`.TypeDecorator` implementation of :meth:`type_engine` + to help determine what type should ultimately be returned + for a given :class:`.TypeDecorator`. + + By default returns ``self.impl``. + + """ + return self.impl + + def __getattr__(self, key): + """Proxy all other undefined accessors to the underlying + implementation.""" + return getattr(self.impl, key) + + def process_literal_param(self, value, dialect): + """Receive a literal parameter value to be rendered inline within + a statement. + + This method is used when the compiler renders a + literal value without using binds, typically within DDL + such as in the "server default" of a column or an expression + within a CHECK constraint. + + The returned string will be rendered into the output string. + + .. versionadded:: 0.9.0 + + """ + raise NotImplementedError() + + def process_bind_param(self, value, dialect): + """Receive a bound parameter value to be converted. + + Subclasses override this method to return the + value that should be passed along to the underlying + :class:`.TypeEngine` object, and from there to the + DBAPI ``execute()`` method. + + The operation could be anything desired to perform custom + behavior, such as transforming or serializing data. + This could also be used as a hook for validating logic. + + This operation should be designed with the reverse operation + in mind, which would be the process_result_value method of + this class. + + :param value: Data to operate upon, of any type expected by + this method in the subclass. Can be ``None``. + :param dialect: the :class:`.Dialect` in use. + + """ + + raise NotImplementedError() + + def process_result_value(self, value, dialect): + """Receive a result-row column value to be converted. 
+ + Subclasses should implement this method to operate on data + fetched from the database. + + Subclasses override this method to return the + value that should be passed back to the application, + given a value that is already processed by + the underlying :class:`.TypeEngine` object, originally + from the DBAPI cursor method ``fetchone()`` or similar. + + The operation could be anything desired to perform custom + behavior, such as transforming or serializing data. + This could also be used as a hook for validating logic. + + :param value: Data to operate upon, of any type expected by + this method in the subclass. Can be ``None``. + :param dialect: the :class:`.Dialect` in use. + + This operation should be designed to be reversible by + the "process_bind_param" method of this class. + + """ + + raise NotImplementedError() + + @util.memoized_property + def _has_bind_processor(self): + """memoized boolean, check if process_bind_param is implemented. + + Allows the base process_bind_param to raise + NotImplementedError without needing to test an expensive + exception throw. + + """ + + return self.__class__.process_bind_param.__code__ \ + is not TypeDecorator.process_bind_param.__code__ + + @util.memoized_property + def _has_literal_processor(self): + """memoized boolean, check if process_literal_param is implemented. + + + """ + + return self.__class__.process_literal_param.__code__ \ + is not TypeDecorator.process_literal_param.__code__ + + def literal_processor(self, dialect): + """Provide a literal processing function for the given + :class:`.Dialect`. + + Subclasses here will typically override + :meth:`.TypeDecorator.process_literal_param` instead of this method + directly. + + By default, this method makes use of + :meth:`.TypeDecorator.process_bind_param` if that method is + implemented, where :meth:`.TypeDecorator.process_literal_param` is + not. The rationale here is that :class:`.TypeDecorator` typically + deals with Python conversions of data that are above the layer of + database presentation. With the value converted by + :meth:`.TypeDecorator.process_bind_param`, the underlying type will + then handle whether it needs to be presented to the DBAPI as a bound + parameter or to the database as an inline SQL value. + + .. versionadded:: 0.9.0 + + """ + if self._has_literal_processor: + process_param = self.process_literal_param + elif self._has_bind_processor: + # the bind processor should normally be OK + # for TypeDecorator since it isn't doing DB-level + # handling, the handling here won't be different for bound vs. + # literals. + process_param = self.process_bind_param + else: + process_param = None + + if process_param: + impl_processor = self.impl.literal_processor(dialect) + if impl_processor: + def process(value): + return impl_processor(process_param(value, dialect)) + else: + def process(value): + return process_param(value, dialect) + + return process + else: + return self.impl.literal_processor(dialect) + + def bind_processor(self, dialect): + """Provide a bound value processing function for the + given :class:`.Dialect`. + + This is the method that fulfills the :class:`.TypeEngine` + contract for bound value conversion. :class:`.TypeDecorator` + will wrap a user-defined implementation of + :meth:`process_bind_param` here. + + User-defined code can override this method directly, + though it's likely best to use :meth:`process_bind_param` so that + the processing provided by ``self.impl`` is maintained. + + :param dialect: Dialect instance in use.
+ + This method is the reverse counterpart to the + :meth:`result_processor` method of this class. + + """ + if self._has_bind_processor: + process_param = self.process_bind_param + impl_processor = self.impl.bind_processor(dialect) + if impl_processor: + def process(value): + return impl_processor(process_param(value, dialect)) + + else: + def process(value): + return process_param(value, dialect) + + return process + else: + return self.impl.bind_processor(dialect) + + @util.memoized_property + def _has_result_processor(self): + """memoized boolean, check if process_result_value is implemented. + + Allows the base process_result_value to raise + NotImplementedError without needing to test an expensive + exception throw. + + """ + return self.__class__.process_result_value.__code__ \ + is not TypeDecorator.process_result_value.__code__ + + def result_processor(self, dialect, coltype): + """Provide a result value processing function for the given + :class:`.Dialect`. + + This is the method that fulfills the :class:`.TypeEngine` + contract for result value conversion. :class:`.TypeDecorator` + will wrap a user-defined implementation of + :meth:`process_result_value` here. + + User-defined code can override this method directly, + though it's likely best to use :meth:`process_result_value` so that + the processing provided by ``self.impl`` is maintained. + + :param dialect: Dialect instance in use. + :param coltype: DBAPI coltype argument received in cursor.description. + + This method is the reverse counterpart to the + :meth:`bind_processor` method of this class. + + """ + if self._has_result_processor: + process_value = self.process_result_value + impl_processor = self.impl.result_processor(dialect, + coltype) + if impl_processor: + def process(value): + return process_value(impl_processor(value), dialect) + + else: + def process(value): + return process_value(value, dialect) + + return process + else: + return self.impl.result_processor(dialect, coltype) + + def coerce_compared_value(self, op, value): + """Suggest a type for a 'coerced' Python value in an expression. + + By default, returns self. This method is called by + the expression system when an object using this type is + on the left or right side of an expression against a plain Python + object which does not yet have a SQLAlchemy type assigned:: + + expr = table.c.somecolumn + 35 + + Where above, if ``somecolumn`` uses this type, this method will + be called with the value ``operator.add`` + and ``35``. The return value is whatever SQLAlchemy type should + be used for ``35`` for this particular operation. + + """ + return self + + def copy(self, **kw): + """Produce a copy of this :class:`.TypeDecorator` instance. + + This is a shallow copy and is provided to fulfill part of + the :class:`.TypeEngine` contract. It usually does not + need to be overridden unless the user-defined :class:`.TypeDecorator` + has local state that should be deep-copied. + + """ + + instance = self.__class__.__new__(self.__class__) + instance.__dict__.update(self.__dict__) + return instance + + def get_dbapi_type(self, dbapi): + """Return the DBAPI type object represented by this + :class:`.TypeDecorator`. + + By default this calls upon :meth:`.TypeEngine.get_dbapi_type` of the + underlying "impl". + """ + return self.impl.get_dbapi_type(dbapi) + + def compare_values(self, x, y): + """Given two values, compare them for equality. + + By default this calls upon :meth:`.TypeEngine.compare_values` + of the underlying "impl", which in turn usually + uses the Python equals operator ``==``.
+ + This function is used by the ORM to compare + an original-loaded value with an intercepted + "changed" value, to determine if a net change + has occurred. + + """ + return self.impl.compare_values(x, y) + + def __repr__(self): + return util.generic_repr(self, to_inspect=self.impl) + + +class Variant(TypeDecorator): + """A wrapping type that selects among a variety of + implementations based on dialect in use. + + The :class:`.Variant` type is typically constructed + using the :meth:`.TypeEngine.with_variant` method. + + .. versionadded:: 0.7.2 + + .. seealso:: :meth:`.TypeEngine.with_variant` for an example of use. + + """ + + def __init__(self, base, mapping): + """Construct a new :class:`.Variant`. + + :param base: the base 'fallback' type + :param mapping: dictionary of string dialect names to + :class:`.TypeEngine` instances. + + """ + self.impl = base + self.mapping = mapping + + def coerce_compared_value(self, operator, value): + result = self.impl.coerce_compared_value(operator, value) + if result is self.impl: + return self + else: + return result + + def load_dialect_impl(self, dialect): + if dialect.name in self.mapping: + return self.mapping[dialect.name] + else: + return self.impl + + def _set_parent(self, column): + """Support SchemaEventTarget""" + + if isinstance(self.impl, SchemaEventTarget): + self.impl._set_parent(column) + for impl in self.mapping.values(): + if isinstance(impl, SchemaEventTarget): + impl._set_parent(column) + + def _set_parent_with_dispatch(self, parent): + """Support SchemaEventTarget""" + + if isinstance(self.impl, SchemaEventTarget): + self.impl._set_parent_with_dispatch(parent) + for impl in self.mapping.values(): + if isinstance(impl, SchemaEventTarget): + impl._set_parent_with_dispatch(parent) + + def with_variant(self, type_, dialect_name): + """Return a new :class:`.Variant` which adds the given + type + dialect name to the mapping, in addition to the + mapping present in this :class:`.Variant`. + + :param type_: a :class:`.TypeEngine` that will be selected + as a variant from the originating type, when a dialect + of the given name is in use. + :param dialect_name: base name of the dialect which uses + this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.) + + """ + + if dialect_name in self.mapping: + raise exc.ArgumentError( + "Dialect '%s' is already present in " + "the mapping for this Variant" % dialect_name) + mapping = self.mapping.copy() + mapping[dialect_name] = type_ + return Variant(self.impl, mapping) + + @property + def comparator_factory(self): + """express comparison behavior in terms of the base type""" + return self.impl.comparator_factory + + +def _reconstitute_comparator(expression): + return expression.comparator + + +def to_instance(typeobj, *arg, **kw): + if typeobj is None: + return NULLTYPE + + if util.callable(typeobj): + return typeobj(*arg, **kw) + else: + return typeobj + + +def adapt_type(typeobj, colspecs): + if isinstance(typeobj, type): + typeobj = typeobj() + for t in typeobj.__class__.__mro__[0:-1]: + try: + impltype = colspecs[t] + break + except KeyError: + pass + else: + # couldn't adapt - so just return the type itself + # (it may be a user-defined type) + return typeobj + # if we adapted the given generic type to a database-specific type, + # but it turns out the originally given "generic" type + # is actually a subclass of our resulting type, then we were already + # given a more specific type than that required; so use that. 
+ if (issubclass(typeobj.__class__, impltype)): + return typeobj + return typeobj.adapt(impltype) diff --git a/app/lib/sqlalchemy/sql/util.py b/app/lib/sqlalchemy/sql/util.py new file mode 100644 index 0000000..281d5f6 --- /dev/null +++ b/app/lib/sqlalchemy/sql/util.py @@ -0,0 +1,762 @@ +# sql/util.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""High level utilities which build upon other modules here. + +""" + +from .. import exc, util +from .base import _from_objects, ColumnSet +from . import operators, visitors +from itertools import chain +from collections import deque + +from .elements import BindParameter, ColumnClause, ColumnElement, \ + Null, UnaryExpression, literal_column, Label, _label_reference, \ + _textual_label_reference +from .selectable import ScalarSelect, Join, FromClause, FromGrouping +from .schema import Column + +join_condition = util.langhelpers.public_factory( + Join._join_condition, + ".sql.util.join_condition") + +# names that are still being imported from the outside +from .annotation import _shallow_annotate, _deep_annotate, _deep_deannotate +from .elements import _find_columns +from .ddl import sort_tables + + +def find_join_source(clauses, join_to): + """Given a list of FROM clauses and a selectable, + return the first index and element from the list of + clauses which can be joined against the selectable. returns + None, None if no match is found. + + e.g.:: + + clause1 = table1.join(table2) + clause2 = table4.join(table5) + + join_to = table2.join(table3) + + find_join_source([clause1, clause2], join_to) == clause1 + + """ + + selectables = list(_from_objects(join_to)) + for i, f in enumerate(clauses): + for s in selectables: + if f.is_derived_from(s): + return i, f + else: + return None, None + + +def visit_binary_product(fn, expr): + """Produce a traversal of the given expression, delivering + column comparisons to the given function. + + The function is of the form:: + + def my_fn(binary, left, right) + + For each binary expression located which has a + comparison operator, the product of "left" and + "right" will be delivered to that function, + in terms of that binary. + + Hence an expression like:: + + and_( + (a + b) == q + func.sum(e + f), + j == r + ) + + would have the traversal:: + + a q + a e + a f + b q + b e + b f + j r + + That is, every combination of "left" and + "right" that doesn't further contain + a binary comparison is passed as pairs. 
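(A short usage sketch of this internal helper; the tables ``a`` and ``b`` are hypothetical.)

.. code:: python

    from sqlalchemy import Column, Integer, MetaData, Table, and_
    from sqlalchemy.sql.util import visit_binary_product

    m = MetaData()
    a = Table('a', m, Column('x', Integer), Column('y', Integer))
    b = Table('b', m, Column('x', Integer))

    pairs = []

    def fn(binary, left, right):
        # called once per left/right column combination per comparison
        pairs.append((left, right))

    visit_binary_product(fn, and_(a.c.x + a.c.y == b.c.x, a.c.y == b.c.x))
    # pairs now holds (a.x, b.x) and (a.y, b.x) from the first
    # comparison, plus (a.y, b.x) from the second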
+ + """ + stack = [] + + def visit(element): + if isinstance(element, ScalarSelect): + # we don't want to dig into correlated subqueries, + # those are just column elements by themselves + yield element + elif element.__visit_name__ == 'binary' and \ + operators.is_comparison(element.operator): + stack.insert(0, element) + for l in visit(element.left): + for r in visit(element.right): + fn(stack[0], l, r) + stack.pop(0) + for elem in element.get_children(): + visit(elem) + else: + if isinstance(element, ColumnClause): + yield element + for elem in element.get_children(): + for e in visit(elem): + yield e + list(visit(expr)) + + +def find_tables(clause, check_columns=False, + include_aliases=False, include_joins=False, + include_selects=False, include_crud=False): + """locate Table objects within the given expression.""" + + tables = [] + _visitors = {} + + if include_selects: + _visitors['select'] = _visitors['compound_select'] = tables.append + + if include_joins: + _visitors['join'] = tables.append + + if include_aliases: + _visitors['alias'] = tables.append + + if include_crud: + _visitors['insert'] = _visitors['update'] = \ + _visitors['delete'] = lambda ent: tables.append(ent.table) + + if check_columns: + def visit_column(column): + tables.append(column.table) + _visitors['column'] = visit_column + + _visitors['table'] = tables.append + + visitors.traverse(clause, {'column_collections': False}, _visitors) + return tables + + +def unwrap_order_by(clause): + """Break up an 'order by' expression into individual column-expressions, + without DESC/ASC/NULLS FIRST/NULLS LAST""" + + cols = util.column_set() + result = [] + stack = deque([clause]) + while stack: + t = stack.popleft() + if isinstance(t, ColumnElement) and \ + ( + not isinstance(t, UnaryExpression) or + not operators.is_ordering_modifier(t.modifier) + ): + if isinstance(t, _label_reference): + t = t.element + if isinstance(t, (_textual_label_reference)): + continue + if t not in cols: + cols.add(t) + result.append(t) + else: + for c in t.get_children(): + stack.append(c) + return result + + +def unwrap_label_reference(element): + def replace(elem): + if isinstance(elem, (_label_reference, _textual_label_reference)): + return elem.element + + return visitors.replacement_traverse( + element, {}, replace + ) + + +def expand_column_list_from_order_by(collist, order_by): + """Given the columns clause and ORDER BY of a selectable, + return a list of column expressions that can be added to the collist + corresponding to the ORDER BY, without repeating those already + in the collist. + + """ + cols_already_present = set([ + col.element if col._order_by_label_element is not None + else col for col in collist + ]) + + return [ + col for col in + chain(*[ + unwrap_order_by(o) + for o in order_by + ]) + if col not in cols_already_present + ] + + +def clause_is_present(clause, search): + """Given a target clause and a second to search within, return True + if the target is plainly present in the search without any + subqueries or aliases involved. + + Basically descends through Joins. 
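(A quick sketch of :func:`find_tables`, defined above; the tables are hypothetical.)

.. code:: python

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.sql.util import find_tables

    m = MetaData()
    a = Table('a', m, Column('x', Integer))
    b = Table('b', m, Column('y', Integer))

    stmt = select([a.c.x]).select_from(a.join(b, a.c.x == b.c.y))

    find_tables(stmt)                      # both Table objects are located
    find_tables(stmt, include_joins=True)  # the Join construct as well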
+ + """ + + for elem in surface_selectables(search): + if clause == elem: # use == here so that Annotated's compare + return True + else: + return False + + +def surface_selectables(clause): + stack = [clause] + while stack: + elem = stack.pop() + yield elem + if isinstance(elem, Join): + stack.extend((elem.left, elem.right)) + elif isinstance(elem, FromGrouping): + stack.append(elem.element) + + +def surface_column_elements(clause): + """traverse and yield only outer-exposed column elements, such as would + be addressable in the WHERE clause of a SELECT if this element were + in the columns clause.""" + + stack = deque([clause]) + while stack: + elem = stack.popleft() + yield elem + for sub in elem.get_children(): + if isinstance(sub, FromGrouping): + continue + stack.append(sub) + + +def selectables_overlap(left, right): + """Return True if left/right have some overlapping selectable""" + + return bool( + set(surface_selectables(left)).intersection( + surface_selectables(right) + ) + ) + + +def bind_values(clause): + """Return an ordered list of "bound" values in the given clause. + + E.g.:: + + >>> expr = and_( + ... table.c.foo==5, table.c.foo==7 + ... ) + >>> bind_values(expr) + [5, 7] + """ + + v = [] + + def visit_bindparam(bind): + v.append(bind.effective_value) + + visitors.traverse(clause, {}, {'bindparam': visit_bindparam}) + return v + + +def _quote_ddl_expr(element): + if isinstance(element, util.string_types): + element = element.replace("'", "''") + return "'%s'" % element + else: + return repr(element) + + +class _repr_base(object): + _LIST = 0 + _TUPLE = 1 + _DICT = 2 + + __slots__ = 'max_chars', + + def trunc(self, value): + rep = repr(value) + lenrep = len(rep) + if lenrep > self.max_chars: + segment_length = self.max_chars // 2 + rep = ( + rep[0:segment_length] + + (" ... (%d characters truncated) ... " + % (lenrep - self.max_chars)) + + rep[-segment_length:] + ) + return rep + + +class _repr_row(_repr_base): + """Provide a string view of a row.""" + + __slots__ = 'row', + + def __init__(self, row, max_chars=300): + self.row = row + self.max_chars = max_chars + + def __repr__(self): + trunc = self.trunc + return "(%s%s)" % ( + ", ".join(trunc(value) for value in self.row), + "," if len(self.row) == 1 else "" + ) + + +class _repr_params(_repr_base): + """Provide a string view of bound parameters. + + Truncates display to a given numnber of 'multi' parameter sets, + as well as long values to a given number of characters. + + """ + + __slots__ = 'params', 'batches', + + def __init__(self, params, batches, max_chars=300): + self.params = params + self.batches = batches + self.max_chars = max_chars + + def __repr__(self): + if isinstance(self.params, list): + typ = self._LIST + ismulti = self.params and isinstance( + self.params[0], (list, dict, tuple)) + elif isinstance(self.params, tuple): + typ = self._TUPLE + ismulti = self.params and isinstance( + self.params[0], (list, dict, tuple)) + elif isinstance(self.params, dict): + typ = self._DICT + ismulti = False + else: + return self.trunc(self.params) + + if ismulti and len(self.params) > self.batches: + msg = " ... displaying %i of %i total bound parameter sets ... 
" + return ' '.join(( + self._repr_multi(self.params[:self.batches - 2], typ)[0:-1], + msg % (self.batches, len(self.params)), + self._repr_multi(self.params[-2:], typ)[1:] + )) + elif ismulti: + return self._repr_multi(self.params, typ) + else: + return self._repr_params(self.params, typ) + + def _repr_multi(self, multi_params, typ): + if multi_params: + if isinstance(multi_params[0], list): + elem_type = self._LIST + elif isinstance(multi_params[0], tuple): + elem_type = self._TUPLE + elif isinstance(multi_params[0], dict): + elem_type = self._DICT + else: + assert False, \ + "Unknown parameter type %s" % (type(multi_params[0])) + + elements = ", ".join( + self._repr_params(params, elem_type) + for params in multi_params) + else: + elements = "" + + if typ == self._LIST: + return "[%s]" % elements + else: + return "(%s)" % elements + + def _repr_params(self, params, typ): + trunc = self.trunc + if typ is self._DICT: + return "{%s}" % ( + ", ".join( + "%r: %s" % (key, trunc(value)) + for key, value in params.items() + ) + ) + elif typ is self._TUPLE: + return "(%s%s)" % ( + ", ".join(trunc(value) for value in params), + "," if len(params) == 1 else "" + + ) + else: + return "[%s]" % ( + ", ".join(trunc(value) for value in params) + ) + + +def adapt_criterion_to_null(crit, nulls): + """given criterion containing bind params, convert selected elements + to IS NULL. + + """ + + def visit_binary(binary): + if isinstance(binary.left, BindParameter) \ + and binary.left._identifying_key in nulls: + # reverse order if the NULL is on the left side + binary.left = binary.right + binary.right = Null() + binary.operator = operators.is_ + binary.negate = operators.isnot + elif isinstance(binary.right, BindParameter) \ + and binary.right._identifying_key in nulls: + binary.right = Null() + binary.operator = operators.is_ + binary.negate = operators.isnot + + return visitors.cloned_traverse(crit, {}, {'binary': visit_binary}) + + +def splice_joins(left, right, stop_on=None): + if left is None: + return right + + stack = [(right, None)] + + adapter = ClauseAdapter(left) + ret = None + while stack: + (right, prevright) = stack.pop() + if isinstance(right, Join) and right is not stop_on: + right = right._clone() + right._reset_exported() + right.onclause = adapter.traverse(right.onclause) + stack.append((right.left, right)) + else: + right = adapter.traverse(right) + if prevright is not None: + prevright.left = right + if ret is None: + ret = right + + return ret + + +def reduce_columns(columns, *clauses, **kw): + r"""given a list of columns, return a 'reduced' set based on natural + equivalents. + + the set is reduced to the smallest list of columns which have no natural + equivalent present in the list. A "natural equivalent" means that two + columns will ultimately represent the same value because they are related + by a foreign key. + + \*clauses is an optional list of join clauses which will be traversed + to further identify columns that are "equivalent". + + \**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys + whose tables are not yet configured, or columns that aren't yet present. + + This function is primarily used to determine the most minimal "primary + key" from a selectable, by reducing the set of primary key columns present + in the selectable to just those that are not repeated. 
+ + """ + ignore_nonexistent_tables = kw.pop('ignore_nonexistent_tables', False) + only_synonyms = kw.pop('only_synonyms', False) + + columns = util.ordered_column_set(columns) + + omit = util.column_set() + for col in columns: + for fk in chain(*[c.foreign_keys for c in col.proxy_set]): + for c in columns: + if c is col: + continue + try: + fk_col = fk.column + except exc.NoReferencedColumnError: + # TODO: add specific coverage here + # to test/sql/test_selectable ReduceTest + if ignore_nonexistent_tables: + continue + else: + raise + except exc.NoReferencedTableError: + # TODO: add specific coverage here + # to test/sql/test_selectable ReduceTest + if ignore_nonexistent_tables: + continue + else: + raise + if fk_col.shares_lineage(c) and \ + (not only_synonyms or + c.name == col.name): + omit.add(col) + break + + if clauses: + def visit_binary(binary): + if binary.operator == operators.eq: + cols = util.column_set( + chain(*[c.proxy_set for c in columns.difference(omit)])) + if binary.left in cols and binary.right in cols: + for c in reversed(columns): + if c.shares_lineage(binary.right) and \ + (not only_synonyms or + c.name == binary.left.name): + omit.add(c) + break + for clause in clauses: + if clause is not None: + visitors.traverse(clause, {}, {'binary': visit_binary}) + + return ColumnSet(columns.difference(omit)) + + +def criterion_as_pairs(expression, consider_as_foreign_keys=None, + consider_as_referenced_keys=None, any_operator=False): + """traverse an expression and locate binary criterion pairs.""" + + if consider_as_foreign_keys and consider_as_referenced_keys: + raise exc.ArgumentError("Can only specify one of " + "'consider_as_foreign_keys' or " + "'consider_as_referenced_keys'") + + def col_is(a, b): + # return a is b + return a.compare(b) + + def visit_binary(binary): + if not any_operator and binary.operator is not operators.eq: + return + if not isinstance(binary.left, ColumnElement) or \ + not isinstance(binary.right, ColumnElement): + return + + if consider_as_foreign_keys: + if binary.left in consider_as_foreign_keys and \ + (col_is(binary.right, binary.left) or + binary.right not in consider_as_foreign_keys): + pairs.append((binary.right, binary.left)) + elif binary.right in consider_as_foreign_keys and \ + (col_is(binary.left, binary.right) or + binary.left not in consider_as_foreign_keys): + pairs.append((binary.left, binary.right)) + elif consider_as_referenced_keys: + if binary.left in consider_as_referenced_keys and \ + (col_is(binary.right, binary.left) or + binary.right not in consider_as_referenced_keys): + pairs.append((binary.left, binary.right)) + elif binary.right in consider_as_referenced_keys and \ + (col_is(binary.left, binary.right) or + binary.left not in consider_as_referenced_keys): + pairs.append((binary.right, binary.left)) + else: + if isinstance(binary.left, Column) and \ + isinstance(binary.right, Column): + if binary.left.references(binary.right): + pairs.append((binary.right, binary.left)) + elif binary.right.references(binary.left): + pairs.append((binary.left, binary.right)) + pairs = [] + visitors.traverse(expression, {}, {'binary': visit_binary}) + return pairs + + +class ClauseAdapter(visitors.ReplacingCloningVisitor): + """Clones and modifies clauses based on column correspondence. 
+ + E.g.:: + + table1 = Table('sometable', metadata, + Column('col1', Integer), + Column('col2', Integer) + ) + table2 = Table('someothertable', metadata, + Column('col1', Integer), + Column('col2', Integer) + ) + + condition = table1.c.col1 == table2.c.col1 + + make an alias of table1:: + + s = table1.alias('foo') + + calling ``ClauseAdapter(s).traverse(condition)`` converts + condition to read:: + + s.c.col1 == table2.c.col1 + + """ + + def __init__(self, selectable, equivalents=None, + include_fn=None, exclude_fn=None, + adapt_on_names=False, anonymize_labels=False): + self.__traverse_options__ = { + 'stop_on': [selectable], + 'anonymize_labels': anonymize_labels} + self.selectable = selectable + self.include_fn = include_fn + self.exclude_fn = exclude_fn + self.equivalents = util.column_dict(equivalents or {}) + self.adapt_on_names = adapt_on_names + + def _corresponding_column(self, col, require_embedded, + _seen=util.EMPTY_SET): + newcol = self.selectable.corresponding_column( + col, + require_embedded=require_embedded) + if newcol is None and col in self.equivalents and col not in _seen: + for equiv in self.equivalents[col]: + newcol = self._corresponding_column( + equiv, require_embedded=require_embedded, + _seen=_seen.union([col])) + if newcol is not None: + return newcol + if self.adapt_on_names and newcol is None: + newcol = self.selectable.c.get(col.name) + return newcol + + def replace(self, col): + if isinstance(col, FromClause) and \ + self.selectable.is_derived_from(col): + return self.selectable + elif not isinstance(col, ColumnElement): + return None + elif self.include_fn and not self.include_fn(col): + return None + elif self.exclude_fn and self.exclude_fn(col): + return None + else: + return self._corresponding_column(col, True) + + +class ColumnAdapter(ClauseAdapter): + """Extends ClauseAdapter with extra utility functions. + + Key aspects of ColumnAdapter include: + + * Expressions that are adapted are stored in a persistent + .columns collection; so that an expression E adapted into + an expression E1, will return the same object E1 when adapted + a second time. This is important in particular for things like + Label objects that are anonymized, so that the ColumnAdapter can + be used to present a consistent "adapted" view of things. + + * Exclusion of items from the persistent collection based on + include/exclude rules, but also independent of hash identity. + This because "annotated" items all have the same hash identity as their + parent. + + * "wrapping" capability is added, so that the replacement of an expression + E can proceed through a series of adapters. This differs from the + visitor's "chaining" feature in that the resulting object is passed + through all replacing functions unconditionally, rather than stopping + at the first one that returns non-None. + + * An adapt_required option, used by eager loading to indicate that + We don't trust a result row column that is not translated. 
+ This is to prevent a column from being interpreted as that + of the child row in a self-referential scenario, see + inheritance/test_basic.py->EagerTargetingTest.test_adapt_stringency + + """ + + def __init__(self, selectable, equivalents=None, + chain_to=None, adapt_required=False, + include_fn=None, exclude_fn=None, + adapt_on_names=False, + allow_label_resolve=True, + anonymize_labels=False): + ClauseAdapter.__init__(self, selectable, equivalents, + include_fn=include_fn, exclude_fn=exclude_fn, + adapt_on_names=adapt_on_names, + anonymize_labels=anonymize_labels) + + if chain_to: + self.chain(chain_to) + self.columns = util.populate_column_dict(self._locate_col) + if self.include_fn or self.exclude_fn: + self.columns = self._IncludeExcludeMapping(self, self.columns) + self.adapt_required = adapt_required + self.allow_label_resolve = allow_label_resolve + self._wrap = None + + class _IncludeExcludeMapping(object): + def __init__(self, parent, columns): + self.parent = parent + self.columns = columns + + def __getitem__(self, key): + if ( + self.parent.include_fn and not self.parent.include_fn(key) + ) or ( + self.parent.exclude_fn and self.parent.exclude_fn(key) + ): + if self.parent._wrap: + return self.parent._wrap.columns[key] + else: + return key + return self.columns[key] + + def wrap(self, adapter): + ac = self.__class__.__new__(self.__class__) + ac.__dict__.update(self.__dict__) + ac._wrap = adapter + ac.columns = util.populate_column_dict(ac._locate_col) + if ac.include_fn or ac.exclude_fn: + ac.columns = self._IncludeExcludeMapping(ac, ac.columns) + + return ac + + def traverse(self, obj): + return self.columns[obj] + + adapt_clause = traverse + adapt_list = ClauseAdapter.copy_and_process + + def _locate_col(self, col): + + c = ClauseAdapter.traverse(self, col) + + if self._wrap: + c2 = self._wrap._locate_col(c) + if c2 is not None: + c = c2 + + if self.adapt_required and c is col: + return None + + c._allow_label_resolve = self.allow_label_resolve + + return c + + def __getstate__(self): + d = self.__dict__.copy() + del d['columns'] + return d + + def __setstate__(self, state): + self.__dict__.update(state) + self.columns = util.PopulateDict(self._locate_col) diff --git a/app/lib/sqlalchemy/sql/visitors.py b/app/lib/sqlalchemy/sql/visitors.py new file mode 100644 index 0000000..7f09518 --- /dev/null +++ b/app/lib/sqlalchemy/sql/visitors.py @@ -0,0 +1,328 @@ +# sql/visitors.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Visitor/traversal interface and library functions. + +SQLAlchemy schema and expression constructs rely on a Python-centric +version of the classic "visitor" pattern as the primary way in which +they apply functionality. The most common use of this pattern +is statement compilation, where individual expression classes match +up to rendering methods that produce a string result. Beyond this, +the visitor system is also used to inspect expressions for various +information and patterns, as well as for usage in +some kinds of expression transformation. Other kinds of transformation +use a non-visitor traversal system. + +For many examples of how the visit system is used, see the +sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules. +For an introduction to clause adaption, see +http://techspot.zzzeek.org/2008/01/23/expression-transformations/ + +""" + +from collections import deque +from .. 
import util +import operator +from .. import exc + +__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor', + 'CloningVisitor', 'ReplacingCloningVisitor', 'iterate', + 'iterate_depthfirst', 'traverse_using', 'traverse', + 'traverse_depthfirst', + 'cloned_traverse', 'replacement_traverse'] + + +class VisitableType(type): + """Metaclass which assigns a `_compiler_dispatch` method to classes + having a `__visit_name__` attribute. + + The _compiler_dispatch attribute becomes an instance method which + looks approximately like the following:: + + def _compiler_dispatch(self, visitor, **kw): + '''Look for an attribute named "visit_" + self.__visit_name__ + on the visitor, and call it with the same kw params.''' + visit_attr = 'visit_%s' % self.__visit_name__ + return getattr(visitor, visit_attr)(self, **kw) + + Classes having no __visit_name__ attribute will remain unaffected. + """ + + def __init__(cls, clsname, bases, clsdict): + if clsname != 'Visitable' and \ + hasattr(cls, '__visit_name__'): + _generate_dispatch(cls) + + super(VisitableType, cls).__init__(clsname, bases, clsdict) + + +def _generate_dispatch(cls): + """Return an optimized visit dispatch function for the cls + for use by the compiler. + """ + if '__visit_name__' in cls.__dict__: + visit_name = cls.__visit_name__ + if isinstance(visit_name, str): + # There is an optimization opportunity here because the + # string name of the class's __visit_name__ is known at + # this early stage (import time) so it can be pre-constructed. + getter = operator.attrgetter("visit_%s" % visit_name) + + def _compiler_dispatch(self, visitor, **kw): + try: + meth = getter(visitor) + except AttributeError: + raise exc.UnsupportedCompilationError(visitor, cls) + else: + return meth(self, **kw) + else: + # The optimization opportunity is lost for this case because the + # __visit_name__ is not yet a string. As a result, the visit + # string has to be recalculated with each compilation. + def _compiler_dispatch(self, visitor, **kw): + visit_attr = 'visit_%s' % self.__visit_name__ + try: + meth = getattr(visitor, visit_attr) + except AttributeError: + raise exc.UnsupportedCompilationError(visitor, cls) + else: + return meth(self, **kw) + + _compiler_dispatch.__doc__ = \ + """Look for an attribute named "visit_" + self.__visit_name__ + on the visitor, and call it with the same kw params. + """ + cls._compiler_dispatch = _compiler_dispatch + + +class Visitable(util.with_metaclass(VisitableType, object)): + """Base class for visitable objects, applies the + ``VisitableType`` metaclass. + + """ + + +class ClauseVisitor(object): + """Base class for visitor objects which can traverse using + the traverse() function. + + """ + + __traverse_options__ = {} + + def traverse_single(self, obj, **kw): + for v in self._visitor_iterator: + meth = getattr(v, "visit_%s" % obj.__visit_name__, None) + if meth: + return meth(obj, **kw) + + def iterate(self, obj): + """traverse the given expression structure, returning an iterator + of all elements.
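A minimal sketch of the `visit_<name>` dispatch described above: a `ClauseVisitor` subclass whose method names are matched against each element's `__visit_name__` (the expression here is hypothetical):

.. code:: python

    from sqlalchemy import column
    from sqlalchemy.sql.visitors import ClauseVisitor

    class ColumnCollector(ClauseVisitor):
        """Collect the name of every ColumnClause in an expression."""

        def __init__(self):
            self.names = []

        def visit_column(self, col):
            self.names.append(col.name)

    v = ColumnCollector()
    v.traverse(column('a') + column('b'))
    print(v.names)  # ['a', 'b']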
+ + """ + return iterate(obj, self.__traverse_options__) + + def traverse(self, obj): + """traverse and visit the given expression structure.""" + + return traverse(obj, self.__traverse_options__, self._visitor_dict) + + @util.memoized_property + def _visitor_dict(self): + visitors = {} + + for name in dir(self): + if name.startswith('visit_'): + visitors[name[6:]] = getattr(self, name) + return visitors + + @property + def _visitor_iterator(self): + """iterate through this visitor and each 'chained' visitor.""" + + v = self + while v: + yield v + v = getattr(v, '_next', None) + + def chain(self, visitor): + """'chain' an additional ClauseVisitor onto this ClauseVisitor. + + the chained visitor will receive all visit events after this one. + + """ + tail = list(self._visitor_iterator)[-1] + tail._next = visitor + return self + + +class CloningVisitor(ClauseVisitor): + """Base class for visitor objects which can traverse using + the cloned_traverse() function. + + """ + + def copy_and_process(self, list_): + """Apply cloned traversal to the given list of elements, and return + the new list. + + """ + return [self.traverse(x) for x in list_] + + def traverse(self, obj): + """traverse and visit the given expression structure.""" + + return cloned_traverse( + obj, self.__traverse_options__, self._visitor_dict) + + +class ReplacingCloningVisitor(CloningVisitor): + """Base class for visitor objects which can traverse using + the replacement_traverse() function. + + """ + + def replace(self, elem): + """receive pre-copied elements during a cloning traversal. + + If the method returns a new element, the element is used + instead of creating a simple copy of the element. Traversal + will halt on the newly returned element if it is re-encountered. + """ + return None + + def traverse(self, obj): + """traverse and visit the given expression structure.""" + + def replace(elem): + for v in self._visitor_iterator: + e = v.replace(elem) + if e is not None: + return e + return replacement_traverse(obj, self.__traverse_options__, replace) + + +def iterate(obj, opts): + """traverse the given expression structure, returning an iterator. + + traversal is configured to be breadth-first. + + """ + # fasttrack for atomic elements like columns + children = obj.get_children(**opts) + if not children: + return [obj] + + traversal = deque() + stack = deque([obj]) + while stack: + t = stack.popleft() + traversal.append(t) + for c in t.get_children(**opts): + stack.append(c) + return iter(traversal) + + +def iterate_depthfirst(obj, opts): + """traverse the given expression structure, returning an iterator. + + traversal is configured to be depth-first. + + """ + # fasttrack for atomic elements like columns + children = obj.get_children(**opts) + if not children: + return [obj] + + stack = deque([obj]) + traversal = deque() + while stack: + t = stack.pop() + traversal.appendleft(t) + for c in t.get_children(**opts): + stack.append(c) + return iter(traversal) + + +def traverse_using(iterator, obj, visitors): + """visit the given expression structure using the given iterator of + objects. + + """ + for target in iterator: + meth = visitors.get(target.__visit_name__, None) + if meth: + meth(target) + return obj + + +def traverse(obj, opts, visitors): + """traverse and visit the given expression structure using the default + iterator. 
+ + """ + return traverse_using(iterate(obj, opts), obj, visitors) + + +def traverse_depthfirst(obj, opts, visitors): + """traverse and visit the given expression structure using the + depth-first iterator. + + """ + return traverse_using(iterate_depthfirst(obj, opts), obj, visitors) + + +def cloned_traverse(obj, opts, visitors): + """clone the given expression structure, allowing + modifications by visitors.""" + + cloned = {} + stop_on = set(opts.get('stop_on', [])) + + def clone(elem): + if elem in stop_on: + return elem + else: + if id(elem) not in cloned: + cloned[id(elem)] = newelem = elem._clone() + newelem._copy_internals(clone=clone) + meth = visitors.get(newelem.__visit_name__, None) + if meth: + meth(newelem) + return cloned[id(elem)] + + if obj is not None: + obj = clone(obj) + return obj + + +def replacement_traverse(obj, opts, replace): + """clone the given expression structure, allowing element + replacement by a given replacement function.""" + + cloned = {} + stop_on = set([id(x) for x in opts.get('stop_on', [])]) + + def clone(elem, **kw): + if id(elem) in stop_on or \ + 'no_replacement_traverse' in elem._annotations: + return elem + else: + newelem = replace(elem) + if newelem is not None: + stop_on.add(id(newelem)) + return newelem + else: + if elem not in cloned: + cloned[elem] = newelem = elem._clone() + newelem._copy_internals(clone=clone, **kw) + return cloned[elem] + + if obj is not None: + obj = clone(obj, **opts) + return obj diff --git a/app/lib/sqlalchemy/testing/__init__.py b/app/lib/sqlalchemy/testing/__init__.py new file mode 100644 index 0000000..ff00fbf --- /dev/null +++ b/app/lib/sqlalchemy/testing/__init__.py @@ -0,0 +1,36 @@ +# testing/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + + +from .warnings import assert_warnings + +from . import config + +from .exclusions import db_spec, _is_excluded, fails_if, skip_if, future,\ + fails_on, fails_on_everything_except, skip, only_on, exclude, \ + against as _against, _server_version, only_if, fails + + +def against(*queries): + return _against(config._current, *queries) + +from .assertions import emits_warning, emits_warning_on, uses_deprecated, \ + eq_, ne_, le_, is_, is_not_, startswith_, assert_raises, \ + assert_raises_message, AssertsCompiledSQL, ComparesTables, \ + AssertsExecutionResults, expect_deprecated, expect_warnings, \ + in_, not_in_, eq_ignore_whitespace, eq_regex, is_true, is_false + +from .util import run_as_contextmanager, rowset, fail, \ + provide_metadata, adict, force_drop_names, \ + teardown_events + +crashes = skip + +from .config import db +from .config import requirements as requires + +from . import mock diff --git a/app/lib/sqlalchemy/testing/assertions.py b/app/lib/sqlalchemy/testing/assertions.py new file mode 100644 index 0000000..3ee3893 --- /dev/null +++ b/app/lib/sqlalchemy/testing/assertions.py @@ -0,0 +1,520 @@ +# testing/assertions.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from __future__ import absolute_import + +from . 
import util as testutil +from sqlalchemy import pool, orm, util +from sqlalchemy.engine import default, url +from sqlalchemy.util import decorator, compat +from sqlalchemy import types as sqltypes, schema, exc as sa_exc +import warnings +import re +from .exclusions import db_spec +from . import assertsql +from . import config +from .util import fail +import contextlib +from . import mock + + +def expect_warnings(*messages, **kw): + """Context manager which expects one or more warnings. + + With no arguments, squelches all SAWarnings emitted via + sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise + pass string expressions that will match selected warnings via regex; + all non-matching warnings are sent through. + + The expect version **asserts** that the warnings were in fact seen. + + Note that the test suite sets SAWarning warnings to raise exceptions. + + """ + return _expect_warnings(sa_exc.SAWarning, messages, **kw) + + +@contextlib.contextmanager +def expect_warnings_on(db, *messages, **kw): + """Context manager which expects one or more warnings on specific + dialects. + + The expect version **asserts** that the warnings were in fact seen. + + """ + spec = db_spec(db) + + if isinstance(db, util.string_types) and not spec(config._current): + yield + else: + with expect_warnings(*messages, **kw): + yield + + +def emits_warning(*messages): + """Decorator form of expect_warnings(). + + Note that emits_warning does **not** assert that the warnings + were in fact seen. + + """ + + @decorator + def decorate(fn, *args, **kw): + with expect_warnings(assert_=False, *messages): + return fn(*args, **kw) + + return decorate + + +def expect_deprecated(*messages, **kw): + return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw) + + +def emits_warning_on(db, *messages): + """Mark a test as emitting a warning on a specific dialect. + + With no arguments, squelches all SAWarning failures. Or pass one or more + strings; these will be matched to the root of the warning description by + warnings.filterwarnings(). + + Note that emits_warning_on does **not** assert that the warnings + were in fact seen. + + """ + @decorator + def decorate(fn, *args, **kw): + with expect_warnings_on(db, assert_=False, *messages): + return fn(*args, **kw) + + return decorate + + +def uses_deprecated(*messages): + """Mark a test as immune from fatal deprecation warnings. + + With no arguments, squelches all SADeprecationWarning failures. + Or pass one or more strings; these will be matched to the root + of the warning description by warnings.filterwarnings(). + + As a special case, you may pass a function name prefixed with // + and it will be re-written as needed to match the standard warning + verbiage emitted by the sqlalchemy.util.deprecated decorator. + + Note that uses_deprecated does **not** assert that the warnings + were in fact seen. 
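A sketch of `expect_warnings()` in use; the argument is treated as a regex matched against `SAWarning` messages emitted through `sqlalchemy.util.warn`, and the warning text here is hypothetical (this also assumes the vendored `sqlalchemy.testing` package imports cleanly outside the test harness):

.. code:: python

    from sqlalchemy import util
    from sqlalchemy.testing.assertions import expect_warnings

    # passes: the emitted SAWarning matches the regex; an AssertionError
    # would be raised on exit if no matching warning had been seen
    with expect_warnings("unsupported .* option"):
        util.warn("unsupported frobnicate option, ignoring")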
+ + """ + + @decorator + def decorate(fn, *args, **kw): + with expect_deprecated(*messages, assert_=False): + return fn(*args, **kw) + return decorate + + +@contextlib.contextmanager +def _expect_warnings(exc_cls, messages, regex=True, assert_=True, + py2konly=False): + + if regex: + filters = [re.compile(msg, re.I | re.S) for msg in messages] + else: + filters = messages + + seen = set(filters) + + real_warn = warnings.warn + + def our_warn(msg, exception, *arg, **kw): + if not issubclass(exception, exc_cls): + return real_warn(msg, exception, *arg, **kw) + + if not filters: + return + + for filter_ in filters: + if (regex and filter_.match(msg)) or \ + (not regex and filter_ == msg): + seen.discard(filter_) + break + else: + real_warn(msg, exception, *arg, **kw) + + with mock.patch("warnings.warn", our_warn): + yield + + if assert_ and (not py2konly or not compat.py3k): + assert not seen, "Warnings were not seen: %s" % \ + ", ".join("%r" % (s.pattern if regex else s) for s in seen) + + +def global_cleanup_assertions(): + """Check things that have to be finalized at the end of a test suite. + + Hardcoded at the moment, a modular system can be built here + to support things like PG prepared transactions, tables all + dropped, etc. + + """ + _assert_no_stray_pool_connections() + +_STRAY_CONNECTION_FAILURES = 0 + + +def _assert_no_stray_pool_connections(): + global _STRAY_CONNECTION_FAILURES + + # lazy gc on cPython means "do nothing." pool connections + # shouldn't be in cycles, should go away. + testutil.lazy_gc() + + # however, once in awhile, on an EC2 machine usually, + # there's a ref in there. usually just one. + if pool._refs: + + # OK, let's be somewhat forgiving. + _STRAY_CONNECTION_FAILURES += 1 + + print("Encountered a stray connection in test cleanup: %s" + % str(pool._refs)) + # then do a real GC sweep. We shouldn't even be here + # so a single sweep should really be doing it, otherwise + # there's probably a real unreachable cycle somewhere. + testutil.gc_collect() + + # if we've already had two of these occurrences, or + # after a hard gc sweep we still have pool._refs?! + # now we have to raise. + if pool._refs: + err = str(pool._refs) + + # but clean out the pool refs collection directly, + # reset the counter, + # so the error doesn't at least keep happening. 
+ pool._refs.clear() + _STRAY_CONNECTION_FAILURES = 0 + warnings.warn( + "Stray connection refused to leave " + "after gc.collect(): %s" % err) + elif _STRAY_CONNECTION_FAILURES > 10: + assert False, "Encountered more than 10 stray connections" + _STRAY_CONNECTION_FAILURES = 0 + + +def eq_regex(a, b, msg=None): + assert re.match(b, a), msg or "%r !~ %r" % (a, b) + + +def eq_(a, b, msg=None): + """Assert a == b, with repr messaging on failure.""" + assert a == b, msg or "%r != %r" % (a, b) + + +def ne_(a, b, msg=None): + """Assert a != b, with repr messaging on failure.""" + assert a != b, msg or "%r == %r" % (a, b) + + +def le_(a, b, msg=None): + """Assert a <= b, with repr messaging on failure.""" + assert a <= b, msg or "%r != %r" % (a, b) + + +def is_true(a, msg=None): + is_(a, True, msg=msg) + + +def is_false(a, msg=None): + is_(a, False, msg=msg) + + +def is_(a, b, msg=None): + """Assert a is b, with repr messaging on failure.""" + assert a is b, msg or "%r is not %r" % (a, b) + + +def is_not_(a, b, msg=None): + """Assert a is not b, with repr messaging on failure.""" + assert a is not b, msg or "%r is %r" % (a, b) + + +def in_(a, b, msg=None): + """Assert a in b, with repr messaging on failure.""" + assert a in b, msg or "%r not in %r" % (a, b) + + +def not_in_(a, b, msg=None): + """Assert a not in b, with repr messaging on failure.""" + assert a not in b, msg or "%r is in %r" % (a, b) + + +def startswith_(a, fragment, msg=None): + """Assert a.startswith(fragment), with repr messaging on failure.""" + assert a.startswith(fragment), msg or "%r does not start with %r" % ( + a, fragment) + + +def eq_ignore_whitespace(a, b, msg=None): + a = re.sub(r'^\s+?|\n', "", a) + a = re.sub(r' {2,}', " ", a) + b = re.sub(r'^\s+?|\n', "", b) + b = re.sub(r' {2,}', " ", b) + + assert a == b, msg or "%r != %r" % (a, b) + + +def assert_raises(except_cls, callable_, *args, **kw): + try: + callable_(*args, **kw) + success = False + except except_cls: + success = True + + # assert outside the block so it works for AssertionError too!
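These helpers read as a thin layer over `assert` with repr-style failure messages; a short sketch:

.. code:: python

    from sqlalchemy.testing.assertions import assert_raises, eq_, in_, ne_

    eq_(2 + 2, 4)
    ne_('a', 'b')
    in_('a', 'abc')
    assert_raises(ZeroDivisionError, lambda: 1 / 0)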
+ assert success, "Callable did not raise an exception" + + +def assert_raises_message(except_cls, msg, callable_, *args, **kwargs): + try: + callable_(*args, **kwargs) + assert False, "Callable did not raise an exception" + except except_cls as e: + assert re.search( + msg, util.text_type(e), re.UNICODE), "%r !~ %s" % (msg, e) + print(util.text_type(e).encode('utf-8')) + + +class AssertsCompiledSQL(object): + def assert_compile(self, clause, result, params=None, + checkparams=None, dialect=None, + checkpositional=None, + check_prefetch=None, + use_default_dialect=False, + allow_dialect_select=False, + literal_binds=False, + schema_translate_map=None): + if use_default_dialect: + dialect = default.DefaultDialect() + elif allow_dialect_select: + dialect = None + else: + if dialect is None: + dialect = getattr(self, '__dialect__', None) + + if dialect is None: + dialect = config.db.dialect + elif dialect == 'default': + dialect = default.DefaultDialect() + elif dialect == 'default_enhanced': + dialect = default.StrCompileDialect() + elif isinstance(dialect, util.string_types): + dialect = url.URL(dialect).get_dialect()() + + kw = {} + compile_kwargs = {} + + if schema_translate_map: + kw['schema_translate_map'] = schema_translate_map + + if params is not None: + kw['column_keys'] = list(params) + + if literal_binds: + compile_kwargs['literal_binds'] = True + + if isinstance(clause, orm.Query): + context = clause._compile_context() + context.statement.use_labels = True + clause = context.statement + + if compile_kwargs: + kw['compile_kwargs'] = compile_kwargs + + c = clause.compile(dialect=dialect, **kw) + + param_str = repr(getattr(c, 'params', {})) + + if util.py3k: + param_str = param_str.encode('utf-8').decode('ascii', 'ignore') + print( + ("\nSQL String:\n" + + util.text_type(c) + + param_str).encode('utf-8')) + else: + print( + "\nSQL String:\n" + + util.text_type(c).encode('utf-8') + + param_str) + + cc = re.sub(r'[\n\t]', '', util.text_type(c)) + + eq_(cc, result, "%r != %r on dialect %r" % (cc, result, dialect)) + + if checkparams is not None: + eq_(c.construct_params(params), checkparams) + if checkpositional is not None: + p = c.construct_params(params) + eq_(tuple([p[x] for x in c.positiontup]), checkpositional) + if check_prefetch is not None: + eq_(c.prefetch, check_prefetch) + + +class ComparesTables(object): + + def assert_tables_equal(self, table, reflected_table, strict_types=False): + assert len(table.c) == len(reflected_table.c) + for c, reflected_c in zip(table.c, reflected_table.c): + eq_(c.name, reflected_c.name) + assert reflected_c is reflected_table.c[c.name] + eq_(c.primary_key, reflected_c.primary_key) + eq_(c.nullable, reflected_c.nullable) + + if strict_types: + msg = "Type '%s' doesn't correspond to type '%s'" + assert isinstance(reflected_c.type, type(c.type)), \ + msg % (reflected_c.type, c.type) + else: + self.assert_types_base(reflected_c, c) + + if isinstance(c.type, sqltypes.String): + eq_(c.type.length, reflected_c.type.length) + + eq_( + set([f.column.name for f in c.foreign_keys]), + set([f.column.name for f in reflected_c.foreign_keys]) + ) + if c.server_default: + assert isinstance(reflected_c.server_default, + schema.FetchedValue) + + assert len(table.primary_key) == len(reflected_table.primary_key) + for c in table.primary_key: + assert reflected_table.primary_key.columns[c.name] is not None + + def assert_types_base(self, c1, c2): + assert c1.type._compare_type_affinity(c2.type),\ + "On column %r, type '%s' doesn't correspond to type '%s'" % \ + 
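A sketch of `AssertsCompiledSQL` in a test class; `__dialect__ = 'default'` selects the backend-neutral `DefaultDialect`, and the table here is hypothetical:

.. code:: python

    from sqlalchemy import column, select, table
    from sqlalchemy.testing.assertions import AssertsCompiledSQL

    class CompileTest(AssertsCompiledSQL):
        __dialect__ = 'default'

        def test_where(self):
            t = table('t', column('x'))
            # newlines/tabs are stripped from the compiled string before
            # comparison, so the expected SQL is written on one line
            self.assert_compile(
                select([t.c.x]).where(t.c.x == 5),
                "SELECT t.x FROM t WHERE t.x = :x_1",
                checkparams={'x_1': 5}
            )

    CompileTest().test_where()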
(c1.name, c1.type, c2.type) + + +class AssertsExecutionResults(object): + def assert_result(self, result, class_, *objects): + result = list(result) + print(repr(result)) + self.assert_list(result, class_, objects) + + def assert_list(self, result, class_, list): + self.assert_(len(result) == len(list), + "result list is not the same size as test list, " + + "for class " + class_.__name__) + for i in range(0, len(list)): + self.assert_row(class_, result[i], list[i]) + + def assert_row(self, class_, rowobj, desc): + self.assert_(rowobj.__class__ is class_, + "item class is not " + repr(class_)) + for key, value in desc.items(): + if isinstance(value, tuple): + if isinstance(value[1], list): + self.assert_list(getattr(rowobj, key), value[0], value[1]) + else: + self.assert_row(value[0], getattr(rowobj, key), value[1]) + else: + self.assert_(getattr(rowobj, key) == value, + "attribute %s value %s does not match %s" % ( + key, getattr(rowobj, key), value)) + + def assert_unordered_result(self, result, cls, *expected): + """As assert_result, but the order of objects is not considered. + + The algorithm is very expensive but not a big deal for the small + numbers of rows that the test suite manipulates. + """ + + class immutabledict(dict): + def __hash__(self): + return id(self) + + found = util.IdentitySet(result) + expected = set([immutabledict(e) for e in expected]) + + for wrong in util.itertools_filterfalse(lambda o: + isinstance(o, cls), found): + fail('Unexpected type "%s", expected "%s"' % ( + type(wrong).__name__, cls.__name__)) + + if len(found) != len(expected): + fail('Unexpected object count "%s", expected "%s"' % ( + len(found), len(expected))) + + NOVALUE = object() + + def _compare_item(obj, spec): + for key, value in spec.items(): + if isinstance(value, tuple): + try: + self.assert_unordered_result( + getattr(obj, key), value[0], *value[1]) + except AssertionError: + return False + else: + if getattr(obj, key, NOVALUE) != value: + return False + return True + + for expected_item in expected: + for found_item in found: + if _compare_item(found_item, expected_item): + found.remove(found_item) + break + else: + fail( + "Expected %s instance with attributes %s not found." % ( + cls.__name__, repr(expected_item))) + return True + + def sql_execution_asserter(self, db=None): + if db is None: + from . 
import db as db + + return assertsql.assert_engine(db) + + def assert_sql_execution(self, db, callable_, *rules): + with self.sql_execution_asserter(db) as asserter: + callable_() + asserter.assert_(*rules) + + def assert_sql(self, db, callable_, rules): + + newrules = [] + for rule in rules: + if isinstance(rule, dict): + newrule = assertsql.AllOf(*[ + assertsql.CompiledSQL(k, v) for k, v in rule.items() + ]) + else: + newrule = assertsql.CompiledSQL(*rule) + newrules.append(newrule) + + self.assert_sql_execution(db, callable_, *newrules) + + def assert_sql_count(self, db, callable_, count): + self.assert_sql_execution( + db, callable_, assertsql.CountStatements(count)) + + @contextlib.contextmanager + def assert_execution(self, *rules): + assertsql.asserter.add_rules(rules) + try: + yield + assertsql.asserter.statement_complete() + finally: + assertsql.asserter.clear_rules() + + def assert_statement_count(self, count): + return self.assert_execution(assertsql.CountStatements(count)) diff --git a/app/lib/sqlalchemy/testing/assertsql.py b/app/lib/sqlalchemy/testing/assertsql.py new file mode 100644 index 0000000..e39b631 --- /dev/null +++ b/app/lib/sqlalchemy/testing/assertsql.py @@ -0,0 +1,377 @@ +# testing/assertsql.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from ..engine.default import DefaultDialect +from .. import util +import re +import collections +import contextlib +from .. import event +from sqlalchemy.schema import _DDLCompiles +from sqlalchemy.engine.util import _distill_params +from sqlalchemy.engine import url + + +class AssertRule(object): + + is_consumed = False + errormessage = None + consume_statement = True + + def process_statement(self, execute_observed): + pass + + def no_more_statements(self): + assert False, 'All statements are complete, but pending '\ + 'assertion rules remain' + + +class SQLMatchRule(AssertRule): + pass + + +class CursorSQL(SQLMatchRule): + consume_statement = False + + def __init__(self, statement, params=None): + self.statement = statement + self.params = params + + def process_statement(self, execute_observed): + stmt = execute_observed.statements[0] + if self.statement != stmt.statement or ( + self.params is not None and self.params != stmt.parameters): + self.errormessage = \ + "Testing for exact SQL %s parameters %s received %s %s" % ( + self.statement, self.params, + stmt.statement, stmt.parameters + ) + else: + execute_observed.statements.pop(0) + self.is_consumed = True + if not execute_observed.statements: + self.consume_statement = True + + +class CompiledSQL(SQLMatchRule): + + def __init__(self, statement, params=None, dialect='default'): + self.statement = statement + self.params = params + self.dialect = dialect + + def _compare_sql(self, execute_observed, received_statement): + stmt = re.sub(r'[\n\t]', '', self.statement) + return received_statement == stmt + + def _compile_dialect(self, execute_observed): + if self.dialect == 'default': + return DefaultDialect() + else: + # ugh + if self.dialect == 'postgresql': + params = {'implicit_returning': True} + else: + params = {} + return url.URL(self.dialect).get_dialect()(**params) + + def _received_statement(self, execute_observed): + """reconstruct the statement and params in terms + of a target dialect, which for CompiledSQL is just DefaultDialect.""" + + context = execute_observed.context + compare_dialect = 
self._compile_dialect(execute_observed) + if isinstance(context.compiled.statement, _DDLCompiles): + compiled = \ + context.compiled.statement.compile( + dialect=compare_dialect, + schema_translate_map=context. + execution_options.get('schema_translate_map')) + else: + compiled = ( + context.compiled.statement.compile( + dialect=compare_dialect, + column_keys=context.compiled.column_keys, + inline=context.compiled.inline, + schema_translate_map=context. + execution_options.get('schema_translate_map')) + ) + _received_statement = re.sub(r'[\n\t]', '', util.text_type(compiled)) + parameters = execute_observed.parameters + + if not parameters: + _received_parameters = [compiled.construct_params()] + else: + _received_parameters = [ + compiled.construct_params(m) for m in parameters] + + return _received_statement, _received_parameters + + def process_statement(self, execute_observed): + context = execute_observed.context + + _received_statement, _received_parameters = \ + self._received_statement(execute_observed) + params = self._all_params(context) + + equivalent = self._compare_sql(execute_observed, _received_statement) + + if equivalent: + if params is not None: + all_params = list(params) + all_received = list(_received_parameters) + while all_params and all_received: + param = dict(all_params.pop(0)) + + for idx, received in enumerate(list(all_received)): + # do a positive compare only + for param_key in param: + # a key in param did not match current + # 'received' + if param_key not in received or \ + received[param_key] != param[param_key]: + break + else: + # all keys in param matched 'received'; + # onto next param + del all_received[idx] + break + else: + # param did not match any entry + # in all_received + equivalent = False + break + if all_params or all_received: + equivalent = False + + if equivalent: + self.is_consumed = True + self.errormessage = None + else: + self.errormessage = self._failure_message(params) % { + 'received_statement': _received_statement, + 'received_parameters': _received_parameters + } + + def _all_params(self, context): + if self.params: + if util.callable(self.params): + params = self.params(context) + else: + params = self.params + if not isinstance(params, list): + params = [params] + return params + else: + return None + + def _failure_message(self, expected_params): + return ( + 'Testing for compiled statement %r partial params %r, ' + 'received %%(received_statement)r with params ' + '%%(received_parameters)r' % ( + self.statement.replace('%', '%%'), expected_params + ) + ) + + +class RegexSQL(CompiledSQL): + def __init__(self, regex, params=None): + SQLMatchRule.__init__(self) + self.regex = re.compile(regex) + self.orig_regex = regex + self.params = params + self.dialect = 'default' + + def _failure_message(self, expected_params): + return ( + 'Testing for compiled statement ~%r partial params %r, ' + 'received %%(received_statement)r with params ' + '%%(received_parameters)r' % ( + self.orig_regex, expected_params + ) + ) + + def _compare_sql(self, execute_observed, received_statement): + return bool(self.regex.match(received_statement)) + + +class DialectSQL(CompiledSQL): + def _compile_dialect(self, execute_observed): + return execute_observed.context.dialect + + def _compare_no_space(self, real_stmt, received_stmt): + stmt = re.sub(r'[\n\t]', '', real_stmt) + return received_stmt == stmt + + def _received_statement(self, execute_observed): + received_stmt, received_params = super(DialectSQL, self).\ + _received_statement(execute_observed) 
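Putting the rules together, a sketch using `assert_engine()` with a `CompiledSQL` rule against an in-memory SQLite engine (hedged: assumes the vendored testing package works outside the full test harness):

.. code:: python

    from sqlalchemy import (Column, Integer, MetaData, String, Table,
                            create_engine)
    from sqlalchemy.testing import assertsql

    engine = create_engine('sqlite://')
    metadata = MetaData()
    users = Table('users', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(30)))
    metadata.create_all(engine)

    with assertsql.assert_engine(engine) as asserter:
        engine.execute(users.insert(), {'name': 'jack'})

    # statements are re-compiled against DefaultDialect for comparison,
    # hence the named :name parameter rather than SQLite's qmark style
    asserter.assert_(
        assertsql.CompiledSQL(
            "INSERT INTO users (name) VALUES (:name)",
            [{'name': 'jack'}]))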
+ + # TODO: why do we need this part? + for real_stmt in execute_observed.statements: + if self._compare_no_space(real_stmt.statement, received_stmt): + break + else: + raise AssertionError( + "Can't locate compiled statement %r in list of " + "statements actually invoked" % received_stmt) + + return received_stmt, execute_observed.context.compiled_parameters + + def _compare_sql(self, execute_observed, received_statement): + stmt = re.sub(r'[\n\t]', '', self.statement) + # convert our comparison statement to have the + # paramstyle of the received + paramstyle = execute_observed.context.dialect.paramstyle + if paramstyle == 'pyformat': + stmt = re.sub( + r':([\w_]+)', r"%(\1)s", stmt) + else: + # positional params + repl = None + if paramstyle == 'qmark': + repl = "?" + elif paramstyle == 'format': + repl = r"%s" + elif paramstyle == 'numeric': + repl = None + stmt = re.sub(r':([\w_]+)', repl, stmt) + + return received_statement == stmt + + +class CountStatements(AssertRule): + + def __init__(self, count): + self.count = count + self._statement_count = 0 + + def process_statement(self, execute_observed): + self._statement_count += 1 + + def no_more_statements(self): + if self.count != self._statement_count: + assert False, 'desired statement count %d does not match %d' \ + % (self.count, self._statement_count) + + +class AllOf(AssertRule): + + def __init__(self, *rules): + self.rules = set(rules) + + def process_statement(self, execute_observed): + for rule in list(self.rules): + rule.errormessage = None + rule.process_statement(execute_observed) + if rule.is_consumed: + self.rules.discard(rule) + if not self.rules: + self.is_consumed = True + break + elif not rule.errormessage: + # rule is not done yet + self.errormessage = None + break + else: + self.errormessage = list(self.rules)[0].errormessage + + +class Or(AllOf): + + def process_statement(self, execute_observed): + for rule in self.rules: + rule.process_statement(execute_observed) + if rule.is_consumed: + self.is_consumed = True + break + else: + self.errormessage = list(self.rules)[0].errormessage + + +class SQLExecuteObserved(object): + def __init__(self, context, clauseelement, multiparams, params): + self.context = context + self.clauseelement = clauseelement + self.parameters = _distill_params(multiparams, params) + self.statements = [] + + +class SQLCursorExecuteObserved( + collections.namedtuple( + "SQLCursorExecuteObserved", + ["statement", "parameters", "context", "executemany"]) +): + pass + + +class SQLAsserter(object): + def __init__(self): + self.accumulated = [] + + def _close(self): + self._final = self.accumulated + del self.accumulated + + def assert_(self, *rules): + rules = list(rules) + observed = list(self._final) + + while observed and rules: + rule = rules[0] + rule.process_statement(observed[0]) + if rule.is_consumed: + rules.pop(0) + elif rule.errormessage: + assert False, rule.errormessage + + if rule.consume_statement: + observed.pop(0) + + if not observed and rules: + rules[0].no_more_statements() + elif not rules and observed: + assert False, "Additional SQL statements remain" + + +@contextlib.contextmanager +def assert_engine(engine): + asserter = SQLAsserter() + + orig = [] + + @event.listens_for(engine, "before_execute") + def connection_execute(conn, clauseelement, multiparams, params): + # grab the original statement + params before any cursor + # execution + orig[:] = clauseelement, multiparams, params + + @event.listens_for(engine, "after_cursor_execute") + def cursor_execute(conn, cursor, 
statement, parameters, + context, executemany): + if not context: + return + # then grab real cursor statements and associate them all + # around a single context + if asserter.accumulated and \ + asserter.accumulated[-1].context is context: + obs = asserter.accumulated[-1] + else: + obs = SQLExecuteObserved(context, orig[0], orig[1], orig[2]) + asserter.accumulated.append(obs) + obs.statements.append( + SQLCursorExecuteObserved( + statement, parameters, context, executemany) + ) + + try: + yield asserter + finally: + event.remove(engine, "after_cursor_execute", cursor_execute) + event.remove(engine, "before_execute", connection_execute) + asserter._close() diff --git a/app/lib/sqlalchemy/testing/config.py b/app/lib/sqlalchemy/testing/config.py new file mode 100644 index 0000000..64be3ac --- /dev/null +++ b/app/lib/sqlalchemy/testing/config.py @@ -0,0 +1,97 @@ +# testing/config.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +import collections + +requirements = None +db = None +db_url = None +db_opts = None +file_config = None +test_schema = None +test_schema_2 = None +_current = None + +try: + from unittest import SkipTest as _skip_test_exception +except ImportError: + _skip_test_exception = None + + +class Config(object): + def __init__(self, db, db_opts, options, file_config): + self.db = db + self.db_opts = db_opts + self.options = options + self.file_config = file_config + self.test_schema = "test_schema" + self.test_schema_2 = "test_schema_2" + + _stack = collections.deque() + _configs = {} + + @classmethod + def register(cls, db, db_opts, options, file_config): + """add a config as one of the global configs. + + If there are no configs set up yet, this config also + gets set as the "_current". 
+ """ + cfg = Config(db, db_opts, options, file_config) + + cls._configs[cfg.db.name] = cfg + cls._configs[(cfg.db.name, cfg.db.dialect)] = cfg + cls._configs[cfg.db] = cfg + return cfg + + @classmethod + def set_as_current(cls, config, namespace): + global db, _current, db_url, test_schema, test_schema_2, db_opts + _current = config + db_url = config.db.url + db_opts = config.db_opts + test_schema = config.test_schema + test_schema_2 = config.test_schema_2 + namespace.db = db = config.db + + @classmethod + def push_engine(cls, db, namespace): + assert _current, "Can't push without a default Config set up" + cls.push( + Config( + db, _current.db_opts, _current.options, _current.file_config), + namespace + ) + + @classmethod + def push(cls, config, namespace): + cls._stack.append(_current) + cls.set_as_current(config, namespace) + + @classmethod + def reset(cls, namespace): + if cls._stack: + cls.set_as_current(cls._stack[0], namespace) + cls._stack.clear() + + @classmethod + def all_configs(cls): + for cfg in set(cls._configs.values()): + yield cfg + + @classmethod + def all_dbs(cls): + for cfg in cls.all_configs(): + yield cfg.db + + def skip_test(self, msg): + skip_test(msg) + + +def skip_test(msg): + raise _skip_test_exception(msg) + diff --git a/app/lib/sqlalchemy/testing/engines.py b/app/lib/sqlalchemy/testing/engines.py new file mode 100644 index 0000000..dacd5a7 --- /dev/null +++ b/app/lib/sqlalchemy/testing/engines.py @@ -0,0 +1,349 @@ +# testing/engines.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from __future__ import absolute_import + +import weakref +from . import config +from .util import decorator +from .. import event, pool +import re +import warnings + + +class ConnectionKiller(object): + + def __init__(self): + self.proxy_refs = weakref.WeakKeyDictionary() + self.testing_engines = weakref.WeakKeyDictionary() + self.conns = set() + + def add_engine(self, engine): + self.testing_engines[engine] = True + + def connect(self, dbapi_conn, con_record): + self.conns.add((dbapi_conn, con_record)) + + def checkout(self, dbapi_con, con_record, con_proxy): + self.proxy_refs[con_proxy] = True + + def invalidate(self, dbapi_con, con_record, exception): + self.conns.discard((dbapi_con, con_record)) + + def _safe(self, fn): + try: + fn() + except Exception as e: + warnings.warn( + "testing_reaper couldn't " + "rollback/close connection: %s" % e) + + def rollback_all(self): + for rec in list(self.proxy_refs): + if rec is not None and rec.is_valid: + self._safe(rec.rollback) + + def close_all(self): + for rec in list(self.proxy_refs): + if rec is not None and rec.is_valid: + self._safe(rec._close) + + def _after_test_ctx(self): + # this can cause a deadlock with pg8000 - pg8000 acquires + # prepared statement lock inside of rollback() - if async gc + # is collecting in finalize_fairy, deadlock. + # not sure if this should be if pypy/jython only. 
+ # note that firebird/fdb definitely needs this though + for conn, rec in list(self.conns): + self._safe(conn.rollback) + + def _stop_test_ctx(self): + if config.options.low_connections: + self._stop_test_ctx_minimal() + else: + self._stop_test_ctx_aggressive() + + def _stop_test_ctx_minimal(self): + self.close_all() + + self.conns = set() + + for rec in list(self.testing_engines): + if rec is not config.db: + rec.dispose() + + def _stop_test_ctx_aggressive(self): + self.close_all() + for conn, rec in list(self.conns): + self._safe(conn.close) + rec.connection = None + + self.conns = set() + for rec in list(self.testing_engines): + rec.dispose() + + def assert_all_closed(self): + for rec in self.proxy_refs: + if rec.is_valid: + assert False + +testing_reaper = ConnectionKiller() + + +def drop_all_tables(metadata, bind): + testing_reaper.close_all() + if hasattr(bind, 'close'): + bind.close() + + if not config.db.dialect.supports_alter: + from . import assertions + with assertions.expect_warnings( + "Can't sort tables", assert_=False): + metadata.drop_all(bind) + else: + metadata.drop_all(bind) + + +@decorator +def assert_conns_closed(fn, *args, **kw): + try: + fn(*args, **kw) + finally: + testing_reaper.assert_all_closed() + + +@decorator +def rollback_open_connections(fn, *args, **kw): + """Decorator that rolls back all open connections after fn execution.""" + + try: + fn(*args, **kw) + finally: + testing_reaper.rollback_all() + + +@decorator +def close_first(fn, *args, **kw): + """Decorator that closes all connections before fn execution.""" + + testing_reaper.close_all() + fn(*args, **kw) + + +@decorator +def close_open_connections(fn, *args, **kw): + """Decorator that closes all connections after fn execution.""" + try: + fn(*args, **kw) + finally: + testing_reaper.close_all() + + +def all_dialects(exclude=None): + import sqlalchemy.databases as d + for name in d.__all__: + # TEMPORARY + if exclude and name in exclude: + continue + mod = getattr(d, name, None) + if not mod: + mod = getattr(__import__( + 'sqlalchemy.databases.%s' % name).databases, name) + yield mod.dialect() + + +class ReconnectFixture(object): + + def __init__(self, dbapi): + self.dbapi = dbapi + self.connections = [] + + def __getattr__(self, key): + return getattr(self.dbapi, key) + + def connect(self, *args, **kwargs): + conn = self.dbapi.connect(*args, **kwargs) + self.connections.append(conn) + return conn + + def _safe(self, fn): + try: + fn() + except Exception as e: + warnings.warn( + "ReconnectFixture couldn't " + "close connection: %s" % e) + + def shutdown(self): + # TODO: this doesn't cover all cases + # as nicely as we'd like, namely MySQLdb. + # would need to implement R. Brewer's + # proxy server idea to get better + # coverage. 
+ for c in list(self.connections): + self._safe(c.close) + self.connections = [] + + +def reconnecting_engine(url=None, options=None): + url = url or config.db.url + dbapi = config.db.dialect.dbapi + if not options: + options = {} + options['module'] = ReconnectFixture(dbapi) + engine = testing_engine(url, options) + _dispose = engine.dispose + + def dispose(): + engine.dialect.dbapi.shutdown() + _dispose() + + engine.test_shutdown = engine.dialect.dbapi.shutdown + engine.dispose = dispose + return engine + + +def testing_engine(url=None, options=None): + """Produce an engine configured by --options with optional overrides.""" + + from sqlalchemy import create_engine + from sqlalchemy.engine.url import make_url + + if not options: + use_reaper = True + else: + use_reaper = options.pop('use_reaper', True) + + url = url or config.db.url + + url = make_url(url) + if options is None: + if config.db is None or url.drivername == config.db.url.drivername: + options = config.db_opts + else: + options = {} + elif config.db is not None and url.drivername == config.db.url.drivername: + default_opt = config.db_opts.copy() + default_opt.update(options) + + engine = create_engine(url, **options) + engine._has_events = True # enable event blocks, helps with profiling + + if isinstance(engine.pool, pool.QueuePool): + engine.pool._timeout = 0 + engine.pool._max_overflow = 0 + if use_reaper: + event.listen(engine.pool, 'connect', testing_reaper.connect) + event.listen(engine.pool, 'checkout', testing_reaper.checkout) + event.listen(engine.pool, 'invalidate', testing_reaper.invalidate) + testing_reaper.add_engine(engine) + + return engine + + +def mock_engine(dialect_name=None): + """Provides a mocking engine based on the current testing.db. + + This is normally used to test DDL generation flow as emitted + by an Engine. + + It should not be used in other cases, as assert_compile() and + assert_sql_execution() are much better choices with fewer + moving parts. + + """ + + from sqlalchemy import create_engine + + if not dialect_name: + dialect_name = config.db.name + + buffer = [] + + def executor(sql, *a, **kw): + buffer.append(sql) + + def assert_sql(stmts): + recv = [re.sub(r'[\n\t]', '', str(s)) for s in buffer] + assert recv == stmts, recv + + def print_sql(): + d = engine.dialect + return "\n".join( + str(s.compile(dialect=d)) + for s in engine.mock + ) + + engine = create_engine(dialect_name + '://', + strategy='mock', executor=executor) + assert not hasattr(engine, 'mock') + engine.mock = buffer + engine.assert_sql = assert_sql + engine.print_sql = print_sql + return engine + + +class DBAPIProxyCursor(object): + """Proxy a DBAPI cursor. + + Tests can provide subclasses of this to intercept + DBAPI-level cursor operations. + + """ + + def __init__(self, engine, conn, *args, **kwargs): + self.engine = engine + self.connection = conn + self.cursor = conn.cursor(*args, **kwargs) + + def execute(self, stmt, parameters=None, **kw): + if parameters: + return self.cursor.execute(stmt, parameters, **kw) + else: + return self.cursor.execute(stmt, **kw) + + def executemany(self, stmt, params, **kw): + return self.cursor.executemany(stmt, params, **kw) + + def __getattr__(self, key): + return getattr(self.cursor, key) + + +class DBAPIProxyConnection(object): + """Proxy a DBAPI connection. + + Tests can provide subclasses of this to intercept + DBAPI-level connection operations. 
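A sketch of `mock_engine()` capturing DDL instead of executing it; the dialect name is passed explicitly so no configured testing database is required:

.. code:: python

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.testing.engines import mock_engine

    metadata = MetaData()
    t = Table('t', metadata, Column('x', Integer))

    engine = mock_engine(dialect_name='sqlite')
    # checkfirst=False: a mock engine cannot answer "does this table exist?"
    metadata.create_all(engine, checkfirst=False)
    engine.assert_sql(['CREATE TABLE t (x INTEGER)'])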
+
+    """
+
+    def __init__(self, engine, cursor_cls):
+        self.conn = self._sqla_unwrap = engine.pool._creator()
+        self.engine = engine
+        self.cursor_cls = cursor_cls
+
+    def cursor(self, *args, **kwargs):
+        return self.cursor_cls(self.engine, self.conn, *args, **kwargs)
+
+    def close(self):
+        self.conn.close()
+
+    def __getattr__(self, key):
+        return getattr(self.conn, key)
+
+
+def proxying_engine(conn_cls=DBAPIProxyConnection,
+                    cursor_cls=DBAPIProxyCursor):
+    """Produce an engine that provides proxy hooks for
+    common methods.
+
+    """
+    def mock_conn():
+        return conn_cls(config.db, cursor_cls)
+    return testing_engine(options={'creator': mock_conn})
+
+
diff --git a/app/lib/sqlalchemy/testing/entities.py b/app/lib/sqlalchemy/testing/entities.py
new file mode 100644
index 0000000..ebd9fb5
--- /dev/null
+++ b/app/lib/sqlalchemy/testing/entities.py
@@ -0,0 +1,101 @@
+# testing/entities.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import sqlalchemy as sa
+from sqlalchemy import exc as sa_exc
+
+_repr_stack = set()
+
+
+class BasicEntity(object):
+
+    def __init__(self, **kw):
+        for key, value in kw.items():
+            setattr(self, key, value)
+
+    def __repr__(self):
+        if id(self) in _repr_stack:
+            return object.__repr__(self)
+        _repr_stack.add(id(self))
+        try:
+            return "%s(%s)" % (
+                (self.__class__.__name__),
+                ', '.join(["%s=%r" % (key, getattr(self, key))
+                           for key in sorted(self.__dict__.keys())
+                           if not key.startswith('_')]))
+        finally:
+            _repr_stack.remove(id(self))
+
+_recursion_stack = set()
+
+
+class ComparableEntity(BasicEntity):
+
+    def __hash__(self):
+        return hash(self.__class__)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __eq__(self, other):
+        """Deep, sparse compare.
+
+        Deeply compare two entities, following the non-None attributes of the
+        non-persisted object, if possible.
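+
+        Scalar attributes are compared with ``==``; iterable attributes
+        are compared as lists when indexable and as sets otherwise.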
+ + """ + if other is self: + return True + elif not self.__class__ == other.__class__: + return False + + if id(self) in _recursion_stack: + return True + _recursion_stack.add(id(self)) + + try: + # pick the entity that's not SA persisted as the source + try: + self_key = sa.orm.attributes.instance_state(self).key + except sa.orm.exc.NO_STATE: + self_key = None + + if other is None: + a = self + b = other + elif self_key is not None: + a = other + b = self + else: + a = self + b = other + + for attr in list(a.__dict__): + if attr.startswith('_'): + continue + value = getattr(a, attr) + + try: + # handle lazy loader errors + battr = getattr(b, attr) + except (AttributeError, sa_exc.UnboundExecutionError): + return False + + if hasattr(value, '__iter__'): + if hasattr(value, '__getitem__') and not hasattr( + value, 'keys'): + if list(value) != list(battr): + return False + else: + if set(value) != set(battr): + return False + else: + if value is not None and value != battr: + return False + return True + finally: + _recursion_stack.remove(id(self)) diff --git a/app/lib/sqlalchemy/testing/exclusions.py b/app/lib/sqlalchemy/testing/exclusions.py new file mode 100644 index 0000000..074b985 --- /dev/null +++ b/app/lib/sqlalchemy/testing/exclusions.py @@ -0,0 +1,443 @@ +# testing/exclusions.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + + +import operator +from ..util import decorator +from . import config +from .. import util +import inspect +import contextlib +from sqlalchemy.util.compat import inspect_getargspec + + +def skip_if(predicate, reason=None): + rule = compound() + pred = _as_predicate(predicate, reason) + rule.skips.add(pred) + return rule + + +def fails_if(predicate, reason=None): + rule = compound() + pred = _as_predicate(predicate, reason) + rule.fails.add(pred) + return rule + + +class compound(object): + def __init__(self): + self.fails = set() + self.skips = set() + self.tags = set() + + def __add__(self, other): + return self.add(other) + + def add(self, *others): + copy = compound() + copy.fails.update(self.fails) + copy.skips.update(self.skips) + copy.tags.update(self.tags) + for other in others: + copy.fails.update(other.fails) + copy.skips.update(other.skips) + copy.tags.update(other.tags) + return copy + + def not_(self): + copy = compound() + copy.fails.update(NotPredicate(fail) for fail in self.fails) + copy.skips.update(NotPredicate(skip) for skip in self.skips) + copy.tags.update(self.tags) + return copy + + @property + def enabled(self): + return self.enabled_for_config(config._current) + + def enabled_for_config(self, config): + for predicate in self.skips.union(self.fails): + if predicate(config): + return False + else: + return True + + def matching_config_reasons(self, config): + return [ + predicate._as_string(config) for predicate + in self.skips.union(self.fails) + if predicate(config) + ] + + def include_test(self, include_tags, exclude_tags): + return bool( + not self.tags.intersection(exclude_tags) and + (not include_tags or self.tags.intersection(include_tags)) + ) + + def _extend(self, other): + self.skips.update(other.skips) + self.fails.update(other.fails) + self.tags.update(other.tags) + + def __call__(self, fn): + if hasattr(fn, '_sa_exclusion_extend'): + fn._sa_exclusion_extend._extend(self) + return fn + + @decorator + def decorate(fn, *args, **kw): + return self._do(config._current, fn, 
*args, **kw) + decorated = decorate(fn) + decorated._sa_exclusion_extend = self + return decorated + + @contextlib.contextmanager + def fail_if(self): + all_fails = compound() + all_fails.fails.update(self.skips.union(self.fails)) + + try: + yield + except Exception as ex: + all_fails._expect_failure(config._current, ex) + else: + all_fails._expect_success(config._current) + + def _do(self, cfg, fn, *args, **kw): + for skip in self.skips: + if skip(cfg): + msg = "'%s' : %s" % ( + fn.__name__, + skip._as_string(cfg) + ) + config.skip_test(msg) + + try: + return_value = fn(*args, **kw) + except Exception as ex: + self._expect_failure(cfg, ex, name=fn.__name__) + else: + self._expect_success(cfg, name=fn.__name__) + return return_value + + def _expect_failure(self, config, ex, name='block'): + for fail in self.fails: + if fail(config): + print(("%s failed as expected (%s): %s " % ( + name, fail._as_string(config), str(ex)))) + break + else: + util.raise_from_cause(ex) + + def _expect_success(self, config, name='block'): + if not self.fails: + return + for fail in self.fails: + if not fail(config): + break + else: + raise AssertionError( + "Unexpected success for '%s' (%s)" % + ( + name, + " and ".join( + fail._as_string(config) + for fail in self.fails + ) + ) + ) + + +def requires_tag(tagname): + return tags([tagname]) + + +def tags(tagnames): + comp = compound() + comp.tags.update(tagnames) + return comp + + +def only_if(predicate, reason=None): + predicate = _as_predicate(predicate) + return skip_if(NotPredicate(predicate), reason) + + +def succeeds_if(predicate, reason=None): + predicate = _as_predicate(predicate) + return fails_if(NotPredicate(predicate), reason) + + +class Predicate(object): + @classmethod + def as_predicate(cls, predicate, description=None): + if isinstance(predicate, compound): + return cls.as_predicate(predicate.enabled_for_config, description) + elif isinstance(predicate, Predicate): + if description and predicate.description is None: + predicate.description = description + return predicate + elif isinstance(predicate, (list, set)): + return OrPredicate( + [cls.as_predicate(pred) for pred in predicate], + description) + elif isinstance(predicate, tuple): + return SpecPredicate(*predicate) + elif isinstance(predicate, util.string_types): + tokens = predicate.split(" ", 2) + op = spec = None + db = tokens.pop(0) + if tokens: + op = tokens.pop(0) + if tokens: + spec = tuple(int(d) for d in tokens.pop(0).split(".")) + return SpecPredicate(db, op, spec, description=description) + elif util.callable(predicate): + return LambdaPredicate(predicate, description) + else: + assert False, "unknown predicate type: %s" % predicate + + def _format_description(self, config, negate=False): + bool_ = self(config) + if negate: + bool_ = not negate + return self.description % { + "driver": config.db.url.get_driver_name() + if config else "", + "database": config.db.url.get_backend_name() + if config else "", + "doesnt_support": "doesn't support" if bool_ else "does support", + "does_support": "does support" if bool_ else "doesn't support" + } + + def _as_string(self, config=None, negate=False): + raise NotImplementedError() + + +class BooleanPredicate(Predicate): + def __init__(self, value, description=None): + self.value = value + self.description = description or "boolean %s" % value + + def __call__(self, config): + return self.value + + def _as_string(self, config, negate=False): + return self._format_description(config, negate=negate) + + +class SpecPredicate(Predicate): + def 
__init__(self, db, op=None, spec=None, description=None): + self.db = db + self.op = op + self.spec = spec + self.description = description + + _ops = { + '<': operator.lt, + '>': operator.gt, + '==': operator.eq, + '!=': operator.ne, + '<=': operator.le, + '>=': operator.ge, + 'in': operator.contains, + 'between': lambda val, pair: val >= pair[0] and val <= pair[1], + } + + def __call__(self, config): + engine = config.db + + if "+" in self.db: + dialect, driver = self.db.split('+') + else: + dialect, driver = self.db, None + + if dialect and engine.name != dialect: + return False + if driver is not None and engine.driver != driver: + return False + + if self.op is not None: + assert driver is None, "DBAPI version specs not supported yet" + + version = _server_version(engine) + oper = hasattr(self.op, '__call__') and self.op \ + or self._ops[self.op] + return oper(version, self.spec) + else: + return True + + def _as_string(self, config, negate=False): + if self.description is not None: + return self._format_description(config) + elif self.op is None: + if negate: + return "not %s" % self.db + else: + return "%s" % self.db + else: + if negate: + return "not %s %s %s" % ( + self.db, + self.op, + self.spec + ) + else: + return "%s %s %s" % ( + self.db, + self.op, + self.spec + ) + + +class LambdaPredicate(Predicate): + def __init__(self, lambda_, description=None, args=None, kw=None): + spec = inspect_getargspec(lambda_) + if not spec[0]: + self.lambda_ = lambda db: lambda_() + else: + self.lambda_ = lambda_ + self.args = args or () + self.kw = kw or {} + if description: + self.description = description + elif lambda_.__doc__: + self.description = lambda_.__doc__ + else: + self.description = "custom function" + + def __call__(self, config): + return self.lambda_(config) + + def _as_string(self, config, negate=False): + return self._format_description(config) + + +class NotPredicate(Predicate): + def __init__(self, predicate, description=None): + self.predicate = predicate + self.description = description + + def __call__(self, config): + return not self.predicate(config) + + def _as_string(self, config, negate=False): + if self.description: + return self._format_description(config, not negate) + else: + return self.predicate._as_string(config, not negate) + + +class OrPredicate(Predicate): + def __init__(self, predicates, description=None): + self.predicates = predicates + self.description = description + + def __call__(self, config): + for pred in self.predicates: + if pred(config): + return True + return False + + def _eval_str(self, config, negate=False): + if negate: + conjunction = " and " + else: + conjunction = " or " + return conjunction.join(p._as_string(config, negate=negate) + for p in self.predicates) + + def _negation_str(self, config): + if self.description is not None: + return "Not " + self._format_description(config) + else: + return self._eval_str(config, negate=True) + + def _as_string(self, config, negate=False): + if negate: + return self._negation_str(config) + else: + if self.description is not None: + return self._format_description(config) + else: + return self._eval_str(config) + + +_as_predicate = Predicate.as_predicate + + +def _is_excluded(db, op, spec): + return SpecPredicate(db, op, spec)(config._current) + + +def _server_version(engine): + """Return a server_version_info tuple.""" + + # force metadata to be retrieved + conn = engine.connect() + version = getattr(engine.dialect, 'server_version_info', ()) + conn.close() + return version + + +def db_spec(*dbs): 
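+    # OR together one predicate per database spec; each spec may be a
+    # plain backend name or "backend+driver", e.g.
+    # db_spec("sqlite", "postgresql+psycopg2") (illustrative values)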
+ return OrPredicate( + [Predicate.as_predicate(db) for db in dbs] + ) + + +def open(): + return skip_if(BooleanPredicate(False, "mark as execute")) + + +def closed(): + return skip_if(BooleanPredicate(True, "marked as skip")) + + +def fails(reason=None): + return fails_if(BooleanPredicate(True, reason or "expected to fail")) + + +@decorator +def future(fn, *arg): + return fails_if(LambdaPredicate(fn), "Future feature") + + +def fails_on(db, reason=None): + return fails_if(Predicate.as_predicate(db), reason) + + +def fails_on_everything_except(*dbs): + return succeeds_if( + OrPredicate([ + Predicate.as_predicate(db) for db in dbs + ]) + ) + + +def skip(db, reason=None): + return skip_if(Predicate.as_predicate(db), reason) + + +def only_on(dbs, reason=None): + return only_if( + OrPredicate([Predicate.as_predicate(db) for db in util.to_list(dbs)]) + ) + + +def exclude(db, op, spec, reason=None): + return skip_if(SpecPredicate(db, op, spec), reason) + + +def against(config, *queries): + assert queries, "no queries sent!" + return OrPredicate([ + Predicate.as_predicate(query) + for query in queries + ])(config) diff --git a/app/lib/sqlalchemy/testing/fixtures.py b/app/lib/sqlalchemy/testing/fixtures.py new file mode 100644 index 0000000..8cd6e9f --- /dev/null +++ b/app/lib/sqlalchemy/testing/fixtures.py @@ -0,0 +1,386 @@ +# testing/fixtures.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from . import config +from . import assertions, schema +from .util import adict +from .. import util +from .engines import drop_all_tables +from .entities import BasicEntity, ComparableEntity +import sys +import sqlalchemy as sa +from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta + +# whether or not we use unittest changes things dramatically, +# as far as how py.test collection works. + + +class TestBase(object): + # A sequence of database names to always run, regardless of the + # constraints below. + __whitelist__ = () + + # A sequence of requirement names matching testing.requires decorators + __requires__ = () + + # A sequence of dialect names to exclude from the test class. + __unsupported_on__ = () + + # If present, test class is only runnable for the *single* specified + # dialect. If you need multiple, use __unsupported_on__ and invert. + __only_on__ = None + + # A sequence of no-arg callables. If any are True, the entire testcase is + # skipped. 
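+    # e.g. (hypothetical): __skip_if__ = (lambda: sys.platform == 'win32', )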
+ __skip_if__ = None + + def assert_(self, val, msg=None): + assert val, msg + + # apparently a handful of tests are doing this....OK + def setup(self): + if hasattr(self, "setUp"): + self.setUp() + + def teardown(self): + if hasattr(self, "tearDown"): + self.tearDown() + + +class TablesTest(TestBase): + + # 'once', None + run_setup_bind = 'once' + + # 'once', 'each', None + run_define_tables = 'once' + + # 'once', 'each', None + run_create_tables = 'once' + + # 'once', 'each', None + run_inserts = 'each' + + # 'each', None + run_deletes = 'each' + + # 'once', None + run_dispose_bind = None + + bind = None + metadata = None + tables = None + other = None + + @classmethod + def setup_class(cls): + cls._init_class() + + cls._setup_once_tables() + + cls._setup_once_inserts() + + @classmethod + def _init_class(cls): + if cls.run_define_tables == 'each': + if cls.run_create_tables == 'once': + cls.run_create_tables = 'each' + assert cls.run_inserts in ('each', None) + + cls.other = adict() + cls.tables = adict() + + cls.bind = cls.setup_bind() + cls.metadata = sa.MetaData() + cls.metadata.bind = cls.bind + + @classmethod + def _setup_once_inserts(cls): + if cls.run_inserts == 'once': + cls._load_fixtures() + cls.insert_data() + + @classmethod + def _setup_once_tables(cls): + if cls.run_define_tables == 'once': + cls.define_tables(cls.metadata) + if cls.run_create_tables == 'once': + cls.metadata.create_all(cls.bind) + cls.tables.update(cls.metadata.tables) + + def _setup_each_tables(self): + if self.run_define_tables == 'each': + self.tables.clear() + if self.run_create_tables == 'each': + drop_all_tables(self.metadata, self.bind) + self.metadata.clear() + self.define_tables(self.metadata) + if self.run_create_tables == 'each': + self.metadata.create_all(self.bind) + self.tables.update(self.metadata.tables) + elif self.run_create_tables == 'each': + drop_all_tables(self.metadata, self.bind) + self.metadata.create_all(self.bind) + + def _setup_each_inserts(self): + if self.run_inserts == 'each': + self._load_fixtures() + self.insert_data() + + def _teardown_each_tables(self): + # no need to run deletes if tables are recreated on setup + if self.run_define_tables != 'each' and self.run_deletes == 'each': + with self.bind.connect() as conn: + for table in reversed(self.metadata.sorted_tables): + try: + conn.execute(table.delete()) + except sa.exc.DBAPIError as ex: + util.print_( + ("Error emptying table %s: %r" % (table, ex)), + file=sys.stderr) + + def setup(self): + self._setup_each_tables() + self._setup_each_inserts() + + def teardown(self): + self._teardown_each_tables() + + @classmethod + def _teardown_once_metadata_bind(cls): + if cls.run_create_tables: + drop_all_tables(cls.metadata, cls.bind) + + if cls.run_dispose_bind == 'once': + cls.dispose_bind(cls.bind) + + cls.metadata.bind = None + + if cls.run_setup_bind is not None: + cls.bind = None + + @classmethod + def teardown_class(cls): + cls._teardown_once_metadata_bind() + + @classmethod + def setup_bind(cls): + return config.db + + @classmethod + def dispose_bind(cls, bind): + if hasattr(bind, 'dispose'): + bind.dispose() + elif hasattr(bind, 'close'): + bind.close() + + @classmethod + def define_tables(cls, metadata): + pass + + @classmethod + def fixtures(cls): + return {} + + @classmethod + def insert_data(cls): + pass + + def sql_count_(self, count, fn): + self.assert_sql_count(self.bind, fn, count) + + def sql_eq_(self, callable_, statements): + self.assert_sql(self.bind, callable_, statements) + + @classmethod + def 
_load_fixtures(cls): + """Insert rows as represented by the fixtures() method.""" + headers, rows = {}, {} + for table, data in cls.fixtures().items(): + if len(data) < 2: + continue + if isinstance(table, util.string_types): + table = cls.tables[table] + headers[table] = data[0] + rows[table] = data[1:] + for table in cls.metadata.sorted_tables: + if table not in headers: + continue + cls.bind.execute( + table.insert(), + [dict(zip(headers[table], column_values)) + for column_values in rows[table]]) + +from sqlalchemy import event + + +class RemovesEvents(object): + @util.memoized_property + def _event_fns(self): + return set() + + def event_listen(self, target, name, fn): + self._event_fns.add((target, name, fn)) + event.listen(target, name, fn) + + def teardown(self): + for key in self._event_fns: + event.remove(*key) + super_ = super(RemovesEvents, self) + if hasattr(super_, "teardown"): + super_.teardown() + + +class _ORMTest(object): + + @classmethod + def teardown_class(cls): + sa.orm.session.Session.close_all() + sa.orm.clear_mappers() + + +class ORMTest(_ORMTest, TestBase): + pass + + +class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults): + # 'once', 'each', None + run_setup_classes = 'once' + + # 'once', 'each', None + run_setup_mappers = 'each' + + classes = None + + @classmethod + def setup_class(cls): + cls._init_class() + + if cls.classes is None: + cls.classes = adict() + + cls._setup_once_tables() + cls._setup_once_classes() + cls._setup_once_mappers() + cls._setup_once_inserts() + + @classmethod + def teardown_class(cls): + cls._teardown_once_class() + cls._teardown_once_metadata_bind() + + def setup(self): + self._setup_each_tables() + self._setup_each_classes() + self._setup_each_mappers() + self._setup_each_inserts() + + def teardown(self): + sa.orm.session.Session.close_all() + self._teardown_each_mappers() + self._teardown_each_classes() + self._teardown_each_tables() + + @classmethod + def _teardown_once_class(cls): + cls.classes.clear() + _ORMTest.teardown_class() + + @classmethod + def _setup_once_classes(cls): + if cls.run_setup_classes == 'once': + cls._with_register_classes(cls.setup_classes) + + @classmethod + def _setup_once_mappers(cls): + if cls.run_setup_mappers == 'once': + cls._with_register_classes(cls.setup_mappers) + + def _setup_each_mappers(self): + if self.run_setup_mappers == 'each': + self._with_register_classes(self.setup_mappers) + + def _setup_each_classes(self): + if self.run_setup_classes == 'each': + self._with_register_classes(self.setup_classes) + + @classmethod + def _with_register_classes(cls, fn): + """Run a setup method, framing the operation with a Base class + that will catch new subclasses to be established within + the "classes" registry. 
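+
+        Subclasses of ``cls.Basic`` / ``cls.Comparable`` defined inside
+        ``fn`` are captured by the ``FindFixture`` metaclass and stored
+        in the ``cls.classes`` dict, keyed by class name.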
+ + """ + cls_registry = cls.classes + + class FindFixture(type): + def __init__(cls, classname, bases, dict_): + cls_registry[classname] = cls + return type.__init__(cls, classname, bases, dict_) + + class _Base(util.with_metaclass(FindFixture, object)): + pass + + class Basic(BasicEntity, _Base): + pass + + class Comparable(ComparableEntity, _Base): + pass + + cls.Basic = Basic + cls.Comparable = Comparable + fn() + + def _teardown_each_mappers(self): + # some tests create mappers in the test bodies + # and will define setup_mappers as None - + # clear mappers in any case + if self.run_setup_mappers != 'once': + sa.orm.clear_mappers() + + def _teardown_each_classes(self): + if self.run_setup_classes != 'once': + self.classes.clear() + + @classmethod + def setup_classes(cls): + pass + + @classmethod + def setup_mappers(cls): + pass + + +class DeclarativeMappedTest(MappedTest): + run_setup_classes = 'once' + run_setup_mappers = 'once' + + @classmethod + def _setup_once_tables(cls): + pass + + @classmethod + def _with_register_classes(cls, fn): + cls_registry = cls.classes + + class FindFixtureDeclarative(DeclarativeMeta): + def __init__(cls, classname, bases, dict_): + cls_registry[classname] = cls + return DeclarativeMeta.__init__( + cls, classname, bases, dict_) + + class DeclarativeBasic(object): + __table_cls__ = schema.Table + + _DeclBase = declarative_base(metadata=cls.metadata, + metaclass=FindFixtureDeclarative, + cls=DeclarativeBasic) + cls.DeclarativeBasic = _DeclBase + fn() + + if cls.metadata.tables and cls.run_create_tables: + cls.metadata.create_all(config.db) diff --git a/app/lib/sqlalchemy/testing/mock.py b/app/lib/sqlalchemy/testing/mock.py new file mode 100644 index 0000000..05291ec --- /dev/null +++ b/app/lib/sqlalchemy/testing/mock.py @@ -0,0 +1,21 @@ +# testing/mock.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Import stub for mock library. +""" +from __future__ import absolute_import +from ..util import py33 + +if py33: + from unittest.mock import MagicMock, Mock, call, patch, ANY +else: + try: + from mock import MagicMock, Mock, call, patch, ANY + except ImportError: + raise ImportError( + "SQLAlchemy's test suite requires the " + "'mock' library as of 0.8.2.") diff --git a/app/lib/sqlalchemy/testing/pickleable.py b/app/lib/sqlalchemy/testing/pickleable.py new file mode 100644 index 0000000..c3ba828 --- /dev/null +++ b/app/lib/sqlalchemy/testing/pickleable.py @@ -0,0 +1,143 @@ +# testing/pickleable.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Classes used in pickling tests, need to be at the module level for +unpickling. +""" + +from . import fixtures + + +class User(fixtures.ComparableEntity): + pass + + +class Order(fixtures.ComparableEntity): + pass + + +class Dingaling(fixtures.ComparableEntity): + pass + + +class EmailUser(User): + pass + + +class Address(fixtures.ComparableEntity): + pass + + +# TODO: these are kind of arbitrary.... 
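+#
+# A hypothetical pickle round trip, relying on these classes living at
+# module level:
+#
+#     import pickle
+#     u = User(id=1, name='ed')
+#     assert pickle.loads(pickle.dumps(u)) == u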
+class Child1(fixtures.ComparableEntity): + pass + + +class Child2(fixtures.ComparableEntity): + pass + + +class Parent(fixtures.ComparableEntity): + pass + + +class Screen(object): + + def __init__(self, obj, parent=None): + self.obj = obj + self.parent = parent + + +class Foo(object): + + def __init__(self, moredata): + self.data = 'im data' + self.stuff = 'im stuff' + self.moredata = moredata + + __hash__ = object.__hash__ + + def __eq__(self, other): + return other.data == self.data and \ + other.stuff == self.stuff and \ + other.moredata == self.moredata + + +class Bar(object): + + def __init__(self, x, y): + self.x = x + self.y = y + + __hash__ = object.__hash__ + + def __eq__(self, other): + return other.__class__ is self.__class__ and \ + other.x == self.x and \ + other.y == self.y + + def __str__(self): + return "Bar(%d, %d)" % (self.x, self.y) + + +class OldSchool: + + def __init__(self, x, y): + self.x = x + self.y = y + + def __eq__(self, other): + return other.__class__ is self.__class__ and \ + other.x == self.x and \ + other.y == self.y + + +class OldSchoolWithoutCompare: + + def __init__(self, x, y): + self.x = x + self.y = y + + +class BarWithoutCompare(object): + + def __init__(self, x, y): + self.x = x + self.y = y + + def __str__(self): + return "Bar(%d, %d)" % (self.x, self.y) + + +class NotComparable(object): + + def __init__(self, data): + self.data = data + + def __hash__(self): + return id(self) + + def __eq__(self, other): + return NotImplemented + + def __ne__(self, other): + return NotImplemented + + +class BrokenComparable(object): + + def __init__(self, data): + self.data = data + + def __hash__(self): + return id(self) + + def __eq__(self, other): + raise NotImplementedError + + def __ne__(self, other): + raise NotImplementedError diff --git a/src/application/views/__init__.py b/app/lib/sqlalchemy/testing/plugin/__init__.py similarity index 100% rename from src/application/views/__init__.py rename to app/lib/sqlalchemy/testing/plugin/__init__.py diff --git a/app/lib/sqlalchemy/testing/plugin/bootstrap.py b/app/lib/sqlalchemy/testing/plugin/bootstrap.py new file mode 100644 index 0000000..497fcb7 --- /dev/null +++ b/app/lib/sqlalchemy/testing/plugin/bootstrap.py @@ -0,0 +1,44 @@ +""" +Bootstrapper for nose/pytest plugins. + +The entire rationale for this system is to get the modules in plugin/ +imported without importing all of the supporting library, so that we can +set up things for testing before coverage starts. + +The rationale for all of plugin/ being *in* the supporting library in the +first place is so that the testing and plugin suite is available to other +libraries, mainly external SQLAlchemy and Alembic dialects, to make use +of the same test environment and standard suites available to +SQLAlchemy/Alembic themselves without the need to ship/install a separate +package outside of SQLAlchemy. + +NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; +this should be removable when Alembic targets SQLAlchemy 1.0.0. 
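+
+This file is intended to be exec()'d rather than imported; the runner
+seeds ``bootstrap_file`` and ``to_bootstrap`` into its namespace first,
+which is why both are fetched via ``locals()`` below.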
+ +""" + +import os +import sys + +bootstrap_file = locals()['bootstrap_file'] +to_bootstrap = locals()['to_bootstrap'] + + +def load_file_as_module(name): + path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name) + if sys.version_info >= (3, 3): + from importlib import machinery + mod = machinery.SourceFileLoader(name, path).load_module() + else: + import imp + mod = imp.load_source(name, path) + return mod + +if to_bootstrap == "pytest": + sys.modules["sqla_plugin_base"] = load_file_as_module("plugin_base") + sys.modules["sqla_pytestplugin"] = load_file_as_module("pytestplugin") +elif to_bootstrap == "nose": + sys.modules["sqla_plugin_base"] = load_file_as_module("plugin_base") + sys.modules["sqla_noseplugin"] = load_file_as_module("noseplugin") +else: + raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa diff --git a/app/lib/sqlalchemy/testing/plugin/noseplugin.py b/app/lib/sqlalchemy/testing/plugin/noseplugin.py new file mode 100644 index 0000000..2cf95d8 --- /dev/null +++ b/app/lib/sqlalchemy/testing/plugin/noseplugin.py @@ -0,0 +1,107 @@ +# plugin/noseplugin.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Enhance nose with extra options and behaviors for running SQLAlchemy tests. + +Must be run via ./sqla_nose.py so that it is imported in the expected +way (e.g. as a package-less import). + +""" + +try: + # installed by bootstrap.py + import sqla_plugin_base as plugin_base +except ImportError: + # assume we're a package, use traditional import + from . import plugin_base + + +import os +import sys + +from nose.plugins import Plugin +import nose +fixtures = None + +py3k = sys.version_info >= (3, 0) + + +class NoseSQLAlchemy(Plugin): + enabled = True + + name = 'sqla_testing' + score = 100 + + def options(self, parser, env=os.environ): + Plugin.options(self, parser, env) + opt = parser.add_option + + def make_option(name, **kw): + callback_ = kw.pop("callback", None) + if callback_: + def wrap_(option, opt_str, value, parser): + callback_(opt_str, value, parser) + kw["callback"] = wrap_ + opt(name, **kw) + + plugin_base.setup_options(make_option) + plugin_base.read_config() + + def configure(self, options, conf): + super(NoseSQLAlchemy, self).configure(options, conf) + plugin_base.pre_begin(options) + + plugin_base.set_coverage_flag(options.enable_plugin_coverage) + + plugin_base.set_skip_test(nose.SkipTest) + + def begin(self): + global fixtures + from sqlalchemy.testing import fixtures # noqa + + plugin_base.post_begin() + + def describeTest(self, test): + return "" + + def wantFunction(self, fn): + return False + + def wantMethod(self, fn): + if py3k: + if not hasattr(fn.__self__, 'cls'): + return False + cls = fn.__self__.cls + else: + cls = fn.im_class + return plugin_base.want_method(cls, fn) + + def wantClass(self, cls): + return plugin_base.want_class(cls) + + def beforeTest(self, test): + if not hasattr(test.test, 'cls'): + return + plugin_base.before_test( + test, + test.test.cls.__module__, + test.test.cls, test.test.method.__name__) + + def afterTest(self, test): + plugin_base.after_test(test) + + def startContext(self, ctx): + if not isinstance(ctx, type) \ + or not issubclass(ctx, fixtures.TestBase): + return + plugin_base.start_test_class(ctx) + + def stopContext(self, ctx): + if not isinstance(ctx, type) \ + or not issubclass(ctx, fixtures.TestBase): + return + 
plugin_base.stop_test_class(ctx)
diff --git a/app/lib/sqlalchemy/testing/plugin/plugin_base.py b/app/lib/sqlalchemy/testing/plugin/plugin_base.py
new file mode 100644
index 0000000..996cf45
--- /dev/null
+++ b/app/lib/sqlalchemy/testing/plugin/plugin_base.py
@@ -0,0 +1,565 @@
+# plugin/plugin_base.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""Testing extensions.
+
+this module is designed to work as a testing-framework-agnostic library,
+so that we can continue to support nose and also begin adding new
+functionality via py.test.
+
+"""
+
+from __future__ import absolute_import
+
+import sys
+import re
+
+py3k = sys.version_info >= (3, 0)
+
+if py3k:
+    import configparser
+else:
+    import ConfigParser as configparser
+
+# late imports
+fixtures = None
+engines = None
+exclusions = None
+warnings = None
+profiling = None
+assertions = None
+requirements = None
+config = None
+testing = None
+util = None
+file_config = None
+
+
+logging = None
+include_tags = set()
+exclude_tags = set()
+options = None
+
+
+def setup_options(make_option):
+    make_option("--log-info", action="callback", type="string", callback=_log,
+                help="turn on info logging for <log_name> (multiple OK)")
+    make_option("--log-debug", action="callback",
+                type="string", callback=_log,
+                help="turn on debug logging for <log_name> (multiple OK)")
+    make_option("--db", action="append", type="string", dest="db",
+                help="Use prefab database uri. Multiple OK, "
+                "first one is run by default.")
+    make_option('--dbs', action='callback', callback=_list_dbs,
+                help="List available prefab dbs")
+    make_option("--dburi", action="append", type="string", dest="dburi",
+                help="Database uri. Multiple OK, "
+                "first one is run by default.")
+    make_option("--dropfirst", action="store_true", dest="dropfirst",
+                help="Drop all tables in the target database first")
+    make_option("--backend-only", action="store_true", dest="backend_only",
+                help="Run only tests marked with __backend__")
+    make_option("--low-connections", action="store_true",
+                dest="low_connections",
+                help="Use a low number of distinct connections - "
+                "i.e. 
for Oracle TNS") + make_option("--write-idents", type="string", dest="write_idents", + help="write out generated follower idents to , " + "when -n is used") + make_option("--reversetop", action="store_true", + dest="reversetop", default=False, + help="Use a random-ordering set implementation in the ORM " + "(helps reveal dependency issues)") + make_option("--requirements", action="callback", type="string", + callback=_requirements_opt, + help="requirements class for testing, overrides setup.cfg") + make_option("--with-cdecimal", action="store_true", + dest="cdecimal", default=False, + help="Monkeypatch the cdecimal library into Python 'decimal' " + "for all tests") + make_option("--include-tag", action="callback", callback=_include_tag, + type="string", + help="Include tests with tag ") + make_option("--exclude-tag", action="callback", callback=_exclude_tag, + type="string", + help="Exclude tests with tag ") + make_option("--write-profiles", action="store_true", + dest="write_profiles", default=False, + help="Write/update failing profiling data.") + make_option("--force-write-profiles", action="store_true", + dest="force_write_profiles", default=False, + help="Unconditionally write/update profiling data.") + + +def configure_follower(follower_ident): + """Configure required state for a follower. + + This invokes in the parent process and typically includes + database creation. + + """ + from sqlalchemy.testing import provision + provision.FOLLOWER_IDENT = follower_ident + + +def memoize_important_follower_config(dict_): + """Store important configuration we will need to send to a follower. + + This invokes in the parent process after normal config is set up. + + This is necessary as py.test seems to not be using forking, so we + start with nothing in memory, *but* it isn't running our argparse + callables, so we have to just copy all of that over. + + """ + dict_['memoized_config'] = { + 'include_tags': include_tags, + 'exclude_tags': exclude_tags + } + + +def restore_important_follower_config(dict_): + """Restore important configuration needed by a follower. + + This invokes in the follower process. 
+ + """ + global include_tags, exclude_tags + include_tags.update(dict_['memoized_config']['include_tags']) + exclude_tags.update(dict_['memoized_config']['exclude_tags']) + + +def read_config(): + global file_config + file_config = configparser.ConfigParser() + file_config.read(['setup.cfg', 'test.cfg']) + + +def pre_begin(opt): + """things to set up early, before coverage might be setup.""" + global options + options = opt + for fn in pre_configure: + fn(options, file_config) + + +def set_coverage_flag(value): + options.has_coverage = value + +_skip_test_exception = None + + +def set_skip_test(exc): + global _skip_test_exception + _skip_test_exception = exc + + +def post_begin(): + """things to set up later, once we know coverage is running.""" + # Lazy setup of other options (post coverage) + for fn in post_configure: + fn(options, file_config) + + # late imports, has to happen after config as well + # as nose plugins like coverage + global util, fixtures, engines, exclusions, \ + assertions, warnings, profiling,\ + config, testing + from sqlalchemy import testing # noqa + from sqlalchemy.testing import fixtures, engines, exclusions # noqa + from sqlalchemy.testing import assertions, warnings, profiling # noqa + from sqlalchemy.testing import config # noqa + from sqlalchemy import util # noqa + warnings.setup_filters() + + + +def _log(opt_str, value, parser): + global logging + if not logging: + import logging + logging.basicConfig() + + if opt_str.endswith('-info'): + logging.getLogger(value).setLevel(logging.INFO) + elif opt_str.endswith('-debug'): + logging.getLogger(value).setLevel(logging.DEBUG) + + +def _list_dbs(*args): + print("Available --db options (use --dburi to override)") + for macro in sorted(file_config.options('db')): + print("%20s\t%s" % (macro, file_config.get('db', macro))) + sys.exit(0) + + +def _requirements_opt(opt_str, value, parser): + _setup_requirements(value) + + +def _exclude_tag(opt_str, value, parser): + exclude_tags.add(value.replace('-', '_')) + + +def _include_tag(opt_str, value, parser): + include_tags.add(value.replace('-', '_')) + +pre_configure = [] +post_configure = [] + + +def pre(fn): + pre_configure.append(fn) + return fn + + +def post(fn): + post_configure.append(fn) + return fn + + +@pre +def _setup_options(opt, file_config): + global options + options = opt + + +@pre +def _monkeypatch_cdecimal(options, file_config): + if options.cdecimal: + import cdecimal + sys.modules['decimal'] = cdecimal + + +@post +def _init_skiptest(options, file_config): + from sqlalchemy.testing import config + + config._skip_test_exception = _skip_test_exception + + +@post +def _engine_uri(options, file_config): + from sqlalchemy.testing import config + from sqlalchemy import testing + from sqlalchemy.testing import provision + + if options.dburi: + db_urls = list(options.dburi) + else: + db_urls = [] + + if options.db: + for db_token in options.db: + for db in re.split(r'[,\s]+', db_token): + if db not in file_config.options('db'): + raise RuntimeError( + "Unknown URI specifier '%s'. " + "Specify --dbs for known uris." 
+ % db) + else: + db_urls.append(file_config.get('db', db)) + + if not db_urls: + db_urls.append(file_config.get('db', 'default')) + + config._current = None + for db_url in db_urls: + cfg = provision.setup_config( + db_url, options, file_config, provision.FOLLOWER_IDENT) + + if not config._current: + cfg.set_as_current(cfg, testing) + + +@post +def _requirements(options, file_config): + + requirement_cls = file_config.get('sqla_testing', "requirement_cls") + _setup_requirements(requirement_cls) + + +def _setup_requirements(argument): + from sqlalchemy.testing import config + from sqlalchemy import testing + + if config.requirements is not None: + return + + modname, clsname = argument.split(":") + + # importlib.import_module() only introduced in 2.7, a little + # late + mod = __import__(modname) + for component in modname.split(".")[1:]: + mod = getattr(mod, component) + req_cls = getattr(mod, clsname) + + config.requirements = testing.requires = req_cls() + + +@post +def _prep_testing_database(options, file_config): + from sqlalchemy.testing import config, util + from sqlalchemy.testing.exclusions import against + from sqlalchemy import schema, inspect + + if options.dropfirst: + for cfg in config.Config.all_configs(): + e = cfg.db + inspector = inspect(e) + try: + view_names = inspector.get_view_names() + except NotImplementedError: + pass + else: + for vname in view_names: + e.execute(schema._DropView( + schema.Table(vname, schema.MetaData()) + )) + + if config.requirements.schemas.enabled_for_config(cfg): + try: + view_names = inspector.get_view_names( + schema="test_schema") + except NotImplementedError: + pass + else: + for vname in view_names: + e.execute(schema._DropView( + schema.Table(vname, schema.MetaData(), + schema="test_schema") + )) + + util.drop_all_tables(e, inspector) + + if config.requirements.schemas.enabled_for_config(cfg): + util.drop_all_tables(e, inspector, schema=cfg.test_schema) + + if against(cfg, "postgresql"): + from sqlalchemy.dialects import postgresql + for enum in inspector.get_enums("*"): + e.execute(postgresql.DropEnumType( + postgresql.ENUM( + name=enum['name'], + schema=enum['schema']))) + + +@post +def _reverse_topological(options, file_config): + if options.reversetop: + from sqlalchemy.orm.util import randomize_unitofwork + randomize_unitofwork() + + +@post +def _post_setup_options(opt, file_config): + from sqlalchemy.testing import config + config.options = options + config.file_config = file_config + + +@post +def _setup_profiling(options, file_config): + from sqlalchemy.testing import profiling + profiling._profile_stats = profiling.ProfileStatsFile( + file_config.get('sqla_testing', 'profile_file')) + + +def want_class(cls): + if not issubclass(cls, fixtures.TestBase): + return False + elif cls.__name__.startswith('_'): + return False + elif config.options.backend_only and not getattr(cls, '__backend__', + False): + return False + else: + return True + + +def want_method(cls, fn): + if not fn.__name__.startswith("test_"): + return False + elif fn.__module__ is None: + return False + elif include_tags: + return ( + hasattr(cls, '__tags__') and + exclusions.tags(cls.__tags__).include_test( + include_tags, exclude_tags) + ) or ( + hasattr(fn, '_sa_exclusion_extend') and + fn._sa_exclusion_extend.include_test( + include_tags, exclude_tags) + ) + elif exclude_tags and hasattr(cls, '__tags__'): + return exclusions.tags(cls.__tags__).include_test( + include_tags, exclude_tags) + elif exclude_tags and hasattr(fn, '_sa_exclusion_extend'): + return 
fn._sa_exclusion_extend.include_test(include_tags, exclude_tags) + else: + return True + + +def generate_sub_tests(cls, module): + if getattr(cls, '__backend__', False): + for cfg in _possible_configs_for_cls(cls): + name = "%s_%s_%s" % (cls.__name__, cfg.db.name, cfg.db.driver) + subcls = type( + name, + (cls, ), + { + "__only_on__": ("%s+%s" % (cfg.db.name, cfg.db.driver)), + } + ) + setattr(module, name, subcls) + yield subcls + else: + yield cls + + +def start_test_class(cls): + _do_skips(cls) + _setup_engine(cls) + + +def stop_test_class(cls): + #from sqlalchemy import inspect + #assert not inspect(testing.db).get_table_names() + engines.testing_reaper._stop_test_ctx() + try: + if not options.low_connections: + assertions.global_cleanup_assertions() + finally: + _restore_engine() + + +def _restore_engine(): + config._current.reset(testing) + + +def final_process_cleanup(): + engines.testing_reaper._stop_test_ctx_aggressive() + assertions.global_cleanup_assertions() + _restore_engine() + + +def _setup_engine(cls): + if getattr(cls, '__engine_options__', None): + eng = engines.testing_engine(options=cls.__engine_options__) + config._current.push_engine(eng, testing) + + +def before_test(test, test_module_name, test_class, test_name): + + # like a nose id, e.g.: + # "test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause" + name = test_class.__name__ + + suffix = "_%s_%s" % (config.db.name, config.db.driver) + if name.endswith(suffix): + name = name[0:-(len(suffix))] + + id_ = "%s.%s.%s" % (test_module_name, name, test_name) + + profiling._current_test = id_ + + +def after_test(test): + engines.testing_reaper._after_test_ctx() + + +def _possible_configs_for_cls(cls, reasons=None): + all_configs = set(config.Config.all_configs()) + + if cls.__unsupported_on__: + spec = exclusions.db_spec(*cls.__unsupported_on__) + for config_obj in list(all_configs): + if spec(config_obj): + all_configs.remove(config_obj) + + if getattr(cls, '__only_on__', None): + spec = exclusions.db_spec(*util.to_list(cls.__only_on__)) + for config_obj in list(all_configs): + if not spec(config_obj): + all_configs.remove(config_obj) + + if hasattr(cls, '__requires__'): + requirements = config.requirements + for config_obj in list(all_configs): + for requirement in cls.__requires__: + check = getattr(requirements, requirement) + + skip_reasons = check.matching_config_reasons(config_obj) + if skip_reasons: + all_configs.remove(config_obj) + if reasons is not None: + reasons.extend(skip_reasons) + break + + if hasattr(cls, '__prefer_requires__'): + non_preferred = set() + requirements = config.requirements + for config_obj in list(all_configs): + for requirement in cls.__prefer_requires__: + check = getattr(requirements, requirement) + + if not check.enabled_for_config(config_obj): + non_preferred.add(config_obj) + if all_configs.difference(non_preferred): + all_configs.difference_update(non_preferred) + + return all_configs + + +def _do_skips(cls): + reasons = [] + all_configs = _possible_configs_for_cls(cls, reasons) + + if getattr(cls, '__skip_if__', False): + for c in getattr(cls, '__skip_if__'): + if c(): + config.skip_test("'%s' skipped by %s" % ( + cls.__name__, c.__name__) + ) + + if not all_configs: + if getattr(cls, '__backend__', False): + msg = "'%s' unsupported for implementation '%s'" % ( + cls.__name__, cls.__only_on__) + else: + msg = "'%s' unsupported on any DB implementation %s%s" % ( + cls.__name__, + ", ".join( + "'%s(%s)+%s'" % ( + config_obj.db.name, + ".".join( + str(dig) for dig in + 
config_obj.db.dialect.server_version_info), + config_obj.db.driver + ) + for config_obj in config.Config.all_configs() + ), + ", ".join(reasons) + ) + config.skip_test(msg) + elif hasattr(cls, '__prefer_backends__'): + non_preferred = set() + spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__)) + for config_obj in all_configs: + if not spec(config_obj): + non_preferred.add(config_obj) + if all_configs.difference(non_preferred): + all_configs.difference_update(non_preferred) + + if config._current not in all_configs: + _setup_config(all_configs.pop(), cls) + + +def _setup_config(config_obj, ctx): + config._current.push(config_obj, testing) diff --git a/app/lib/sqlalchemy/testing/plugin/pytestplugin.py b/app/lib/sqlalchemy/testing/plugin/pytestplugin.py new file mode 100644 index 0000000..d7da738 --- /dev/null +++ b/app/lib/sqlalchemy/testing/plugin/pytestplugin.py @@ -0,0 +1,194 @@ +try: + # installed by bootstrap.py + import sqla_plugin_base as plugin_base +except ImportError: + # assume we're a package, use traditional import + from . import plugin_base + +import pytest +import argparse +import inspect +import collections +import os + +try: + import xdist # noqa + has_xdist = True +except ImportError: + has_xdist = False + + +def pytest_addoption(parser): + group = parser.getgroup("sqlalchemy") + + def make_option(name, **kw): + callback_ = kw.pop("callback", None) + if callback_: + class CallableAction(argparse.Action): + def __call__(self, parser, namespace, + values, option_string=None): + callback_(option_string, values, parser) + kw["action"] = CallableAction + + group.addoption(name, **kw) + + plugin_base.setup_options(make_option) + plugin_base.read_config() + + +def pytest_configure(config): + if hasattr(config, "slaveinput"): + plugin_base.restore_important_follower_config(config.slaveinput) + plugin_base.configure_follower( + config.slaveinput["follower_ident"] + ) + + if config.option.write_idents: + with open(config.option.write_idents, "a") as file_: + file_.write(config.slaveinput["follower_ident"] + "\n") + else: + if config.option.write_idents and \ + os.path.exists(config.option.write_idents): + os.remove(config.option.write_idents) + + plugin_base.pre_begin(config.option) + + plugin_base.set_coverage_flag(bool(getattr(config.option, + "cov_source", False))) + + plugin_base.set_skip_test(pytest.skip.Exception) + + +def pytest_sessionstart(session): + plugin_base.post_begin() + + +def pytest_sessionfinish(session): + plugin_base.final_process_cleanup() + + +if has_xdist: + import uuid + + def pytest_configure_node(node): + # the master for each node fills slaveinput dictionary + # which pytest-xdist will transfer to the subprocess + + plugin_base.memoize_important_follower_config(node.slaveinput) + + node.slaveinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12] + from sqlalchemy.testing import provision + provision.create_follower_db(node.slaveinput["follower_ident"]) + + def pytest_testnodedown(node, error): + from sqlalchemy.testing import provision + provision.drop_follower_db(node.slaveinput["follower_ident"]) + + +def pytest_collection_modifyitems(session, config, items): + # look for all those classes that specify __backend__ and + # expand them out into per-database test cases. + + # this is much easier to do within pytest_pycollect_makeitem, however + # pytest is iterating through cls.__dict__ as makeitem is + # called which causes a "dictionary changed size" error on py3k. 
+ # I'd submit a pullreq for them to turn it into a list first, but + # it's to suit the rather odd use case here which is that we are adding + # new classes to a module on the fly. + + rebuilt_items = collections.defaultdict(list) + items[:] = [ + item for item in + items if isinstance(item.parent, pytest.Instance) + and not item.parent.parent.name.startswith("_")] + test_classes = set(item.parent for item in items) + for test_class in test_classes: + for sub_cls in plugin_base.generate_sub_tests( + test_class.cls, test_class.parent.module): + if sub_cls is not test_class.cls: + list_ = rebuilt_items[test_class.cls] + + for inst in pytest.Class( + sub_cls.__name__, + parent=test_class.parent.parent).collect(): + list_.extend(inst.collect()) + + newitems = [] + for item in items: + if item.parent.cls in rebuilt_items: + newitems.extend(rebuilt_items[item.parent.cls]) + rebuilt_items[item.parent.cls][:] = [] + else: + newitems.append(item) + + # seems like the functions attached to a test class aren't sorted already? + # is that true and why's that? (when using unittest, they're sorted) + items[:] = sorted(newitems, key=lambda item: ( + item.parent.parent.parent.name, + item.parent.parent.name, + item.name + )) + + +def pytest_pycollect_makeitem(collector, name, obj): + if inspect.isclass(obj) and plugin_base.want_class(obj): + return pytest.Class(name, parent=collector) + elif inspect.isfunction(obj) and \ + isinstance(collector, pytest.Instance) and \ + plugin_base.want_method(collector.cls, obj): + return pytest.Function(name, parent=collector) + else: + return [] + +_current_class = None + + +def pytest_runtest_setup(item): + # here we seem to get called only based on what we collected + # in pytest_collection_modifyitems. So to do class-based stuff + # we have to tear that out. + global _current_class + + if not isinstance(item, pytest.Function): + return + + # ... so we're doing a little dance here to figure it out... + if _current_class is None: + class_setup(item.parent.parent) + _current_class = item.parent.parent + + # this is needed for the class-level, to ensure that the + # teardown runs after the class is completed with its own + # class-level teardown... + def finalize(): + global _current_class + class_teardown(item.parent.parent) + _current_class = None + item.parent.parent.addfinalizer(finalize) + + test_setup(item) + + +def pytest_runtest_teardown(item): + # ...but this works better as the hook here rather than + # using a finalizer, as the finalizer seems to get in the way + # of the test reporting failures correctly (you get a bunch of + # py.test assertion stuff instead) + test_teardown(item) + + +def test_setup(item): + plugin_base.before_test(item, item.parent.module.__name__, + item.parent.cls, item.name) + + +def test_teardown(item): + plugin_base.after_test(item) + + +def class_setup(item): + plugin_base.start_test_class(item.cls) + + +def class_teardown(item): + plugin_base.stop_test_class(item.cls) diff --git a/app/lib/sqlalchemy/testing/profiling.py b/app/lib/sqlalchemy/testing/profiling.py new file mode 100644 index 0000000..62cdaef --- /dev/null +++ b/app/lib/sqlalchemy/testing/profiling.py @@ -0,0 +1,265 @@ +# testing/profiling.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Profiling support for unit and performance tests. 
+
+These are special purpose profiling methods which operate
+in a more fine-grained way than nose's profiling plugin.
+
+"""
+
+import os
+import sys
+from .util import gc_collect
+from . import config
+import pstats
+import collections
+import contextlib
+
+try:
+    import cProfile
+except ImportError:
+    cProfile = None
+from ..util import jython, pypy, win32, update_wrapper
+
+_current_test = None
+
+# ProfileStatsFile instance, set up in plugin_base
+_profile_stats = None
+
+
+class ProfileStatsFile(object):
+    """Store per-platform/fn profiling results in a file.
+
+    We're still targeting Py2.5, 2.4 on 0.7 with no dependencies,
+    so no json lib :( need to roll something silly
+
+    """
+
+    def __init__(self, filename):
+        self.force_write = (
+            config.options is not None and
+            config.options.force_write_profiles
+        )
+        self.write = self.force_write or (
+            config.options is not None and
+            config.options.write_profiles
+        )
+        self.fname = os.path.abspath(filename)
+        self.short_fname = os.path.split(self.fname)[-1]
+        self.data = collections.defaultdict(
+            lambda: collections.defaultdict(dict))
+        self._read()
+        if self.write:
+            # rewrite for the case where features changed,
+            # etc.
+            self._write()
+
+    @property
+    def platform_key(self):
+
+        dbapi_key = config.db.name + "_" + config.db.driver
+
+        # keep it at 2.7, 3.1, 3.2, etc. for now.
+        py_version = '.'.join([str(v) for v in sys.version_info[0:2]])
+
+        platform_tokens = [py_version]
+        platform_tokens.append(dbapi_key)
+        if jython:
+            platform_tokens.append("jython")
+        if pypy:
+            platform_tokens.append("pypy")
+        if win32:
+            platform_tokens.append("win")
+        platform_tokens.append(
+            "nativeunicode"
+            if config.db.dialect.convert_unicode
+            else "dbapiunicode"
+        )
+        _has_cext = config.requirements._has_cextensions()
+        platform_tokens.append(_has_cext and "cextensions" or "nocextensions")
+        return "_".join(platform_tokens)
+
+    def has_stats(self):
+        test_key = _current_test
+        return (
+            test_key in self.data and
+            self.platform_key in self.data[test_key]
+        )
+
+    def result(self, callcount):
+        test_key = _current_test
+        per_fn = self.data[test_key]
+        per_platform = per_fn[self.platform_key]
+
+        if 'counts' not in per_platform:
+            per_platform['counts'] = counts = []
+        else:
+            counts = per_platform['counts']
+
+        if 'current_count' not in per_platform:
+            per_platform['current_count'] = current_count = 0
+        else:
+            current_count = per_platform['current_count']
+
+        has_count = len(counts) > current_count
+
+        if not has_count:
+            counts.append(callcount)
+            if self.write:
+                self._write()
+            result = None
+        else:
+            result = per_platform['lineno'], counts[current_count]
+        per_platform['current_count'] += 1
+        return result
+
+    def replace(self, callcount):
+        test_key = _current_test
+        per_fn = self.data[test_key]
+        per_platform = per_fn[self.platform_key]
+        counts = per_platform['counts']
+        current_count = per_platform['current_count']
+        if current_count < len(counts):
+            counts[current_count - 1] = callcount
+        else:
+            counts[-1] = callcount
+        if self.write:
+            self._write()
+
+    def _header(self):
+        return (
+            "# %s\n"
+            "# This file is written out on a per-environment basis.\n"
+            "# For each test in aaa_profiling, the corresponding "
+            "function and \n"
+            "# environment is located within this file. "
+            "If it doesn't exist,\n"
+            "# the test is skipped.\n"
+            "# If a callcount does exist, it is compared "
+            "to what we received. 
\n" + "# assertions are raised if the counts do not match.\n" + "# \n" + "# To add a new callcount test, apply the function_call_count \n" + "# decorator and re-run the tests using the --write-profiles \n" + "# option - this file will be rewritten including the new count.\n" + "# \n" + ) % (self.fname) + + def _read(self): + try: + profile_f = open(self.fname) + except IOError: + return + for lineno, line in enumerate(profile_f): + line = line.strip() + if not line or line.startswith("#"): + continue + + test_key, platform_key, counts = line.split() + per_fn = self.data[test_key] + per_platform = per_fn[platform_key] + c = [int(count) for count in counts.split(",")] + per_platform['counts'] = c + per_platform['lineno'] = lineno + 1 + per_platform['current_count'] = 0 + profile_f.close() + + def _write(self): + print(("Writing profile file %s" % self.fname)) + profile_f = open(self.fname, "w") + profile_f.write(self._header()) + for test_key in sorted(self.data): + + per_fn = self.data[test_key] + profile_f.write("\n# TEST: %s\n\n" % test_key) + for platform_key in sorted(per_fn): + per_platform = per_fn[platform_key] + c = ",".join(str(count) for count in per_platform['counts']) + profile_f.write("%s %s %s\n" % (test_key, platform_key, c)) + profile_f.close() + + +def function_call_count(variance=0.05): + """Assert a target for a test case's function call count. + + The main purpose of this assertion is to detect changes in + callcounts for various functions - the actual number is not as important. + Callcounts are stored in a file keyed to Python version and OS platform + information. This file is generated automatically for new tests, + and versioned so that unexpected changes in callcounts will be detected. + + """ + + def decorate(fn): + def wrap(*args, **kw): + with count_functions(variance=variance): + return fn(*args, **kw) + return update_wrapper(wrap, fn) + return decorate + + +@contextlib.contextmanager +def count_functions(variance=0.05): + if cProfile is None: + raise SkipTest("cProfile is not installed") + + if not _profile_stats.has_stats() and not _profile_stats.write: + config.skip_test( + "No profiling stats available on this " + "platform for this function. Run tests with " + "--write-profiles to add statistics to %s for " + "this platform." % _profile_stats.short_fname) + + gc_collect() + + pr = cProfile.Profile() + pr.enable() + #began = time.time() + yield + #ended = time.time() + pr.disable() + + #s = compat.StringIO() + stats = pstats.Stats(pr, stream=sys.stdout) + + #timespent = ended - began + callcount = stats.total_calls + + expected = _profile_stats.result(callcount) + + if expected is None: + expected_count = None + else: + line_no, expected_count = expected + + print(("Pstats calls: %d Expected %s" % ( + callcount, + expected_count + ) + )) + stats.sort_stats("cumulative") + stats.print_stats() + + if expected_count: + deviance = int(callcount * variance) + failed = abs(callcount - expected_count) > deviance + + if failed or _profile_stats.force_write: + if _profile_stats.write: + _profile_stats.replace(callcount) + else: + raise AssertionError( + "Adjusted function call count %s not within %s%% " + "of expected %s, platform %s. Rerun with " + "--write-profiles to " + "regenerate this callcount." 
+ % ( + callcount, (variance * 100), + expected_count, _profile_stats.platform_key)) + + diff --git a/app/lib/sqlalchemy/testing/provision.py b/app/lib/sqlalchemy/testing/provision.py new file mode 100644 index 0000000..7e44544 --- /dev/null +++ b/app/lib/sqlalchemy/testing/provision.py @@ -0,0 +1,318 @@ +from sqlalchemy.engine import url as sa_url +from sqlalchemy import text +from sqlalchemy import exc +from sqlalchemy.util import compat +from . import config, engines +import os +import time +import logging +log = logging.getLogger(__name__) + +FOLLOWER_IDENT = None + + +class register(object): + def __init__(self): + self.fns = {} + + @classmethod + def init(cls, fn): + return register().for_db("*")(fn) + + def for_db(self, dbname): + def decorate(fn): + self.fns[dbname] = fn + return self + return decorate + + def __call__(self, cfg, *arg): + if isinstance(cfg, compat.string_types): + url = sa_url.make_url(cfg) + elif isinstance(cfg, sa_url.URL): + url = cfg + else: + url = cfg.db.url + backend = url.get_backend_name() + if backend in self.fns: + return self.fns[backend](cfg, *arg) + else: + return self.fns['*'](cfg, *arg) + + +def create_follower_db(follower_ident): + for cfg in _configs_for_db_operation(): + _create_db(cfg, cfg.db, follower_ident) + + +def configure_follower(follower_ident): + for cfg in config.Config.all_configs(): + _configure_follower(cfg, follower_ident) + + +def setup_config(db_url, options, file_config, follower_ident): + if follower_ident: + db_url = _follower_url_from_main(db_url, follower_ident) + db_opts = {} + _update_db_opts(db_url, db_opts) + eng = engines.testing_engine(db_url, db_opts) + _post_configure_engine(db_url, eng, follower_ident) + eng.connect().close() + cfg = config.Config.register(eng, db_opts, options, file_config) + if follower_ident: + _configure_follower(cfg, follower_ident) + return cfg + + +def drop_follower_db(follower_ident): + for cfg in _configs_for_db_operation(): + _drop_db(cfg, cfg.db, follower_ident) + + +def _configs_for_db_operation(): + hosts = set() + + for cfg in config.Config.all_configs(): + cfg.db.dispose() + + for cfg in config.Config.all_configs(): + url = cfg.db.url + backend = url.get_backend_name() + host_conf = ( + backend, + url.username, url.host, url.database) + + if host_conf not in hosts: + yield cfg + hosts.add(host_conf) + + for cfg in config.Config.all_configs(): + cfg.db.dispose() + + +@register.init +def _create_db(cfg, eng, ident): + raise NotImplementedError("no DB creation routine for cfg: %s" % eng.url) + + +@register.init +def _drop_db(cfg, eng, ident): + raise NotImplementedError("no DB drop routine for cfg: %s" % eng.url) + + +@register.init +def _update_db_opts(db_url, db_opts): + pass + + +@register.init +def _configure_follower(cfg, ident): + pass + + +@register.init +def _post_configure_engine(url, engine, follower_ident): + pass + + +@register.init +def _follower_url_from_main(url, ident): + url = sa_url.make_url(url) + url.database = ident + return url + + +@_update_db_opts.for_db("mssql") +def _mssql_update_db_opts(db_url, db_opts): + db_opts['legacy_schema_aliasing'] = False + + + +@_follower_url_from_main.for_db("sqlite") +def _sqlite_follower_url_from_main(url, ident): + url = sa_url.make_url(url) + if not url.database or url.database == ':memory:': + return url + else: + return sa_url.make_url("sqlite:///%s.db" % ident) + + +@_post_configure_engine.for_db("sqlite") +def _sqlite_post_configure_engine(url, engine, follower_ident): + from sqlalchemy import event + + 
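+    # "connect" fires once per newly created DBAPI connection; the same
+    # hook pattern is commonly used elsewhere, e.g. (illustrative only,
+    # not part of this module):
+    #
+    #     @event.listens_for(engine, "connect")
+    #     def _set_sqlite_pragma(dbapi_connection, connection_record):
+    #         cursor = dbapi_connection.cursor()
+    #         cursor.execute("PRAGMA foreign_keys=ON")
+    #         cursor.close()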
@event.listens_for(engine, "connect") + def connect(dbapi_connection, connection_record): + # use file DBs in all cases, memory acts kind of strangely + # as an attached + if not follower_ident: + dbapi_connection.execute( + 'ATTACH DATABASE "test_schema.db" AS test_schema') + else: + dbapi_connection.execute( + 'ATTACH DATABASE "%s_test_schema.db" AS test_schema' + % follower_ident) + + +@_create_db.for_db("postgresql") +def _pg_create_db(cfg, eng, ident): + with eng.connect().execution_options( + isolation_level="AUTOCOMMIT") as conn: + try: + _pg_drop_db(cfg, conn, ident) + except Exception: + pass + currentdb = conn.scalar("select current_database()") + for attempt in range(3): + try: + conn.execute( + "CREATE DATABASE %s TEMPLATE %s" % (ident, currentdb)) + except exc.OperationalError as err: + if attempt != 2 and "accessed by other users" in str(err): + time.sleep(.2) + continue + else: + raise + else: + break + + +@_create_db.for_db("mysql") +def _mysql_create_db(cfg, eng, ident): + with eng.connect() as conn: + try: + _mysql_drop_db(cfg, conn, ident) + except Exception: + pass + conn.execute("CREATE DATABASE %s" % ident) + conn.execute("CREATE DATABASE %s_test_schema" % ident) + conn.execute("CREATE DATABASE %s_test_schema_2" % ident) + + +@_configure_follower.for_db("mysql") +def _mysql_configure_follower(config, ident): + config.test_schema = "%s_test_schema" % ident + config.test_schema_2 = "%s_test_schema_2" % ident + + +@_create_db.for_db("sqlite") +def _sqlite_create_db(cfg, eng, ident): + pass + + +@_drop_db.for_db("postgresql") +def _pg_drop_db(cfg, eng, ident): + with eng.connect().execution_options( + isolation_level="AUTOCOMMIT") as conn: + conn.execute( + text( + "select pg_terminate_backend(pid) from pg_stat_activity " + "where usename=current_user and pid != pg_backend_pid() " + "and datname=:dname" + ), dname=ident) + conn.execute("DROP DATABASE %s" % ident) + + +@_drop_db.for_db("sqlite") +def _sqlite_drop_db(cfg, eng, ident): + if ident: + os.remove("%s_test_schema.db" % ident) + else: + os.remove("%s.db" % ident) + + +@_drop_db.for_db("mysql") +def _mysql_drop_db(cfg, eng, ident): + with eng.connect() as conn: + conn.execute("DROP DATABASE %s_test_schema" % ident) + conn.execute("DROP DATABASE %s_test_schema_2" % ident) + conn.execute("DROP DATABASE %s" % ident) + + +@_create_db.for_db("oracle") +def _oracle_create_db(cfg, eng, ident): + # NOTE: make sure you've run "ALTER DATABASE default tablespace users" or + # similar, so that the default tablespace is not "system"; reflection will + # fail otherwise + with eng.connect() as conn: + conn.execute("create user %s identified by xe" % ident) + conn.execute("create user %s_ts1 identified by xe" % ident) + conn.execute("create user %s_ts2 identified by xe" % ident) + conn.execute("grant dba to %s" % (ident, )) + conn.execute("grant unlimited tablespace to %s" % ident) + conn.execute("grant unlimited tablespace to %s_ts1" % ident) + conn.execute("grant unlimited tablespace to %s_ts2" % ident) + +@_configure_follower.for_db("oracle") +def _oracle_configure_follower(config, ident): + config.test_schema = "%s_ts1" % ident + config.test_schema_2 = "%s_ts2" % ident + + +def _ora_drop_ignore(conn, dbname): + try: + conn.execute("drop user %s cascade" % dbname) + log.info("Reaped db: %s", dbname) + return True + except exc.DatabaseError as err: + log.warning("couldn't drop db: %s", err) + return False + + +@_drop_db.for_db("oracle") +def _oracle_drop_db(cfg, eng, ident): + with eng.connect() as conn: + # cx_Oracle seems to 
occasionally leak open connections when a large + # suite it run, even if we confirm we have zero references to + # connection objects. + # while there is a "kill session" command in Oracle, + # it unfortunately does not release the connection sufficiently. + _ora_drop_ignore(conn, ident) + _ora_drop_ignore(conn, "%s_ts1" % ident) + _ora_drop_ignore(conn, "%s_ts2" % ident) + + +@_update_db_opts.for_db("oracle") +def _oracle_update_db_opts(db_url, db_opts): + db_opts['_retry_on_12516'] = True + + +def reap_oracle_dbs(eng, idents_file): + log.info("Reaping Oracle dbs...") + with eng.connect() as conn: + with open(idents_file) as file_: + idents = set(line.strip() for line in file_) + + log.info("identifiers in file: %s", ", ".join(idents)) + + to_reap = conn.execute( + "select u.username from all_users u where username " + "like 'TEST_%' and not exists (select username " + "from v$session where username=u.username)") + all_names = set([username.lower() for (username, ) in to_reap]) + to_drop = set() + for name in all_names: + if name.endswith("_ts1") or name.endswith("_ts2"): + continue + elif name in idents: + to_drop.add(name) + if "%s_ts1" % name in all_names: + to_drop.add("%s_ts1" % name) + if "%s_ts2" % name in all_names: + to_drop.add("%s_ts2" % name) + + dropped = total = 0 + for total, username in enumerate(to_drop, 1): + if _ora_drop_ignore(conn, username): + dropped += 1 + log.info( + "Dropped %d out of %d stale databases detected", dropped, total) + + +@_follower_url_from_main.for_db("oracle") +def _oracle_follower_url_from_main(url, ident): + url = sa_url.make_url(url) + url.username = ident + url.password = 'xe' + return url + + diff --git a/app/lib/sqlalchemy/testing/replay_fixture.py b/app/lib/sqlalchemy/testing/replay_fixture.py new file mode 100644 index 0000000..b50f52e --- /dev/null +++ b/app/lib/sqlalchemy/testing/replay_fixture.py @@ -0,0 +1,172 @@ +from . import fixtures +from . import profiling +from .. import util +import types +from collections import deque +import contextlib +from . import config +from sqlalchemy import MetaData +from sqlalchemy import create_engine +from sqlalchemy.orm import Session + + +class ReplayFixtureTest(fixtures.TestBase): + + @contextlib.contextmanager + def _dummy_ctx(self, *arg, **kw): + yield + + def test_invocation(self): + + dbapi_session = ReplayableSession() + creator = config.db.pool._creator + recorder = lambda: dbapi_session.recorder(creator()) + engine = create_engine( + config.db.url, creator=recorder, + use_native_hstore=False) + self.metadata = MetaData(engine) + self.engine = engine + self.session = Session(engine) + + self.setup_engine() + try: + self._run_steps(ctx=self._dummy_ctx) + finally: + self.teardown_engine() + engine.dispose() + + player = lambda: dbapi_session.player() + engine = create_engine( + config.db.url, creator=player, + use_native_hstore=False) + + self.metadata = MetaData(engine) + self.engine = engine + self.session = Session(engine) + + self.setup_engine() + try: + self._run_steps(ctx=profiling.count_functions) + finally: + self.session.close() + engine.dispose() + + def setup_engine(self): + pass + + def teardown_engine(self): + pass + + def _run_steps(self, ctx): + raise NotImplementedError() + + +class ReplayableSession(object): + """A simple record/playback tool. + + This is *not* a mock testing class. It only records a session for later + playback and makes no assertions on call consistency whatsoever. It's + unlikely to be suitable for anything other than DB-API recording. 
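+
+    An illustrative round trip (the DBAPI module name is hypothetical)::
+
+        session = ReplayableSession()
+        conn = session.recorder(some_dbapi.connect())  # wraps and records
+        # ... run the workload once against ``conn`` ...
+        replayed = session.player()  # replays recorded results in order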
+ + """ + + Callable = object() + NoAttribute = object() + + if util.py2k: + Natives = set([getattr(types, t) + for t in dir(types) if not t.startswith('_')]).\ + difference([getattr(types, t) + for t in ('FunctionType', 'BuiltinFunctionType', + 'MethodType', 'BuiltinMethodType', + 'LambdaType', 'UnboundMethodType',)]) + else: + Natives = set([getattr(types, t) + for t in dir(types) if not t.startswith('_')]).\ + union([type(t) if not isinstance(t, type) + else t for t in __builtins__.values()]).\ + difference([getattr(types, t) + for t in ('FunctionType', 'BuiltinFunctionType', + 'MethodType', 'BuiltinMethodType', + 'LambdaType', )]) + + def __init__(self): + self.buffer = deque() + + def recorder(self, base): + return self.Recorder(self.buffer, base) + + def player(self): + return self.Player(self.buffer) + + class Recorder(object): + def __init__(self, buffer, subject): + self._buffer = buffer + self._subject = subject + + def __call__(self, *args, **kw): + subject, buffer = [object.__getattribute__(self, x) + for x in ('_subject', '_buffer')] + + result = subject(*args, **kw) + if type(result) not in ReplayableSession.Natives: + buffer.append(ReplayableSession.Callable) + return type(self)(buffer, result) + else: + buffer.append(result) + return result + + @property + def _sqla_unwrap(self): + return self._subject + + def __getattribute__(self, key): + try: + return object.__getattribute__(self, key) + except AttributeError: + pass + + subject, buffer = [object.__getattribute__(self, x) + for x in ('_subject', '_buffer')] + try: + result = type(subject).__getattribute__(subject, key) + except AttributeError: + buffer.append(ReplayableSession.NoAttribute) + raise + else: + if type(result) not in ReplayableSession.Natives: + buffer.append(ReplayableSession.Callable) + return type(self)(buffer, result) + else: + buffer.append(result) + return result + + class Player(object): + def __init__(self, buffer): + self._buffer = buffer + + def __call__(self, *args, **kw): + buffer = object.__getattribute__(self, '_buffer') + result = buffer.popleft() + if result is ReplayableSession.Callable: + return self + else: + return result + + @property + def _sqla_unwrap(self): + return None + + def __getattribute__(self, key): + try: + return object.__getattribute__(self, key) + except AttributeError: + pass + buffer = object.__getattribute__(self, '_buffer') + result = buffer.popleft() + if result is ReplayableSession.Callable: + return self + elif result is ReplayableSession.NoAttribute: + raise AttributeError(key) + else: + return result diff --git a/app/lib/sqlalchemy/testing/requirements.py b/app/lib/sqlalchemy/testing/requirements.py new file mode 100644 index 0000000..e4491ca --- /dev/null +++ b/app/lib/sqlalchemy/testing/requirements.py @@ -0,0 +1,800 @@ +# testing/requirements.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Global database feature support policy. + +Provides decorators to mark tests requiring specific feature support from the +target database. + +External dialect test suites should subclass SuiteRequirements +to provide specific inclusion/exclusions. + +""" + +import sys + +from . import exclusions +from .. 
import util + + +class Requirements(object): + pass + + +class SuiteRequirements(Requirements): + + @property + def create_table(self): + """target platform can emit basic CreateTable DDL.""" + + return exclusions.open() + + @property + def drop_table(self): + """target platform can emit basic DropTable DDL.""" + + return exclusions.open() + + @property + def foreign_keys(self): + """Target database must support foreign keys.""" + + return exclusions.open() + + @property + def on_update_cascade(self): + """"target database must support ON UPDATE..CASCADE behavior in + foreign keys.""" + + return exclusions.open() + + @property + def non_updating_cascade(self): + """target database must *not* support ON UPDATE..CASCADE behavior in + foreign keys.""" + return exclusions.closed() + + @property + def deferrable_fks(self): + return exclusions.closed() + + @property + def on_update_or_deferrable_fks(self): + # TODO: exclusions should be composable, + # somehow only_if([x, y]) isn't working here, negation/conjunctions + # getting confused. + return exclusions.only_if( + lambda: self.on_update_cascade.enabled or + self.deferrable_fks.enabled + ) + + @property + def self_referential_foreign_keys(self): + """Target database must support self-referential foreign keys.""" + + return exclusions.open() + + @property + def foreign_key_ddl(self): + """Target database must support the DDL phrases for FOREIGN KEY.""" + + return exclusions.open() + + @property + def named_constraints(self): + """target database must support names for constraints.""" + + return exclusions.open() + + @property + def subqueries(self): + """Target database must support subqueries.""" + + return exclusions.open() + + @property + def offset(self): + """target database can render OFFSET, or an equivalent, in a + SELECT. + """ + + return exclusions.open() + + @property + def bound_limit_offset(self): + """target database can render LIMIT and/or OFFSET using a bound + parameter + """ + + return exclusions.open() + + @property + def parens_in_union_contained_select_w_limit_offset(self): + """Target database must support parenthesized SELECT in UNION + when LIMIT/OFFSET is specifically present. + + E.g. (SELECT ...) UNION (SELECT ..) + + This is known to fail on SQLite. + + """ + return exclusions.open() + + @property + def parens_in_union_contained_select_wo_limit_offset(self): + """Target database must support parenthesized SELECT in UNION + when OFFSET/LIMIT is specifically not present. + + E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..) + + This is known to fail on SQLite. It also fails on Oracle + because without LIMIT/OFFSET, there is currently no step that + creates an additional subquery. + + """ + return exclusions.open() + + @property + def boolean_col_expressions(self): + """Target database must support boolean expressions as columns""" + + return exclusions.closed() + + @property + def nullsordering(self): + """Target backends that support nulls ordering.""" + + return exclusions.closed() + + @property + def standalone_binds(self): + """target database/driver supports bound parameters as column expressions + without being in the context of a typed column. + + """ + return exclusions.closed() + + @property + def intersect(self): + """Target database must support INTERSECT or equivalent.""" + return exclusions.closed() + + @property + def except_(self): + """Target database must support EXCEPT or equivalent (i.e. 
MINUS).""" + return exclusions.closed() + + @property + def window_functions(self): + """Target database must support window functions.""" + return exclusions.closed() + + @property + def autoincrement_insert(self): + """target platform generates new surrogate integer primary key values + when insert() is executed, excluding the pk column.""" + + return exclusions.open() + + @property + def fetch_rows_post_commit(self): + """target platform will allow cursor.fetchone() to proceed after a + COMMIT. + + Typically this refers to an INSERT statement with RETURNING which + is invoked within "autocommit". If the row can be returned + after the autocommit, then this rule can be open. + + """ + + return exclusions.open() + + @property + def empty_inserts(self): + """target platform supports INSERT with no values, i.e. + INSERT DEFAULT VALUES or equivalent.""" + + return exclusions.only_if( + lambda config: config.db.dialect.supports_empty_insert or + config.db.dialect.supports_default_values, + "empty inserts not supported" + ) + + @property + def insert_from_select(self): + """target platform supports INSERT from a SELECT.""" + + return exclusions.open() + + @property + def returning(self): + """target platform supports RETURNING.""" + + return exclusions.only_if( + lambda config: config.db.dialect.implicit_returning, + "%(database)s %(does_support)s 'returning'" + ) + + @property + def duplicate_names_in_cursor_description(self): + """target platform supports a SELECT statement that has + the same name repeated more than once in the columns list.""" + + return exclusions.open() + + @property + def denormalized_names(self): + """Target database must have 'denormalized', i.e. + UPPERCASE as case insensitive names.""" + + return exclusions.skip_if( + lambda config: not config.db.dialect.requires_name_normalize, + "Backend does not require denormalized names." + ) + + @property + def multivalues_inserts(self): + """target database must support multiple VALUES clauses in an + INSERT statement.""" + + return exclusions.skip_if( + lambda config: not config.db.dialect.supports_multivalues_insert, + "Backend does not support multirow inserts." + ) + + @property + def implements_get_lastrowid(self): + """"target dialect implements the executioncontext.get_lastrowid() + method without reliance on RETURNING. + + """ + return exclusions.open() + + @property + def emulated_lastrowid(self): + """"target dialect retrieves cursor.lastrowid, or fetches + from a database-side function after an insert() construct executes, + within the get_lastrowid() method. + + Only dialects that "pre-execute", or need RETURNING to get last + inserted id, would return closed/fail/skip for this. + + """ + return exclusions.closed() + + @property + def dbapi_lastrowid(self): + """"target platform includes a 'lastrowid' accessor on the DBAPI + cursor object. 
+ + """ + return exclusions.closed() + + @property + def views(self): + """Target database must support VIEWs.""" + + return exclusions.closed() + + @property + def schemas(self): + """Target database must support external schemas, and have one + named 'test_schema'.""" + + return exclusions.closed() + + @property + def server_side_cursors(self): + """Target dialect must support server side cursors.""" + + return exclusions.only_if([ + lambda config: config.db.dialect.supports_server_side_cursors + ], "no server side cursors support") + + @property + def sequences(self): + """Target database must support SEQUENCEs.""" + + return exclusions.only_if([ + lambda config: config.db.dialect.supports_sequences + ], "no sequence support") + + @property + def sequences_optional(self): + """Target database supports sequences, but also optionally + as a means of generating new PK values.""" + + return exclusions.only_if([ + lambda config: config.db.dialect.supports_sequences and + config.db.dialect.sequences_optional + ], "no sequence support, or sequences not optional") + + @property + def reflects_pk_names(self): + return exclusions.closed() + + @property + def table_reflection(self): + return exclusions.open() + + @property + def view_column_reflection(self): + """target database must support retrieval of the columns in a view, + similarly to how a table is inspected. + + This does not include the full CREATE VIEW definition. + + """ + return self.views + + @property + def view_reflection(self): + """target database must support inspection of the full CREATE VIEW definition. + """ + return self.views + + @property + def schema_reflection(self): + return self.schemas + + @property + def primary_key_constraint_reflection(self): + return exclusions.open() + + @property + def foreign_key_constraint_reflection(self): + return exclusions.open() + + @property + def foreign_key_constraint_option_reflection(self): + return exclusions.closed() + + @property + def temp_table_reflection(self): + return exclusions.open() + + @property + def temp_table_names(self): + """target dialect supports listing of temporary table names""" + return exclusions.closed() + + @property + def temporary_tables(self): + """target database supports temporary tables""" + return exclusions.open() + + @property + def temporary_views(self): + """target database supports temporary views""" + return exclusions.closed() + + @property + def index_reflection(self): + return exclusions.open() + + @property + def unique_constraint_reflection(self): + """target dialect supports reflection of unique constraints""" + return exclusions.open() + + @property + def duplicate_key_raises_integrity_error(self): + """target dialect raises IntegrityError when reporting an INSERT + with a primary key violation. (hint: it should) + + """ + return exclusions.open() + + @property + def unbounded_varchar(self): + """Target database must support VARCHAR with no length""" + + return exclusions.open() + + @property + def unicode_data(self): + """Target database/dialect must support Python unicode objects with + non-ASCII characters represented, delivered as bound parameters + as well as in result rows. + + """ + return exclusions.open() + + @property + def unicode_ddl(self): + """Target driver must support some degree of non-ascii symbol + names. + """ + return exclusions.closed() + + @property + def datetime_literals(self): + """target dialect supports rendering of a date, time, or datetime as a + literal string, e.g. 
via the TypeEngine.literal_processor() method. + + """ + + return exclusions.closed() + + @property + def datetime(self): + """target dialect supports representation of Python + datetime.datetime() objects.""" + + return exclusions.open() + + @property + def datetime_microseconds(self): + """target dialect supports representation of Python + datetime.datetime() with microsecond objects.""" + + return exclusions.open() + + @property + def datetime_historic(self): + """target dialect supports representation of Python + datetime.datetime() objects with historic (pre 1970) values.""" + + return exclusions.closed() + + @property + def date(self): + """target dialect supports representation of Python + datetime.date() objects.""" + + return exclusions.open() + + @property + def date_coerces_from_datetime(self): + """target dialect accepts a datetime object as the target + of a date column.""" + + return exclusions.open() + + @property + def date_historic(self): + """target dialect supports representation of Python + datetime.datetime() objects with historic (pre 1970) values.""" + + return exclusions.closed() + + @property + def time(self): + """target dialect supports representation of Python + datetime.time() objects.""" + + return exclusions.open() + + @property + def time_microseconds(self): + """target dialect supports representation of Python + datetime.time() with microsecond objects.""" + + return exclusions.open() + + @property + def binary_comparisons(self): + """target database/driver can allow BLOB/BINARY fields to be compared + against a bound parameter value. + """ + + return exclusions.open() + + @property + def binary_literals(self): + """target backend supports simple binary literals, e.g. an + expression like:: + + SELECT CAST('foo' AS BINARY) + + Where ``BINARY`` is the type emitted from :class:`.LargeBinary`, + e.g. it could be ``BLOB`` or similar. + + Basically fails on Oracle. + + """ + + return exclusions.open() + + @property + def json_type(self): + """target platform implements a native JSON type.""" + + return exclusions.closed() + + @property + def json_array_indexes(self): + """"target platform supports numeric array indexes + within a JSON structure""" + + return self.json_type + + @property + def precision_numerics_general(self): + """target backend has general support for moderately high-precision + numerics.""" + return exclusions.open() + + @property + def precision_numerics_enotation_small(self): + """target backend supports Decimal() objects using E notation + to represent very small values.""" + return exclusions.closed() + + @property + def precision_numerics_enotation_large(self): + """target backend supports Decimal() objects using E notation + to represent very large values.""" + return exclusions.closed() + + @property + def precision_numerics_many_significant_digits(self): + """target backend supports values with many digits on both sides, + such as 319438950232418390.273596, 87673.594069654243 + + """ + return exclusions.closed() + + @property + def precision_numerics_retains_significant_digits(self): + """A precision numeric type will return empty significant digits, + i.e. a value such as 10.000 will come back in Decimal form with + the .000 maintained.""" + + return exclusions.closed() + + @property + def precision_generic_float_type(self): + """target backend will return native floating point numbers with at + least seven decimal places when using the generic Float type. 
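+
+        Requirement properties such as this one are consumed as test
+        decorators, e.g. (test name illustrative)::
+
+            @testing.requires.precision_generic_float_type
+            def test_float_round_trip(self):
+                ...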
+ + """ + return exclusions.open() + + @property + def floats_to_four_decimals(self): + """target backend can return a floating-point number with four + significant digits (such as 15.7563) accurately + (i.e. without FP inaccuracies, such as 15.75629997253418). + + """ + return exclusions.open() + + @property + def fetch_null_from_numeric(self): + """target backend doesn't crash when you try to select a NUMERIC + value that has a value of NULL. + + Added to support Pyodbc bug #351. + """ + + return exclusions.open() + + @property + def text_type(self): + """Target database must support an unbounded Text() " + "type such as TEXT or CLOB""" + + return exclusions.open() + + @property + def empty_strings_varchar(self): + """target database can persist/return an empty string with a + varchar. + + """ + return exclusions.open() + + @property + def empty_strings_text(self): + """target database can persist/return an empty string with an + unbounded text.""" + + return exclusions.open() + + @property + def selectone(self): + """target driver must support the literal statement 'select 1'""" + return exclusions.open() + + @property + def savepoints(self): + """Target database must support savepoints.""" + + return exclusions.closed() + + @property + def two_phase_transactions(self): + """Target database must support two-phase transactions.""" + + return exclusions.closed() + + @property + def update_from(self): + """Target must support UPDATE..FROM syntax""" + return exclusions.closed() + + @property + def update_where_target_in_subquery(self): + """Target must support UPDATE where the same table is present in a + subquery in the WHERE clause. + + This is an ANSI-standard syntax that apparently MySQL can't handle, + such as: + + UPDATE documents SET flag=1 WHERE documents.title IN + (SELECT max(documents.title) AS title + FROM documents GROUP BY documents.user_id + ) + """ + return exclusions.open() + + @property + def mod_operator_as_percent_sign(self): + """target database must use a plain percent '%' as the 'modulus' + operator.""" + return exclusions.closed() + + @property + def percent_schema_names(self): + """target backend supports weird identifiers with percent signs + in them, e.g. 'some % column'. + + this is a very weird use case but often has problems because of + DBAPIs that use python formatting. It's not a critical use + case either. + + """ + return exclusions.closed() + + @property + def order_by_label_with_expression(self): + """target backend supports ORDER BY a column label within an + expression. + + Basically this:: + + select data as foo from test order by foo || 'bar' + + Lots of databases including PostgreSQL don't support this, + so this is off by default. + + """ + return exclusions.closed() + + @property + def unicode_connections(self): + """Target driver must support non-ASCII characters being passed at + all. + """ + return exclusions.open() + + @property + def graceful_disconnects(self): + """Target driver must raise a DBAPI-level exception, such as + InterfaceError, when the underlying connection has been closed + and the execute() method is called. + """ + return exclusions.open() + + @property + def skip_mysql_on_windows(self): + """Catchall for a large variety of MySQL on Windows failures""" + return exclusions.open() + + @property + def ad_hoc_engines(self): + """Test environment must allow ad-hoc engine/connection creation. + + DBs that scale poorly for many connections, even when closed, i.e. 
+ Oracle, may use the "--low-connections" option which flags this + requirement as not present. + + """ + return exclusions.skip_if( + lambda config: config.options.low_connections) + + @property + def timing_intensive(self): + return exclusions.requires_tag("timing_intensive") + + @property + def memory_intensive(self): + return exclusions.requires_tag("memory_intensive") + + @property + def threading_with_mock(self): + """Mark tests that use threading and mock at the same time - stability + issues have been observed with coverage + python 3.3 + + """ + return exclusions.skip_if( + lambda config: util.py3k and config.options.has_coverage, + "Stability issues with coverage + py3k" + ) + + @property + def python2(self): + return exclusions.skip_if( + lambda: sys.version_info >= (3,), + "Python version 2.xx is required." + ) + + @property + def python3(self): + return exclusions.skip_if( + lambda: sys.version_info < (3,), + "Python version 3.xx is required." + ) + + @property + def cpython(self): + return exclusions.only_if( + lambda: util.cpython, + "cPython interpreter needed" + ) + + @property + def non_broken_pickle(self): + from sqlalchemy.util import pickle + return exclusions.only_if( + lambda: not util.pypy and pickle.__name__ == 'cPickle' + or sys.version_info >= (3, 2), + "Needs cPickle+cPython or newer Python 3 pickle" + ) + + @property + def predictable_gc(self): + """target platform must remove all cycles unconditionally when + gc.collect() is called, as well as clean out unreferenced subclasses. + + """ + return self.cpython + + @property + def no_coverage(self): + """Test should be skipped if coverage is enabled. + + This is to block tests that exercise libraries that seem to be + sensitive to coverage, such as PostgreSQL notice logging. + + """ + return exclusions.skip_if( + lambda config: config.options.has_coverage, + "Issues observed when coverage is enabled" + ) + + def _has_mysql_on_windows(self, config): + return False + + def _has_mysql_fully_case_sensitive(self, config): + return False + + @property + def sqlite(self): + return exclusions.skip_if(lambda: not self._has_sqlite()) + + @property + def cextensions(self): + return exclusions.skip_if( + lambda: not self._has_cextensions(), "C extensions not installed" + ) + + def _has_sqlite(self): + from sqlalchemy import create_engine + try: + create_engine('sqlite://') + return True + except ImportError: + return False + + def _has_cextensions(self): + try: + from sqlalchemy import cresultproxy, cprocessors + return True + except ImportError: + return False diff --git a/app/lib/sqlalchemy/testing/runner.py b/app/lib/sqlalchemy/testing/runner.py new file mode 100644 index 0000000..75019d7 --- /dev/null +++ b/app/lib/sqlalchemy/testing/runner.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# testing/runner.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +""" +Nose test runner module. + +This script is a front-end to "nosetests" which +installs SQLAlchemy's testing plugin into the local environment. + +The script is intended to be used by third-party dialects and extensions +that run within SQLAlchemy's testing framework. The runner can +be invoked via:: + + python -m sqlalchemy.testing.runner + +The script is then essentially the same as the "nosetests" script, including +all of the usual Nose options. 
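+For example (the URI shown is illustrative; ``--dburi`` is among the
+options added by the testing plugin)::
+
+    python -m sqlalchemy.testing.runner --dburi postgresql://scott:tiger@localhost/test
+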
The test environment requires that a +setup.cfg is locally present including various required options. + +Note that when using this runner, Nose's "coverage" plugin will not be +able to provide coverage for SQLAlchemy itself, since SQLAlchemy is +imported into sys.modules before coverage is started. The special +script sqla_nose.py is provided as a top-level script which loads the +plugin in a special (somewhat hacky) way so that coverage against +SQLAlchemy itself is possible. + +""" + +from .plugin.noseplugin import NoseSQLAlchemy + +import nose + + +def main(): + nose.main(addplugins=[NoseSQLAlchemy()]) + + +def setup_py_test(): + """Runner to use for the 'test_suite' entry of your setup.py. + + Prevents any name clash shenanigans from the command line + argument "test" that the "setup.py test" command sends + to nose. + + """ + nose.main(addplugins=[NoseSQLAlchemy()], argv=['runner']) diff --git a/app/lib/sqlalchemy/testing/schema.py b/app/lib/sqlalchemy/testing/schema.py new file mode 100644 index 0000000..018a291 --- /dev/null +++ b/app/lib/sqlalchemy/testing/schema.py @@ -0,0 +1,101 @@ +# testing/schema.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from . import exclusions +from .. import schema, event +from . import config + +__all__ = 'Table', 'Column', + +table_options = {} + + +def Table(*args, **kw): + """A schema.Table wrapper/hook for dialect-specific tweaks.""" + + test_opts = dict([(k, kw.pop(k)) for k in list(kw) + if k.startswith('test_')]) + + kw.update(table_options) + + if exclusions.against(config._current, 'mysql'): + if 'mysql_engine' not in kw and 'mysql_type' not in kw: + if 'test_needs_fk' in test_opts or 'test_needs_acid' in test_opts: + kw['mysql_engine'] = 'InnoDB' + else: + kw['mysql_engine'] = 'MyISAM' + + # Apply some default cascading rules for self-referential foreign keys. + # MySQL InnoDB has some issues around seleting self-refs too. + if exclusions.against(config._current, 'firebird'): + table_name = args[0] + unpack = (config.db.dialect. + identifier_preparer.unformat_identifiers) + + # Only going after ForeignKeys in Columns. May need to + # expand to ForeignKeyConstraint too. 
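+    # The nested comprehension below is equivalent to this expanded
+    # loop (sketch only):
+    #
+    #     fks = []
+    #     for col in args:
+    #         if isinstance(col, schema.Column):
+    #             fks.extend(col.foreign_keys)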
+ fks = [fk + for col in args if isinstance(col, schema.Column) + for fk in col.foreign_keys] + + for fk in fks: + # root around in raw spec + ref = fk._colspec + if isinstance(ref, schema.Column): + name = ref.table.name + else: + # take just the table name: on FB there cannot be + # a schema, so the first element is always the + # table name, possibly followed by the field name + name = unpack(ref)[0] + if name == table_name: + if fk.ondelete is None: + fk.ondelete = 'CASCADE' + if fk.onupdate is None: + fk.onupdate = 'CASCADE' + + return schema.Table(*args, **kw) + + +def Column(*args, **kw): + """A schema.Column wrapper/hook for dialect-specific tweaks.""" + + test_opts = dict([(k, kw.pop(k)) for k in list(kw) + if k.startswith('test_')]) + + if not config.requirements.foreign_key_ddl.enabled_for_config(config): + args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)] + + col = schema.Column(*args, **kw) + if test_opts.get('test_needs_autoincrement', False) and \ + kw.get('primary_key', False): + + if col.default is None and col.server_default is None: + col.autoincrement = True + + # allow any test suite to pick up on this + col.info['test_needs_autoincrement'] = True + + # hardcoded rule for firebird, oracle; this should + # be moved out + if exclusions.against(config._current, 'firebird', 'oracle'): + def add_seq(c, tbl): + c._init_items( + schema.Sequence(_truncate_name( + config.db.dialect, tbl.name + '_' + c.name + '_seq'), + optional=True) + ) + event.listen(col, 'after_parent_attach', add_seq, propagate=True) + return col + + +def _truncate_name(dialect, name): + if len(name) > dialect.max_identifier_length: + return name[0:max(dialect.max_identifier_length - 6, 0)] + \ + "_" + hex(hash(name) % 64)[2:] + else: + return name diff --git a/app/lib/sqlalchemy/testing/suite/__init__.py b/app/lib/sqlalchemy/testing/suite/__init__.py new file mode 100644 index 0000000..9eeffd4 --- /dev/null +++ b/app/lib/sqlalchemy/testing/suite/__init__.py @@ -0,0 +1,10 @@ + +from sqlalchemy.testing.suite.test_dialect import * +from sqlalchemy.testing.suite.test_ddl import * +from sqlalchemy.testing.suite.test_insert import * +from sqlalchemy.testing.suite.test_sequence import * +from sqlalchemy.testing.suite.test_select import * +from sqlalchemy.testing.suite.test_results import * +from sqlalchemy.testing.suite.test_update_delete import * +from sqlalchemy.testing.suite.test_reflection import * +from sqlalchemy.testing.suite.test_types import * diff --git a/app/lib/sqlalchemy/testing/suite/test_ddl.py b/app/lib/sqlalchemy/testing/suite/test_ddl.py new file mode 100644 index 0000000..1d8010c --- /dev/null +++ b/app/lib/sqlalchemy/testing/suite/test_ddl.py @@ -0,0 +1,65 @@ + + +from .. 
import fixtures, config, util +from ..config import requirements +from ..assertions import eq_ + +from sqlalchemy import Table, Column, Integer, String + + +class TableDDLTest(fixtures.TestBase): + __backend__ = True + + def _simple_fixture(self): + return Table('test_table', self.metadata, + Column('id', Integer, primary_key=True, + autoincrement=False), + Column('data', String(50)) + ) + + def _underscore_fixture(self): + return Table('_test_table', self.metadata, + Column('id', Integer, primary_key=True, + autoincrement=False), + Column('_data', String(50)) + ) + + def _simple_roundtrip(self, table): + with config.db.begin() as conn: + conn.execute(table.insert().values((1, 'some data'))) + result = conn.execute(table.select()) + eq_( + result.first(), + (1, 'some data') + ) + + @requirements.create_table + @util.provide_metadata + def test_create_table(self): + table = self._simple_fixture() + table.create( + config.db, checkfirst=False + ) + self._simple_roundtrip(table) + + @requirements.drop_table + @util.provide_metadata + def test_drop_table(self): + table = self._simple_fixture() + table.create( + config.db, checkfirst=False + ) + table.drop( + config.db, checkfirst=False + ) + + @requirements.create_table + @util.provide_metadata + def test_underscore_names(self): + table = self._underscore_fixture() + table.create( + config.db, checkfirst=False + ) + self._simple_roundtrip(table) + +__all__ = ('TableDDLTest', ) diff --git a/app/lib/sqlalchemy/testing/suite/test_dialect.py b/app/lib/sqlalchemy/testing/suite/test_dialect.py new file mode 100644 index 0000000..00884a2 --- /dev/null +++ b/app/lib/sqlalchemy/testing/suite/test_dialect.py @@ -0,0 +1,41 @@ +from .. import fixtures, config +from ..config import requirements +from sqlalchemy import exc +from sqlalchemy import Integer, String +from .. import assert_raises +from ..schema import Table, Column + + +class ExceptionTest(fixtures.TablesTest): + """Test basic exception wrapping. + + DBAPIs vary a lot in exception behavior so to actually anticipate + specific exceptions from real round trips, we need to be conservative. + + """ + run_deletes = 'each' + + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table('manual_pk', metadata, + Column('id', Integer, primary_key=True, autoincrement=False), + Column('data', String(50)) + ) + + @requirements.duplicate_key_raises_integrity_error + def test_integrity_error(self): + + with config.db.begin() as conn: + conn.execute( + self.tables.manual_pk.insert(), + {'id': 1, 'data': 'd1'} + ) + + assert_raises( + exc.IntegrityError, + conn.execute, + self.tables.manual_pk.insert(), + {'id': 1, 'data': 'd1'} + ) diff --git a/app/lib/sqlalchemy/testing/suite/test_insert.py b/app/lib/sqlalchemy/testing/suite/test_insert.py new file mode 100644 index 0000000..c0b6b18 --- /dev/null +++ b/app/lib/sqlalchemy/testing/suite/test_insert.py @@ -0,0 +1,319 @@ +from .. import fixtures, config +from ..config import requirements +from .. import exclusions +from ..assertions import eq_ +from .. 
import engines + +from sqlalchemy import Integer, String, select, literal_column, literal + +from ..schema import Table, Column + + +class LastrowidTest(fixtures.TablesTest): + run_deletes = 'each' + + __backend__ = True + + __requires__ = 'implements_get_lastrowid', 'autoincrement_insert' + + __engine_options__ = {"implicit_returning": False} + + @classmethod + def define_tables(cls, metadata): + Table('autoinc_pk', metadata, + Column('id', Integer, primary_key=True, + test_needs_autoincrement=True), + Column('data', String(50)) + ) + + Table('manual_pk', metadata, + Column('id', Integer, primary_key=True, autoincrement=False), + Column('data', String(50)) + ) + + def _assert_round_trip(self, table, conn): + row = conn.execute(table.select()).first() + eq_( + row, + (config.db.dialect.default_sequence_base, "some data") + ) + + def test_autoincrement_on_insert(self): + + config.db.execute( + self.tables.autoinc_pk.insert(), + data="some data" + ) + self._assert_round_trip(self.tables.autoinc_pk, config.db) + + def test_last_inserted_id(self): + + r = config.db.execute( + self.tables.autoinc_pk.insert(), + data="some data" + ) + pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) + eq_( + r.inserted_primary_key, + [pk] + ) + + # failed on pypy1.9 but seems to be OK on pypy 2.1 + # @exclusions.fails_if(lambda: util.pypy, + # "lastrowid not maintained after " + # "connection close") + @requirements.dbapi_lastrowid + def test_native_lastrowid_autoinc(self): + r = config.db.execute( + self.tables.autoinc_pk.insert(), + data="some data" + ) + lastrowid = r.lastrowid + pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) + eq_( + lastrowid, pk + ) + + +class InsertBehaviorTest(fixtures.TablesTest): + run_deletes = 'each' + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table('autoinc_pk', metadata, + Column('id', Integer, primary_key=True, + test_needs_autoincrement=True), + Column('data', String(50)) + ) + Table('manual_pk', metadata, + Column('id', Integer, primary_key=True, autoincrement=False), + Column('data', String(50)) + ) + Table('includes_defaults', metadata, + Column('id', Integer, primary_key=True, + test_needs_autoincrement=True), + Column('data', String(50)), + Column('x', Integer, default=5), + Column('y', Integer, + default=literal_column("2", type_=Integer) + literal(2))) + + def test_autoclose_on_insert(self): + if requirements.returning.enabled: + engine = engines.testing_engine( + options={'implicit_returning': False}) + else: + engine = config.db + + r = engine.execute( + self.tables.autoinc_pk.insert(), + data="some data" + ) + assert r._soft_closed + assert not r.closed + assert r.is_insert + assert not r.returns_rows + + @requirements.returning + def test_autoclose_on_insert_implicit_returning(self): + r = config.db.execute( + self.tables.autoinc_pk.insert(), + data="some data" + ) + assert r._soft_closed + assert not r.closed + assert r.is_insert + assert not r.returns_rows + + @requirements.empty_inserts + def test_empty_insert(self): + r = config.db.execute( + self.tables.autoinc_pk.insert(), + ) + assert r._soft_closed + assert not r.closed + + r = config.db.execute( + self.tables.autoinc_pk.select(). 
+ where(self.tables.autoinc_pk.c.id != None) + ) + + assert len(r.fetchall()) + + @requirements.insert_from_select + def test_insert_from_select_autoinc(self): + src_table = self.tables.manual_pk + dest_table = self.tables.autoinc_pk + config.db.execute( + src_table.insert(), + [ + dict(id=1, data="data1"), + dict(id=2, data="data2"), + dict(id=3, data="data3"), + ] + ) + + result = config.db.execute( + dest_table.insert(). + from_select( + ("data",), + select([src_table.c.data]). + where(src_table.c.data.in_(["data2", "data3"])) + ) + ) + + eq_(result.inserted_primary_key, [None]) + + result = config.db.execute( + select([dest_table.c.data]).order_by(dest_table.c.data) + ) + eq_(result.fetchall(), [("data2", ), ("data3", )]) + + @requirements.insert_from_select + def test_insert_from_select_autoinc_no_rows(self): + src_table = self.tables.manual_pk + dest_table = self.tables.autoinc_pk + + result = config.db.execute( + dest_table.insert(). + from_select( + ("data",), + select([src_table.c.data]). + where(src_table.c.data.in_(["data2", "data3"])) + ) + ) + eq_(result.inserted_primary_key, [None]) + + result = config.db.execute( + select([dest_table.c.data]).order_by(dest_table.c.data) + ) + + eq_(result.fetchall(), []) + + @requirements.insert_from_select + def test_insert_from_select(self): + table = self.tables.manual_pk + config.db.execute( + table.insert(), + [ + dict(id=1, data="data1"), + dict(id=2, data="data2"), + dict(id=3, data="data3"), + ] + ) + + config.db.execute( + table.insert(inline=True). + from_select(("id", "data",), + select([table.c.id + 5, table.c.data]). + where(table.c.data.in_(["data2", "data3"])) + ), + ) + + eq_( + config.db.execute( + select([table.c.data]).order_by(table.c.data) + ).fetchall(), + [("data1", ), ("data2", ), ("data2", ), + ("data3", ), ("data3", )] + ) + + @requirements.insert_from_select + def test_insert_from_select_with_defaults(self): + table = self.tables.includes_defaults + config.db.execute( + table.insert(), + [ + dict(id=1, data="data1"), + dict(id=2, data="data2"), + dict(id=3, data="data3"), + ] + ) + + config.db.execute( + table.insert(inline=True). + from_select(("id", "data",), + select([table.c.id + 5, table.c.data]). 
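+                    # (Sketch) the surrounding from_select() construct
+                    # renders roughly the following SQL, with the column
+                    # defaults for "x" and "y" inlined into the SELECT:
+                    #
+                    #     INSERT INTO includes_defaults (id, data, x, y)
+                    #     SELECT id + 5, data, 5, 2 + 2
+                    #     FROM includes_defaults
+                    #     WHERE data IN ('data2', 'data3')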
+ where(table.c.data.in_(["data2", "data3"])) + ), + ) + + eq_( + config.db.execute( + select([table]).order_by(table.c.data, table.c.id) + ).fetchall(), + [(1, 'data1', 5, 4), (2, 'data2', 5, 4), + (7, 'data2', 5, 4), (3, 'data3', 5, 4), (8, 'data3', 5, 4)] + ) + + +class ReturningTest(fixtures.TablesTest): + run_create_tables = 'each' + __requires__ = 'returning', 'autoincrement_insert' + __backend__ = True + + __engine_options__ = {"implicit_returning": True} + + def _assert_round_trip(self, table, conn): + row = conn.execute(table.select()).first() + eq_( + row, + (config.db.dialect.default_sequence_base, "some data") + ) + + @classmethod + def define_tables(cls, metadata): + Table('autoinc_pk', metadata, + Column('id', Integer, primary_key=True, + test_needs_autoincrement=True), + Column('data', String(50)) + ) + + @requirements.fetch_rows_post_commit + def test_explicit_returning_pk_autocommit(self): + engine = config.db + table = self.tables.autoinc_pk + r = engine.execute( + table.insert().returning( + table.c.id), + data="some data" + ) + pk = r.first()[0] + fetched_pk = config.db.scalar(select([table.c.id])) + eq_(fetched_pk, pk) + + def test_explicit_returning_pk_no_autocommit(self): + engine = config.db + table = self.tables.autoinc_pk + with engine.begin() as conn: + r = conn.execute( + table.insert().returning( + table.c.id), + data="some data" + ) + pk = r.first()[0] + fetched_pk = config.db.scalar(select([table.c.id])) + eq_(fetched_pk, pk) + + def test_autoincrement_on_insert_implcit_returning(self): + + config.db.execute( + self.tables.autoinc_pk.insert(), + data="some data" + ) + self._assert_round_trip(self.tables.autoinc_pk, config.db) + + def test_last_inserted_id_implicit_returning(self): + + r = config.db.execute( + self.tables.autoinc_pk.insert(), + data="some data" + ) + pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) + eq_( + r.inserted_primary_key, + [pk] + ) + + +__all__ = ('LastrowidTest', 'InsertBehaviorTest', 'ReturningTest') diff --git a/app/lib/sqlalchemy/testing/suite/test_reflection.py b/app/lib/sqlalchemy/testing/suite/test_reflection.py new file mode 100644 index 0000000..ed6a33b --- /dev/null +++ b/app/lib/sqlalchemy/testing/suite/test_reflection.py @@ -0,0 +1,746 @@ + + +import sqlalchemy as sa +from sqlalchemy import exc as sa_exc +from sqlalchemy import types as sql_types +from sqlalchemy import inspect +from sqlalchemy import MetaData, Integer, String +from sqlalchemy.engine.reflection import Inspector +from sqlalchemy.testing import engines, fixtures +from sqlalchemy.testing.schema import Table, Column +from sqlalchemy.testing import eq_, assert_raises_message +from sqlalchemy import testing +from .. 
import config +import operator +from sqlalchemy.schema import DDL, Index +from sqlalchemy import event +from sqlalchemy.sql.elements import quoted_name +from sqlalchemy import ForeignKey + +metadata, users = None, None + + +class HasTableTest(fixtures.TablesTest): + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table('test_table', metadata, + Column('id', Integer, primary_key=True), + Column('data', String(50)) + ) + + def test_has_table(self): + with config.db.begin() as conn: + assert config.db.dialect.has_table(conn, "test_table") + assert not config.db.dialect.has_table(conn, "nonexistent_table") + + +class ComponentReflectionTest(fixtures.TablesTest): + run_inserts = run_deletes = None + + __backend__ = True + + @classmethod + def setup_bind(cls): + if config.requirements.independent_connections.enabled: + from sqlalchemy import pool + return engines.testing_engine( + options=dict(poolclass=pool.StaticPool)) + else: + return config.db + + @classmethod + def define_tables(cls, metadata): + cls.define_reflected_tables(metadata, None) + if testing.requires.schemas.enabled: + cls.define_reflected_tables(metadata, testing.config.test_schema) + + @classmethod + def define_reflected_tables(cls, metadata, schema): + if schema: + schema_prefix = schema + "." + else: + schema_prefix = "" + + if testing.requires.self_referential_foreign_keys.enabled: + users = Table('users', metadata, + Column('user_id', sa.INT, primary_key=True), + Column('test1', sa.CHAR(5), nullable=False), + Column('test2', sa.Float(5), nullable=False), + Column('parent_user_id', sa.Integer, + sa.ForeignKey('%susers.user_id' % + schema_prefix)), + schema=schema, + test_needs_fk=True, + ) + else: + users = Table('users', metadata, + Column('user_id', sa.INT, primary_key=True), + Column('test1', sa.CHAR(5), nullable=False), + Column('test2', sa.Float(5), nullable=False), + schema=schema, + test_needs_fk=True, + ) + + Table("dingalings", metadata, + Column('dingaling_id', sa.Integer, primary_key=True), + Column('address_id', sa.Integer, + sa.ForeignKey('%semail_addresses.address_id' % + schema_prefix)), + Column('data', sa.String(30)), + schema=schema, + test_needs_fk=True, + ) + Table('email_addresses', metadata, + Column('address_id', sa.Integer), + Column('remote_user_id', sa.Integer, + sa.ForeignKey(users.c.user_id)), + Column('email_address', sa.String(20)), + sa.PrimaryKeyConstraint('address_id', name='email_ad_pk'), + schema=schema, + test_needs_fk=True, + ) + + if testing.requires.index_reflection.enabled: + cls.define_index(metadata, users) + if testing.requires.view_column_reflection.enabled: + cls.define_views(metadata, schema) + if not schema and testing.requires.temp_table_reflection.enabled: + cls.define_temp_tables(metadata) + + @classmethod + def define_temp_tables(cls, metadata): + # cheat a bit, we should fix this with some dialect-level + # temp table fixture + if testing.against("oracle"): + kw = { + 'prefixes': ["GLOBAL TEMPORARY"], + 'oracle_on_commit': 'PRESERVE ROWS' + } + else: + kw = { + 'prefixes': ["TEMPORARY"], + } + + user_tmp = Table( + "user_tmp", metadata, + Column("id", sa.INT, primary_key=True), + Column('name', sa.VARCHAR(50)), + Column('foo', sa.INT), + sa.UniqueConstraint('name', name='user_tmp_uq'), + sa.Index("user_tmp_ix", "foo"), + **kw + ) + if testing.requires.view_reflection.enabled and \ + testing.requires.temporary_views.enabled: + event.listen( + user_tmp, "after_create", + DDL("create temporary view user_tmp_v as " + "select * from user_tmp") + ) + 
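+            # The same after_create / before_drop pairing works for any
+            # ad-hoc DDL; e.g. (hypothetical index on a hypothetical
+            # table):
+            #
+            #     event.listen(some_table, "after_create",
+            #                  DDL("CREATE INDEX ix_foo ON some_table (foo)"))
+            #     event.listen(some_table, "before_drop",
+            #                  DDL("DROP INDEX ix_foo"))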
event.listen( + user_tmp, "before_drop", + DDL("drop view user_tmp_v") + ) + + @classmethod + def define_index(cls, metadata, users): + Index("users_t_idx", users.c.test1, users.c.test2) + Index("users_all_idx", users.c.user_id, users.c.test2, users.c.test1) + + @classmethod + def define_views(cls, metadata, schema): + for table_name in ('users', 'email_addresses'): + fullname = table_name + if schema: + fullname = "%s.%s" % (schema, table_name) + view_name = fullname + '_v' + query = "CREATE VIEW %s AS SELECT * FROM %s" % ( + view_name, fullname) + + event.listen( + metadata, + "after_create", + DDL(query) + ) + event.listen( + metadata, + "before_drop", + DDL("DROP VIEW %s" % view_name) + ) + + @testing.requires.schema_reflection + def test_get_schema_names(self): + insp = inspect(testing.db) + + self.assert_(testing.config.test_schema in insp.get_schema_names()) + + @testing.requires.schema_reflection + def test_dialect_initialize(self): + engine = engines.testing_engine() + assert not hasattr(engine.dialect, 'default_schema_name') + inspect(engine) + assert hasattr(engine.dialect, 'default_schema_name') + + @testing.requires.schema_reflection + def test_get_default_schema_name(self): + insp = inspect(testing.db) + eq_(insp.default_schema_name, testing.db.dialect.default_schema_name) + + @testing.provide_metadata + def _test_get_table_names(self, schema=None, table_type='table', + order_by=None): + meta = self.metadata + users, addresses, dingalings = self.tables.users, \ + self.tables.email_addresses, self.tables.dingalings + insp = inspect(meta.bind) + + if table_type == 'view': + table_names = insp.get_view_names(schema) + table_names.sort() + answer = ['email_addresses_v', 'users_v'] + eq_(sorted(table_names), answer) + else: + table_names = insp.get_table_names(schema, + order_by=order_by) + if order_by == 'foreign_key': + answer = ['users', 'email_addresses', 'dingalings'] + eq_(table_names, answer) + else: + answer = ['dingalings', 'email_addresses', 'users'] + eq_(sorted(table_names), answer) + + @testing.requires.temp_table_names + def test_get_temp_table_names(self): + insp = inspect(self.bind) + temp_table_names = insp.get_temp_table_names() + eq_(sorted(temp_table_names), ['user_tmp']) + + @testing.requires.view_reflection + @testing.requires.temp_table_names + @testing.requires.temporary_views + def test_get_temp_view_names(self): + insp = inspect(self.bind) + temp_table_names = insp.get_temp_view_names() + eq_(sorted(temp_table_names), ['user_tmp_v']) + + @testing.requires.table_reflection + def test_get_table_names(self): + self._test_get_table_names() + + @testing.requires.table_reflection + @testing.requires.foreign_key_constraint_reflection + def test_get_table_names_fks(self): + self._test_get_table_names(order_by='foreign_key') + + @testing.requires.table_reflection + @testing.requires.schemas + def test_get_table_names_with_schema(self): + self._test_get_table_names(testing.config.test_schema) + + @testing.requires.view_column_reflection + def test_get_view_names(self): + self._test_get_table_names(table_type='view') + + @testing.requires.view_column_reflection + @testing.requires.schemas + def test_get_view_names_with_schema(self): + self._test_get_table_names( + testing.config.test_schema, table_type='view') + + @testing.requires.table_reflection + @testing.requires.view_column_reflection + def test_get_tables_and_views(self): + self._test_get_table_names() + self._test_get_table_names(table_type='view') + + def _test_get_columns(self, schema=None, 
table_type='table'): + meta = MetaData(testing.db) + users, addresses, dingalings = self.tables.users, \ + self.tables.email_addresses, self.tables.dingalings + table_names = ['users', 'email_addresses'] + if table_type == 'view': + table_names = ['users_v', 'email_addresses_v'] + insp = inspect(meta.bind) + for table_name, table in zip(table_names, (users, + addresses)): + schema_name = schema + cols = insp.get_columns(table_name, schema=schema_name) + self.assert_(len(cols) > 0, len(cols)) + + # should be in order + + for i, col in enumerate(table.columns): + eq_(col.name, cols[i]['name']) + ctype = cols[i]['type'].__class__ + ctype_def = col.type + if isinstance(ctype_def, sa.types.TypeEngine): + ctype_def = ctype_def.__class__ + + # Oracle returns Date for DateTime. + + if testing.against('oracle') and ctype_def \ + in (sql_types.Date, sql_types.DateTime): + ctype_def = sql_types.Date + + # assert that the desired type and return type share + # a base within one of the generic types. + + self.assert_(len(set(ctype.__mro__). + intersection(ctype_def.__mro__). + intersection([ + sql_types.Integer, + sql_types.Numeric, + sql_types.DateTime, + sql_types.Date, + sql_types.Time, + sql_types.String, + sql_types._Binary, + ])) > 0, '%s(%s), %s(%s)' % + (col.name, col.type, cols[i]['name'], ctype)) + + if not col.primary_key: + assert cols[i]['default'] is None + + @testing.requires.table_reflection + def test_get_columns(self): + self._test_get_columns() + + @testing.provide_metadata + def _type_round_trip(self, *types): + t = Table('t', self.metadata, + *[ + Column('t%d' % i, type_) + for i, type_ in enumerate(types) + ] + ) + t.create() + + return [ + c['type'] for c in + inspect(self.metadata.bind).get_columns('t') + ] + + @testing.requires.table_reflection + def test_numeric_reflection(self): + for typ in self._type_round_trip( + sql_types.Numeric(18, 5), + ): + assert isinstance(typ, sql_types.Numeric) + eq_(typ.precision, 18) + eq_(typ.scale, 5) + + @testing.requires.table_reflection + def test_varchar_reflection(self): + typ = self._type_round_trip(sql_types.String(52))[0] + assert isinstance(typ, sql_types.String) + eq_(typ.length, 52) + + @testing.requires.table_reflection + @testing.provide_metadata + def test_nullable_reflection(self): + t = Table('t', self.metadata, + Column('a', Integer, nullable=True), + Column('b', Integer, nullable=False)) + t.create() + eq_( + dict( + (col['name'], col['nullable']) + for col in inspect(self.metadata.bind).get_columns('t') + ), + {"a": True, "b": False} + ) + + @testing.requires.table_reflection + @testing.requires.schemas + def test_get_columns_with_schema(self): + self._test_get_columns(schema=testing.config.test_schema) + + @testing.requires.temp_table_reflection + def test_get_temp_table_columns(self): + meta = MetaData(self.bind) + user_tmp = self.tables.user_tmp + insp = inspect(meta.bind) + cols = insp.get_columns('user_tmp') + self.assert_(len(cols) > 0, len(cols)) + + for i, col in enumerate(user_tmp.columns): + eq_(col.name, cols[i]['name']) + + @testing.requires.temp_table_reflection + @testing.requires.view_column_reflection + @testing.requires.temporary_views + def test_get_temp_view_columns(self): + insp = inspect(self.bind) + cols = insp.get_columns('user_tmp_v') + eq_( + [col['name'] for col in cols], + ['id', 'name', 'foo'] + ) + + @testing.requires.view_column_reflection + def test_get_view_columns(self): + self._test_get_columns(table_type='view') + + @testing.requires.view_column_reflection + @testing.requires.schemas + def 
test_get_view_columns_with_schema(self): + self._test_get_columns( + schema=testing.config.test_schema, table_type='view') + + @testing.provide_metadata + def _test_get_pk_constraint(self, schema=None): + meta = self.metadata + users, addresses = self.tables.users, self.tables.email_addresses + insp = inspect(meta.bind) + + users_cons = insp.get_pk_constraint(users.name, schema=schema) + users_pkeys = users_cons['constrained_columns'] + eq_(users_pkeys, ['user_id']) + + addr_cons = insp.get_pk_constraint(addresses.name, schema=schema) + addr_pkeys = addr_cons['constrained_columns'] + eq_(addr_pkeys, ['address_id']) + + with testing.requires.reflects_pk_names.fail_if(): + eq_(addr_cons['name'], 'email_ad_pk') + + @testing.requires.primary_key_constraint_reflection + def test_get_pk_constraint(self): + self._test_get_pk_constraint() + + @testing.requires.table_reflection + @testing.requires.primary_key_constraint_reflection + @testing.requires.schemas + def test_get_pk_constraint_with_schema(self): + self._test_get_pk_constraint(schema=testing.config.test_schema) + + @testing.requires.table_reflection + @testing.provide_metadata + def test_deprecated_get_primary_keys(self): + meta = self.metadata + users = self.tables.users + insp = Inspector(meta.bind) + assert_raises_message( + sa_exc.SADeprecationWarning, + "Call to deprecated method get_primary_keys." + " Use get_pk_constraint instead.", + insp.get_primary_keys, users.name + ) + + @testing.provide_metadata + def _test_get_foreign_keys(self, schema=None): + meta = self.metadata + users, addresses, dingalings = self.tables.users, \ + self.tables.email_addresses, self.tables.dingalings + insp = inspect(meta.bind) + expected_schema = schema + # users + + if testing.requires.self_referential_foreign_keys.enabled: + users_fkeys = insp.get_foreign_keys(users.name, + schema=schema) + fkey1 = users_fkeys[0] + + with testing.requires.named_constraints.fail_if(): + self.assert_(fkey1['name'] is not None) + + eq_(fkey1['referred_schema'], expected_schema) + eq_(fkey1['referred_table'], users.name) + eq_(fkey1['referred_columns'], ['user_id', ]) + if testing.requires.self_referential_foreign_keys.enabled: + eq_(fkey1['constrained_columns'], ['parent_user_id']) + + # addresses + addr_fkeys = insp.get_foreign_keys(addresses.name, + schema=schema) + fkey1 = addr_fkeys[0] + + with testing.requires.named_constraints.fail_if(): + self.assert_(fkey1['name'] is not None) + + eq_(fkey1['referred_schema'], expected_schema) + eq_(fkey1['referred_table'], users.name) + eq_(fkey1['referred_columns'], ['user_id', ]) + eq_(fkey1['constrained_columns'], ['remote_user_id']) + + @testing.requires.foreign_key_constraint_reflection + def test_get_foreign_keys(self): + self._test_get_foreign_keys() + + @testing.requires.foreign_key_constraint_reflection + @testing.requires.schemas + def test_get_foreign_keys_with_schema(self): + self._test_get_foreign_keys(schema=testing.config.test_schema) + + @testing.requires.foreign_key_constraint_option_reflection + @testing.provide_metadata + def test_get_foreign_key_options(self): + meta = self.metadata + + Table( + 'x', meta, + Column('id', Integer, primary_key=True), + test_needs_fk=True + ) + + Table('table', meta, + Column('id', Integer, primary_key=True), + Column('x_id', Integer, sa.ForeignKey('x.id', name='xid')), + Column('test', String(10)), + test_needs_fk=True) + + Table('user', meta, + Column('id', Integer, primary_key=True), + Column('name', String(50), nullable=False), + Column('tid', Integer), + 
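+              # The composite ForeignKeyConstraint below carries ON UPDATE /
+              # ON DELETE options; dialects that can reflect them surface the
+              # values under the 'options' key of get_foreign_keys(), which is
+              # what this test asserts. A hedged sketch of reading them back
+              # (the 'engine' name here is assumed, not part of this suite):
+              #
+              #     from sqlalchemy import inspect
+              #     fk = inspect(engine).get_foreign_keys('user')[0]
+              #     fk['options']  # -> {'onupdate': 'SET NULL',
+              #                    #     'ondelete': 'CASCADE'} where reflected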
sa.ForeignKeyConstraint( + ['tid'], ['table.id'], + name='myfk', + onupdate="SET NULL", ondelete="CASCADE"), + test_needs_fk=True) + + meta.create_all() + + insp = inspect(meta.bind) + + # test 'options' is always present for a backend + # that can reflect these, since alembic looks for this + opts = insp.get_foreign_keys('table')[0]['options'] + + eq_( + dict( + (k, opts[k]) + for k in opts if opts[k] + ), + {} + ) + + opts = insp.get_foreign_keys('user')[0]['options'] + eq_( + dict( + (k, opts[k]) + for k in opts if opts[k] + ), + {'onupdate': 'SET NULL', 'ondelete': 'CASCADE'} + ) + + @testing.provide_metadata + def _test_get_indexes(self, schema=None): + meta = self.metadata + users, addresses, dingalings = self.tables.users, \ + self.tables.email_addresses, self.tables.dingalings + # The database may decide to create indexes for foreign keys, etc. + # so there may be more indexes than expected. + insp = inspect(meta.bind) + indexes = insp.get_indexes('users', schema=schema) + expected_indexes = [ + {'unique': False, + 'column_names': ['test1', 'test2'], + 'name': 'users_t_idx'}, + {'unique': False, + 'column_names': ['user_id', 'test2', 'test1'], + 'name': 'users_all_idx'} + ] + index_names = [d['name'] for d in indexes] + for e_index in expected_indexes: + assert e_index['name'] in index_names + index = indexes[index_names.index(e_index['name'])] + for key in e_index: + eq_(e_index[key], index[key]) + + @testing.requires.index_reflection + def test_get_indexes(self): + self._test_get_indexes() + + @testing.requires.index_reflection + @testing.requires.schemas + def test_get_indexes_with_schema(self): + self._test_get_indexes(schema=testing.config.test_schema) + + @testing.requires.unique_constraint_reflection + def test_get_unique_constraints(self): + self._test_get_unique_constraints() + + @testing.requires.temp_table_reflection + @testing.requires.unique_constraint_reflection + def test_get_temp_table_unique_constraints(self): + insp = inspect(self.bind) + reflected = insp.get_unique_constraints('user_tmp') + for refl in reflected: + # Different dialects handle duplicate index and constraints + # differently, so ignore this flag + refl.pop('duplicates_index', None) + eq_(reflected, [{'column_names': ['name'], 'name': 'user_tmp_uq'}]) + + @testing.requires.temp_table_reflection + def test_get_temp_table_indexes(self): + insp = inspect(self.bind) + indexes = insp.get_indexes('user_tmp') + for ind in indexes: + ind.pop('dialect_options', None) + eq_( + # TODO: we need to add better filtering for indexes/uq constraints + # that are doubled up + [idx for idx in indexes if idx['name'] == 'user_tmp_ix'], + [{'unique': False, 'column_names': ['foo'], 'name': 'user_tmp_ix'}] + ) + + @testing.requires.unique_constraint_reflection + @testing.requires.schemas + def test_get_unique_constraints_with_schema(self): + self._test_get_unique_constraints(schema=testing.config.test_schema) + + @testing.provide_metadata + def _test_get_unique_constraints(self, schema=None): + # SQLite dialect needs to parse the names of the constraints + # separately from what it gets from PRAGMA index_list(), and + # then matches them up. so same set of column_names in two + # constraints will confuse it. Perhaps we should no longer + # bother with index_list() here since we have the whole + # CREATE TABLE? 
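+        # get_unique_constraints() returns a list of dicts of the form
+        # {'name': <constraint name>, 'column_names': [...]}. A minimal
+        # sketch against the 'testtbl' built below (the 'engine' name is
+        # assumed):
+        #
+        #     from sqlalchemy import inspect
+        #     inspect(engine).get_unique_constraints('testtbl')
+        #     # -> [{'name': 'unique_a', 'column_names': ['a']}, ...]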
+ uniques = sorted( + [ + {'name': 'unique_a', 'column_names': ['a']}, + {'name': 'unique_a_b_c', 'column_names': ['a', 'b', 'c']}, + {'name': 'unique_c_a_b', 'column_names': ['c', 'a', 'b']}, + {'name': 'unique_asc_key', 'column_names': ['asc', 'key']}, + {'name': 'i.have.dots', 'column_names': ['b']}, + {'name': 'i have spaces', 'column_names': ['c']}, + ], + key=operator.itemgetter('name') + ) + orig_meta = self.metadata + table = Table( + 'testtbl', orig_meta, + Column('a', sa.String(20)), + Column('b', sa.String(30)), + Column('c', sa.Integer), + # reserved identifiers + Column('asc', sa.String(30)), + Column('key', sa.String(30)), + schema=schema + ) + for uc in uniques: + table.append_constraint( + sa.UniqueConstraint(*uc['column_names'], name=uc['name']) + ) + orig_meta.create_all() + + inspector = inspect(orig_meta.bind) + reflected = sorted( + inspector.get_unique_constraints('testtbl', schema=schema), + key=operator.itemgetter('name') + ) + + for orig, refl in zip(uniques, reflected): + # Different dialects handle duplicate index and constraints + # differently, so ignore this flag + refl.pop('duplicates_index', None) + eq_(orig, refl) + + @testing.provide_metadata + def _test_get_view_definition(self, schema=None): + meta = self.metadata + users, addresses, dingalings = self.tables.users, \ + self.tables.email_addresses, self.tables.dingalings + view_name1 = 'users_v' + view_name2 = 'email_addresses_v' + insp = inspect(meta.bind) + v1 = insp.get_view_definition(view_name1, schema=schema) + self.assert_(v1) + v2 = insp.get_view_definition(view_name2, schema=schema) + self.assert_(v2) + + @testing.requires.view_reflection + def test_get_view_definition(self): + self._test_get_view_definition() + + @testing.requires.view_reflection + @testing.requires.schemas + def test_get_view_definition_with_schema(self): + self._test_get_view_definition(schema=testing.config.test_schema) + + @testing.only_on("postgresql", "PG specific feature") + @testing.provide_metadata + def _test_get_table_oid(self, table_name, schema=None): + meta = self.metadata + users, addresses, dingalings = self.tables.users, \ + self.tables.email_addresses, self.tables.dingalings + insp = inspect(meta.bind) + oid = insp.get_table_oid(table_name, schema) + self.assert_(isinstance(oid, int)) + + def test_get_table_oid(self): + self._test_get_table_oid('users') + + @testing.requires.schemas + def test_get_table_oid_with_schema(self): + self._test_get_table_oid('users', schema=testing.config.test_schema) + + @testing.requires.table_reflection + @testing.provide_metadata + def test_autoincrement_col(self): + """test that 'autoincrement' is reflected according to sqla's policy. + + Don't mark this test as unsupported for any backend ! + + (technically it fails with MySQL InnoDB since "id" comes before "id2") + + A backend is better off not returning "autoincrement" at all, + instead of potentially returning "False" for an auto-incrementing + primary key column. 
+ + """ + + meta = self.metadata + insp = inspect(meta.bind) + + for tname, cname in [ + ('users', 'user_id'), + ('email_addresses', 'address_id'), + ('dingalings', 'dingaling_id'), + ]: + cols = insp.get_columns(tname) + id_ = dict((c['name'], c) for c in cols)[cname] + assert id_.get('autoincrement', True) + + +class NormalizedNameTest(fixtures.TablesTest): + __requires__ = 'denormalized_names', + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table( + quoted_name('t1', quote=True), metadata, + Column('id', Integer, primary_key=True), + ) + Table( + quoted_name('t2', quote=True), metadata, + Column('id', Integer, primary_key=True), + Column('t1id', ForeignKey('t1.id')) + ) + + def test_reflect_lowercase_forced_tables(self): + + m2 = MetaData(testing.db) + t2_ref = Table(quoted_name('t2', quote=True), m2, autoload=True) + t1_ref = m2.tables['t1'] + assert t2_ref.c.t1id.references(t1_ref.c.id) + + m3 = MetaData(testing.db) + m3.reflect(only=lambda name, m: name.lower() in ('t1', 't2')) + assert m3.tables['t2'].c.t1id.references(m3.tables['t1'].c.id) + + def test_get_table_names(self): + tablenames = [ + t for t in inspect(testing.db).get_table_names() + if t.lower() in ("t1", "t2")] + + eq_(tablenames[0].upper(), tablenames[0].lower()) + eq_(tablenames[1].upper(), tablenames[1].lower()) + + +__all__ = ('ComponentReflectionTest', 'HasTableTest', 'NormalizedNameTest') diff --git a/app/lib/sqlalchemy/testing/suite/test_results.py b/app/lib/sqlalchemy/testing/suite/test_results.py new file mode 100644 index 0000000..98ddc7e --- /dev/null +++ b/app/lib/sqlalchemy/testing/suite/test_results.py @@ -0,0 +1,367 @@ +from .. import fixtures, config +from ..config import requirements +from .. import exclusions +from ..assertions import eq_ +from .. import engines +from ... import testing + +from sqlalchemy import Integer, String, select, util, sql, DateTime, text, func +import datetime +from ..schema import Table, Column + + +class RowFetchTest(fixtures.TablesTest): + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table('plain_pk', metadata, + Column('id', Integer, primary_key=True), + Column('data', String(50)) + ) + Table('has_dates', metadata, + Column('id', Integer, primary_key=True), + Column('today', DateTime) + ) + + @classmethod + def insert_data(cls): + config.db.execute( + cls.tables.plain_pk.insert(), + [ + {"id": 1, "data": "d1"}, + {"id": 2, "data": "d2"}, + {"id": 3, "data": "d3"}, + ] + ) + + config.db.execute( + cls.tables.has_dates.insert(), + [ + {"id": 1, "today": datetime.datetime(2006, 5, 12, 12, 0, 0)} + ] + ) + + def test_via_string(self): + row = config.db.execute( + self.tables.plain_pk.select(). + order_by(self.tables.plain_pk.c.id) + ).first() + + eq_( + row['id'], 1 + ) + eq_( + row['data'], "d1" + ) + + def test_via_int(self): + row = config.db.execute( + self.tables.plain_pk.select(). + order_by(self.tables.plain_pk.c.id) + ).first() + + eq_( + row[0], 1 + ) + eq_( + row[1], "d1" + ) + + def test_via_col_object(self): + row = config.db.execute( + self.tables.plain_pk.select(). + order_by(self.tables.plain_pk.c.id) + ).first() + + eq_( + row[self.tables.plain_pk.c.id], 1 + ) + eq_( + row[self.tables.plain_pk.c.data], "d1" + ) + + @requirements.duplicate_names_in_cursor_description + def test_row_with_dupe_names(self): + result = config.db.execute( + select([self.tables.plain_pk.c.data, + self.tables.plain_pk.c.data.label('data')]). 
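+            # Both result columns are named 'data' here; the row keeps the
+            # duplicate keys, so result.keys() == ['data', 'data'] and
+            # positional access (row[0], row[1]) is the unambiguous way to
+            # read such a row.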
+ order_by(self.tables.plain_pk.c.id) + ) + row = result.first() + eq_(result.keys(), ['data', 'data']) + eq_(row, ('d1', 'd1')) + + def test_row_w_scalar_select(self): + """test that a scalar select as a column is returned as such + and that type conversion works OK. + + (this is half a SQLAlchemy Core test and half to catch database + backends that may have unusual behavior with scalar selects.) + + """ + datetable = self.tables.has_dates + s = select([datetable.alias('x').c.today]).as_scalar() + s2 = select([datetable.c.id, s.label('somelabel')]) + row = config.db.execute(s2).first() + + eq_(row['somelabel'], datetime.datetime(2006, 5, 12, 12, 0, 0)) + + +class PercentSchemaNamesTest(fixtures.TablesTest): + """tests using percent signs, spaces in table and column names. + + This is a very fringe use case, doesn't work for MySQL + or PostgreSQL. the requirement, "percent_schema_names", + is marked "skip" by default. + + """ + + __requires__ = ('percent_schema_names', ) + + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + cls.tables.percent_table = Table('percent%table', metadata, + Column("percent%", Integer), + Column( + "spaces % more spaces", Integer), + ) + cls.tables.lightweight_percent_table = sql.table( + 'percent%table', sql.column("percent%"), + sql.column("spaces % more spaces") + ) + + def test_single_roundtrip(self): + percent_table = self.tables.percent_table + for params in [ + {'percent%': 5, 'spaces % more spaces': 12}, + {'percent%': 7, 'spaces % more spaces': 11}, + {'percent%': 9, 'spaces % more spaces': 10}, + {'percent%': 11, 'spaces % more spaces': 9} + ]: + config.db.execute(percent_table.insert(), params) + self._assert_table() + + def test_executemany_roundtrip(self): + percent_table = self.tables.percent_table + config.db.execute( + percent_table.insert(), + {'percent%': 5, 'spaces % more spaces': 12} + ) + config.db.execute( + percent_table.insert(), + [{'percent%': 7, 'spaces % more spaces': 11}, + {'percent%': 9, 'spaces % more spaces': 10}, + {'percent%': 11, 'spaces % more spaces': 9}] + ) + self._assert_table() + + def _assert_table(self): + percent_table = self.tables.percent_table + lightweight_percent_table = self.tables.lightweight_percent_table + + for table in ( + percent_table, + percent_table.alias(), + lightweight_percent_table, + lightweight_percent_table.alias()): + eq_( + list( + config.db.execute( + table.select().order_by(table.c['percent%']) + ) + ), + [ + (5, 12), + (7, 11), + (9, 10), + (11, 9) + ] + ) + + eq_( + list( + config.db.execute( + table.select(). + where(table.c['spaces % more spaces'].in_([9, 10])). + order_by(table.c['percent%']), + ) + ), + [ + (9, 10), + (11, 9) + ] + ) + + row = config.db.execute(table.select(). + order_by(table.c['percent%'])).first() + eq_(row['percent%'], 5) + eq_(row['spaces % more spaces'], 12) + + eq_(row[table.c['percent%']], 5) + eq_(row[table.c['spaces % more spaces']], 12) + + config.db.execute( + percent_table.update().values( + {percent_table.c['spaces % more spaces']: 15} + ) + ) + + eq_( + list( + config.db.execute( + percent_table. + select(). 
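+                    # Column names containing '%' or spaces cannot be reached
+                    # as attributes; the mapping interface is used instead,
+                    # e.g. table.c['percent%'] and
+                    # table.c['spaces % more spaces'].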
+ order_by(percent_table.c['percent%']) + ) + ), + [(5, 15), (7, 15), (9, 15), (11, 15)] + ) + + +class ServerSideCursorsTest(fixtures.TestBase, testing.AssertsExecutionResults): + + __requires__ = ('server_side_cursors', ) + + __backend__ = True + + def _is_server_side(self, cursor): + if self.engine.url.drivername == 'postgresql': + return cursor.name + elif self.engine.url.drivername == 'mysql': + sscursor = __import__('MySQLdb.cursors').cursors.SSCursor + return isinstance(cursor, sscursor) + elif self.engine.url.drivername == 'mysql+pymysql': + sscursor = __import__('pymysql.cursors').cursors.SSCursor + return isinstance(cursor, sscursor) + else: + return False + + def _fixture(self, server_side_cursors): + self.engine = engines.testing_engine( + options={'server_side_cursors': server_side_cursors} + ) + return self.engine + + def tearDown(self): + engines.testing_reaper.close_all() + self.engine.dispose() + + def test_global_string(self): + engine = self._fixture(True) + result = engine.execute('select 1') + assert self._is_server_side(result.cursor) + + def test_global_text(self): + engine = self._fixture(True) + result = engine.execute(text('select 1')) + assert self._is_server_side(result.cursor) + + def test_global_expr(self): + engine = self._fixture(True) + result = engine.execute(select([1])) + assert self._is_server_side(result.cursor) + + def test_global_off_explicit(self): + engine = self._fixture(False) + result = engine.execute(text('select 1')) + + # It should be off globally ... + + assert not self._is_server_side(result.cursor) + + def test_stmt_option(self): + engine = self._fixture(False) + + s = select([1]).execution_options(stream_results=True) + result = engine.execute(s) + + # ... but enabled for this one. + + assert self._is_server_side(result.cursor) + + def test_conn_option(self): + engine = self._fixture(False) + + # and this one + result = \ + engine.connect().execution_options(stream_results=True).\ + execute('select 1' + ) + assert self._is_server_side(result.cursor) + + def test_stmt_enabled_conn_option_disabled(self): + engine = self._fixture(False) + + s = select([1]).execution_options(stream_results=True) + + # not this one + result = \ + engine.connect().execution_options(stream_results=False).\ + execute(s) + assert not self._is_server_side(result.cursor) + + def test_stmt_option_disabled(self): + engine = self._fixture(True) + s = select([1]).execution_options(stream_results=False) + result = engine.execute(s) + assert not self._is_server_side(result.cursor) + + def test_aliases_and_ss(self): + engine = self._fixture(False) + s1 = select([1]).execution_options(stream_results=True).alias() + result = engine.execute(s1) + assert self._is_server_side(result.cursor) + + # s1's options shouldn't affect s2 when s2 is used as a + # from_obj. 
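+        # stream_results is a per-statement / per-connection execution
+        # option; on backends that support it (e.g. postgresql+psycopg2,
+        # where it maps to a named server-side cursor) it can be requested
+        # without enabling server_side_cursors engine-wide. A hedged sketch,
+        # assuming an existing 'engine':
+        #
+        #     conn = engine.connect().execution_options(stream_results=True)
+        #     result = conn.execute(select([1]))   # uses a server-side cursor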
+ s2 = select([1], from_obj=s1) + result = engine.execute(s2) + assert not self._is_server_side(result.cursor) + + def test_for_update_expr(self): + engine = self._fixture(True) + s1 = select([1], for_update=True) + result = engine.execute(s1) + assert self._is_server_side(result.cursor) + + def test_for_update_string(self): + engine = self._fixture(True) + result = engine.execute('SELECT 1 FOR UPDATE') + assert self._is_server_side(result.cursor) + + def test_text_no_ss(self): + engine = self._fixture(False) + s = text('select 42') + result = engine.execute(s) + assert not self._is_server_side(result.cursor) + + def test_text_ss_option(self): + engine = self._fixture(False) + s = text('select 42').execution_options(stream_results=True) + result = engine.execute(s) + assert self._is_server_side(result.cursor) + + @testing.provide_metadata + def test_roundtrip(self): + md = self.metadata + + engine = self._fixture(True) + test_table = Table('test_table', md, + Column('id', Integer, primary_key=True), + Column('data', String(50))) + test_table.create(checkfirst=True) + test_table.insert().execute(data='data1') + test_table.insert().execute(data='data2') + eq_(test_table.select().execute().fetchall(), [(1, 'data1' + ), (2, 'data2')]) + test_table.update().where( + test_table.c.id == 2).values( + data=test_table.c.data + + ' updated').execute() + eq_(test_table.select().execute().fetchall(), + [(1, 'data1'), (2, 'data2 updated')]) + test_table.delete().execute() + eq_(select([func.count('*')]).select_from(test_table).scalar(), 0) diff --git a/app/lib/sqlalchemy/testing/suite/test_select.py b/app/lib/sqlalchemy/testing/suite/test_select.py new file mode 100644 index 0000000..e7de356 --- /dev/null +++ b/app/lib/sqlalchemy/testing/suite/test_select.py @@ -0,0 +1,312 @@ +from .. import fixtures, config +from ..assertions import eq_ + +from sqlalchemy import util +from sqlalchemy import Integer, String, select, func, bindparam, union +from sqlalchemy import testing + +from ..schema import Table, Column + + +class OrderByLabelTest(fixtures.TablesTest): + """Test the dialect sends appropriate ORDER BY expressions when + labels are used. + + This essentially exercises the "supports_simple_order_by_label" + setting. 
+ + """ + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table("some_table", metadata, + Column('id', Integer, primary_key=True), + Column('x', Integer), + Column('y', Integer), + Column('q', String(50)), + Column('p', String(50)) + ) + + @classmethod + def insert_data(cls): + config.db.execute( + cls.tables.some_table.insert(), + [ + {"id": 1, "x": 1, "y": 2, "q": "q1", "p": "p3"}, + {"id": 2, "x": 2, "y": 3, "q": "q2", "p": "p2"}, + {"id": 3, "x": 3, "y": 4, "q": "q3", "p": "p1"}, + ] + ) + + def _assert_result(self, select, result): + eq_( + config.db.execute(select).fetchall(), + result + ) + + def test_plain(self): + table = self.tables.some_table + lx = table.c.x.label('lx') + self._assert_result( + select([lx]).order_by(lx), + [(1, ), (2, ), (3, )] + ) + + def test_composed_int(self): + table = self.tables.some_table + lx = (table.c.x + table.c.y).label('lx') + self._assert_result( + select([lx]).order_by(lx), + [(3, ), (5, ), (7, )] + ) + + def test_composed_multiple(self): + table = self.tables.some_table + lx = (table.c.x + table.c.y).label('lx') + ly = (func.lower(table.c.q) + table.c.p).label('ly') + self._assert_result( + select([lx, ly]).order_by(lx, ly.desc()), + [(3, util.u('q1p3')), (5, util.u('q2p2')), (7, util.u('q3p1'))] + ) + + def test_plain_desc(self): + table = self.tables.some_table + lx = table.c.x.label('lx') + self._assert_result( + select([lx]).order_by(lx.desc()), + [(3, ), (2, ), (1, )] + ) + + def test_composed_int_desc(self): + table = self.tables.some_table + lx = (table.c.x + table.c.y).label('lx') + self._assert_result( + select([lx]).order_by(lx.desc()), + [(7, ), (5, ), (3, )] + ) + + def test_group_by_composed(self): + table = self.tables.some_table + expr = (table.c.x + table.c.y).label('lx') + stmt = select([func.count(table.c.id), expr]).group_by(expr).order_by(expr) + self._assert_result( + stmt, + [(1, 3), (1, 5), (1, 7)] + ) + + +class LimitOffsetTest(fixtures.TablesTest): + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table("some_table", metadata, + Column('id', Integer, primary_key=True), + Column('x', Integer), + Column('y', Integer)) + + @classmethod + def insert_data(cls): + config.db.execute( + cls.tables.some_table.insert(), + [ + {"id": 1, "x": 1, "y": 2}, + {"id": 2, "x": 2, "y": 3}, + {"id": 3, "x": 3, "y": 4}, + {"id": 4, "x": 4, "y": 5}, + ] + ) + + def _assert_result(self, select, result, params=()): + eq_( + config.db.execute(select, params).fetchall(), + result + ) + + def test_simple_limit(self): + table = self.tables.some_table + self._assert_result( + select([table]).order_by(table.c.id).limit(2), + [(1, 1, 2), (2, 2, 3)] + ) + + @testing.requires.offset + def test_simple_offset(self): + table = self.tables.some_table + self._assert_result( + select([table]).order_by(table.c.id).offset(2), + [(3, 3, 4), (4, 4, 5)] + ) + + @testing.requires.offset + def test_simple_limit_offset(self): + table = self.tables.some_table + self._assert_result( + select([table]).order_by(table.c.id).limit(2).offset(1), + [(2, 2, 3), (3, 3, 4)] + ) + + @testing.requires.offset + def test_limit_offset_nobinds(self): + """test that 'literal binds' mode works - no bound params.""" + + table = self.tables.some_table + stmt = select([table]).order_by(table.c.id).limit(2).offset(1) + sql = stmt.compile( + dialect=config.db.dialect, + compile_kwargs={"literal_binds": True}) + sql = str(sql) + + self._assert_result( + sql, + [(2, 2, 3), (3, 3, 4)] + ) + + @testing.requires.bound_limit_offset + def 
test_bound_limit(self): + table = self.tables.some_table + self._assert_result( + select([table]).order_by(table.c.id).limit(bindparam('l')), + [(1, 1, 2), (2, 2, 3)], + params={"l": 2} + ) + + @testing.requires.bound_limit_offset + def test_bound_offset(self): + table = self.tables.some_table + self._assert_result( + select([table]).order_by(table.c.id).offset(bindparam('o')), + [(3, 3, 4), (4, 4, 5)], + params={"o": 2} + ) + + @testing.requires.bound_limit_offset + def test_bound_limit_offset(self): + table = self.tables.some_table + self._assert_result( + select([table]).order_by(table.c.id). + limit(bindparam("l")).offset(bindparam("o")), + [(2, 2, 3), (3, 3, 4)], + params={"l": 2, "o": 1} + ) + + +class CompoundSelectTest(fixtures.TablesTest): + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table("some_table", metadata, + Column('id', Integer, primary_key=True), + Column('x', Integer), + Column('y', Integer)) + + @classmethod + def insert_data(cls): + config.db.execute( + cls.tables.some_table.insert(), + [ + {"id": 1, "x": 1, "y": 2}, + {"id": 2, "x": 2, "y": 3}, + {"id": 3, "x": 3, "y": 4}, + {"id": 4, "x": 4, "y": 5}, + ] + ) + + def _assert_result(self, select, result, params=()): + eq_( + config.db.execute(select, params).fetchall(), + result + ) + + def test_plain_union(self): + table = self.tables.some_table + s1 = select([table]).where(table.c.id == 2) + s2 = select([table]).where(table.c.id == 3) + + u1 = union(s1, s2) + self._assert_result( + u1.order_by(u1.c.id), + [(2, 2, 3), (3, 3, 4)] + ) + + def test_select_from_plain_union(self): + table = self.tables.some_table + s1 = select([table]).where(table.c.id == 2) + s2 = select([table]).where(table.c.id == 3) + + u1 = union(s1, s2).alias().select() + self._assert_result( + u1.order_by(u1.c.id), + [(2, 2, 3), (3, 3, 4)] + ) + + @testing.requires.parens_in_union_contained_select_w_limit_offset + def test_limit_offset_selectable_in_unions(self): + table = self.tables.some_table + s1 = select([table]).where(table.c.id == 2).\ + limit(1).order_by(table.c.id) + s2 = select([table]).where(table.c.id == 3).\ + limit(1).order_by(table.c.id) + + u1 = union(s1, s2).limit(2) + self._assert_result( + u1.order_by(u1.c.id), + [(2, 2, 3), (3, 3, 4)] + ) + + @testing.requires.parens_in_union_contained_select_wo_limit_offset + def test_order_by_selectable_in_unions(self): + table = self.tables.some_table + s1 = select([table]).where(table.c.id == 2).\ + order_by(table.c.id) + s2 = select([table]).where(table.c.id == 3).\ + order_by(table.c.id) + + u1 = union(s1, s2).limit(2) + self._assert_result( + u1.order_by(u1.c.id), + [(2, 2, 3), (3, 3, 4)] + ) + + def test_distinct_selectable_in_unions(self): + table = self.tables.some_table + s1 = select([table]).where(table.c.id == 2).\ + distinct() + s2 = select([table]).where(table.c.id == 3).\ + distinct() + + u1 = union(s1, s2).limit(2) + self._assert_result( + u1.order_by(u1.c.id), + [(2, 2, 3), (3, 3, 4)] + ) + + @testing.requires.parens_in_union_contained_select_w_limit_offset + def test_limit_offset_in_unions_from_alias(self): + table = self.tables.some_table + s1 = select([table]).where(table.c.id == 2).\ + limit(1).order_by(table.c.id) + s2 = select([table]).where(table.c.id == 3).\ + limit(1).order_by(table.c.id) + + # this necessarily has double parens + u1 = union(s1, s2).alias() + self._assert_result( + u1.select().limit(2).order_by(u1.c.id), + [(2, 2, 3), (3, 3, 4)] + ) + + def test_limit_offset_aliased_selectable_in_unions(self): + table = 
self.tables.some_table + s1 = select([table]).where(table.c.id == 2).\ + limit(1).order_by(table.c.id).alias().select() + s2 = select([table]).where(table.c.id == 3).\ + limit(1).order_by(table.c.id).alias().select() + + u1 = union(s1, s2).limit(2) + self._assert_result( + u1.order_by(u1.c.id), + [(2, 2, 3), (3, 3, 4)] + ) diff --git a/app/lib/sqlalchemy/testing/suite/test_sequence.py b/app/lib/sqlalchemy/testing/suite/test_sequence.py new file mode 100644 index 0000000..b2d52f2 --- /dev/null +++ b/app/lib/sqlalchemy/testing/suite/test_sequence.py @@ -0,0 +1,126 @@ +from .. import fixtures, config +from ..config import requirements +from ..assertions import eq_ +from ... import testing + +from ... import Integer, String, Sequence, schema + +from ..schema import Table, Column + + +class SequenceTest(fixtures.TablesTest): + __requires__ = ('sequences',) + __backend__ = True + + run_create_tables = 'each' + + @classmethod + def define_tables(cls, metadata): + Table('seq_pk', metadata, + Column('id', Integer, Sequence('tab_id_seq'), primary_key=True), + Column('data', String(50)) + ) + + Table('seq_opt_pk', metadata, + Column('id', Integer, Sequence('tab_id_seq', optional=True), + primary_key=True), + Column('data', String(50)) + ) + + def test_insert_roundtrip(self): + config.db.execute( + self.tables.seq_pk.insert(), + data="some data" + ) + self._assert_round_trip(self.tables.seq_pk, config.db) + + def test_insert_lastrowid(self): + r = config.db.execute( + self.tables.seq_pk.insert(), + data="some data" + ) + eq_( + r.inserted_primary_key, + [1] + ) + + def test_nextval_direct(self): + r = config.db.execute( + self.tables.seq_pk.c.id.default + ) + eq_( + r, 1 + ) + + @requirements.sequences_optional + def test_optional_seq(self): + r = config.db.execute( + self.tables.seq_opt_pk.insert(), + data="some data" + ) + eq_( + r.inserted_primary_key, + [1] + ) + + def _assert_round_trip(self, table, conn): + row = conn.execute(table.select()).first() + eq_( + row, + (1, "some data") + ) + + +class HasSequenceTest(fixtures.TestBase): + __requires__ = 'sequences', + __backend__ = True + + def test_has_sequence(self): + s1 = Sequence('user_id_seq') + testing.db.execute(schema.CreateSequence(s1)) + try: + eq_(testing.db.dialect.has_sequence(testing.db, + 'user_id_seq'), True) + finally: + testing.db.execute(schema.DropSequence(s1)) + + @testing.requires.schemas + def test_has_sequence_schema(self): + s1 = Sequence('user_id_seq', schema=config.test_schema) + testing.db.execute(schema.CreateSequence(s1)) + try: + eq_(testing.db.dialect.has_sequence( + testing.db, 'user_id_seq', schema=config.test_schema), True) + finally: + testing.db.execute(schema.DropSequence(s1)) + + def test_has_sequence_neg(self): + eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq'), + False) + + @testing.requires.schemas + def test_has_sequence_schemas_neg(self): + eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq', + schema=config.test_schema), + False) + + @testing.requires.schemas + def test_has_sequence_default_not_in_remote(self): + s1 = Sequence('user_id_seq') + testing.db.execute(schema.CreateSequence(s1)) + try: + eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq', + schema=config.test_schema), + False) + finally: + testing.db.execute(schema.DropSequence(s1)) + + @testing.requires.schemas + def test_has_sequence_remote_not_in_default(self): + s1 = Sequence('user_id_seq', schema=config.test_schema) + testing.db.execute(schema.CreateSequence(s1)) + try: + 
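+            # CreateSequence / DropSequence emit the bare CREATE SEQUENCE and
+            # DROP SEQUENCE statements. A minimal sketch on a
+            # sequence-capable backend; 'engine' and the sequence name are
+            # assumed, not part of this suite:
+            #
+            #     from sqlalchemy import Sequence
+            #     from sqlalchemy.schema import CreateSequence, DropSequence
+            #     s = Sequence('order_id_seq')
+            #     engine.execute(CreateSequence(s))
+            #     engine.dialect.has_sequence(engine, 'order_id_seq')  # True
+            #     engine.execute(DropSequence(s))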
eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq'), + False) + finally: + testing.db.execute(schema.DropSequence(s1)) diff --git a/app/lib/sqlalchemy/testing/suite/test_types.py b/app/lib/sqlalchemy/testing/suite/test_types.py new file mode 100644 index 0000000..dbbe031 --- /dev/null +++ b/app/lib/sqlalchemy/testing/suite/test_types.py @@ -0,0 +1,898 @@ +# coding: utf-8 + +from .. import fixtures, config +from ..assertions import eq_ +from ..config import requirements +from sqlalchemy import Integer, Unicode, UnicodeText, select +from sqlalchemy import Date, DateTime, Time, MetaData, String, \ + Text, Numeric, Float, literal, Boolean, cast, null, JSON, and_, type_coerce +from ..schema import Table, Column +from ... import testing +import decimal +import datetime +from ...util import u +from ... import util + + +class _LiteralRoundTripFixture(object): + @testing.provide_metadata + def _literal_round_trip(self, type_, input_, output, filter_=None): + """test literal rendering """ + + # for literal, we test the literal render in an INSERT + # into a typed column. we can then SELECT it back as its + # official type; ideally we'd be able to use CAST here + # but MySQL in particular can't CAST fully + t = Table('t', self.metadata, Column('x', type_)) + t.create() + + for value in input_: + ins = t.insert().values(x=literal(value)).compile( + dialect=testing.db.dialect, + compile_kwargs=dict(literal_binds=True) + ) + testing.db.execute(ins) + + for row in t.select().execute(): + value = row[0] + if filter_ is not None: + value = filter_(value) + assert value in output + + +class _UnicodeFixture(_LiteralRoundTripFixture): + __requires__ = 'unicode_data', + + data = u("Alors vous imaginez ma surprise, au lever du jour, " + "quand une drôle de petite voix m’a réveillé. Elle " + "disait: « S’il vous plaît… dessine-moi un mouton! 
»") + + @classmethod + def define_tables(cls, metadata): + Table('unicode_table', metadata, + Column('id', Integer, primary_key=True, + test_needs_autoincrement=True), + Column('unicode_data', cls.datatype), + ) + + def test_round_trip(self): + unicode_table = self.tables.unicode_table + + config.db.execute( + unicode_table.insert(), + { + 'unicode_data': self.data, + } + ) + + row = config.db.execute( + select([ + unicode_table.c.unicode_data, + ]) + ).first() + + eq_( + row, + (self.data, ) + ) + assert isinstance(row[0], util.text_type) + + def test_round_trip_executemany(self): + unicode_table = self.tables.unicode_table + + config.db.execute( + unicode_table.insert(), + [ + { + 'unicode_data': self.data, + } + for i in range(3) + ] + ) + + rows = config.db.execute( + select([ + unicode_table.c.unicode_data, + ]) + ).fetchall() + eq_( + rows, + [(self.data, ) for i in range(3)] + ) + for row in rows: + assert isinstance(row[0], util.text_type) + + def _test_empty_strings(self): + unicode_table = self.tables.unicode_table + + config.db.execute( + unicode_table.insert(), + {"unicode_data": u('')} + ) + row = config.db.execute( + select([unicode_table.c.unicode_data]) + ).first() + eq_(row, (u(''),)) + + def test_literal(self): + self._literal_round_trip(self.datatype, [self.data], [self.data]) + + +class UnicodeVarcharTest(_UnicodeFixture, fixtures.TablesTest): + __requires__ = 'unicode_data', + __backend__ = True + + datatype = Unicode(255) + + @requirements.empty_strings_varchar + def test_empty_strings_varchar(self): + self._test_empty_strings() + + +class UnicodeTextTest(_UnicodeFixture, fixtures.TablesTest): + __requires__ = 'unicode_data', 'text_type' + __backend__ = True + + datatype = UnicodeText() + + @requirements.empty_strings_text + def test_empty_strings_text(self): + self._test_empty_strings() + + +class TextTest(_LiteralRoundTripFixture, fixtures.TablesTest): + __requires__ = 'text_type', + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table('text_table', metadata, + Column('id', Integer, primary_key=True, + test_needs_autoincrement=True), + Column('text_data', Text), + ) + + def test_text_roundtrip(self): + text_table = self.tables.text_table + + config.db.execute( + text_table.insert(), + {"text_data": 'some text'} + ) + row = config.db.execute( + select([text_table.c.text_data]) + ).first() + eq_(row, ('some text',)) + + def test_text_empty_strings(self): + text_table = self.tables.text_table + + config.db.execute( + text_table.insert(), + {"text_data": ''} + ) + row = config.db.execute( + select([text_table.c.text_data]) + ).first() + eq_(row, ('',)) + + def test_literal(self): + self._literal_round_trip(Text, ["some text"], ["some text"]) + + def test_literal_quoting(self): + data = '''some 'text' hey "hi there" that's text''' + self._literal_round_trip(Text, [data], [data]) + + def test_literal_backslashes(self): + data = r'backslash one \ backslash two \\ end' + self._literal_round_trip(Text, [data], [data]) + + +class StringTest(_LiteralRoundTripFixture, fixtures.TestBase): + __backend__ = True + + @requirements.unbounded_varchar + def test_nolength_string(self): + metadata = MetaData() + foo = Table('foo', metadata, + Column('one', String) + ) + + foo.create(config.db) + foo.drop(config.db) + + def test_literal(self): + self._literal_round_trip(String(40), ["some text"], ["some text"]) + + def test_literal_quoting(self): + data = '''some 'text' hey "hi there" that's text''' + self._literal_round_trip(String(40), [data], [data]) + + def 
test_literal_backslashes(self): + data = r'backslash one \ backslash two \\ end' + self._literal_round_trip(String(40), [data], [data]) + + +class _DateFixture(_LiteralRoundTripFixture): + compare = None + + @classmethod + def define_tables(cls, metadata): + Table('date_table', metadata, + Column('id', Integer, primary_key=True, + test_needs_autoincrement=True), + Column('date_data', cls.datatype), + ) + + def test_round_trip(self): + date_table = self.tables.date_table + + config.db.execute( + date_table.insert(), + {'date_data': self.data} + ) + + row = config.db.execute( + select([ + date_table.c.date_data, + ]) + ).first() + + compare = self.compare or self.data + eq_(row, + (compare, )) + assert isinstance(row[0], type(compare)) + + def test_null(self): + date_table = self.tables.date_table + + config.db.execute( + date_table.insert(), + {'date_data': None} + ) + + row = config.db.execute( + select([ + date_table.c.date_data, + ]) + ).first() + eq_(row, (None,)) + + @testing.requires.datetime_literals + def test_literal(self): + compare = self.compare or self.data + self._literal_round_trip(self.datatype, [self.data], [compare]) + + +class DateTimeTest(_DateFixture, fixtures.TablesTest): + __requires__ = 'datetime', + __backend__ = True + datatype = DateTime + data = datetime.datetime(2012, 10, 15, 12, 57, 18) + + +class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): + __requires__ = 'datetime_microseconds', + __backend__ = True + datatype = DateTime + data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396) + + +class TimeTest(_DateFixture, fixtures.TablesTest): + __requires__ = 'time', + __backend__ = True + datatype = Time + data = datetime.time(12, 57, 18) + + +class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): + __requires__ = 'time_microseconds', + __backend__ = True + datatype = Time + data = datetime.time(12, 57, 18, 396) + + +class DateTest(_DateFixture, fixtures.TablesTest): + __requires__ = 'date', + __backend__ = True + datatype = Date + data = datetime.date(2012, 10, 15) + + +class DateTimeCoercedToDateTimeTest(_DateFixture, fixtures.TablesTest): + __requires__ = 'date', 'date_coerces_from_datetime' + __backend__ = True + datatype = Date + data = datetime.datetime(2012, 10, 15, 12, 57, 18) + compare = datetime.date(2012, 10, 15) + + +class DateTimeHistoricTest(_DateFixture, fixtures.TablesTest): + __requires__ = 'datetime_historic', + __backend__ = True + datatype = DateTime + data = datetime.datetime(1850, 11, 10, 11, 52, 35) + + +class DateHistoricTest(_DateFixture, fixtures.TablesTest): + __requires__ = 'date_historic', + __backend__ = True + datatype = Date + data = datetime.date(1727, 4, 1) + + +class IntegerTest(_LiteralRoundTripFixture, fixtures.TestBase): + __backend__ = True + + def test_literal(self): + self._literal_round_trip(Integer, [5], [5]) + + +class NumericTest(_LiteralRoundTripFixture, fixtures.TestBase): + __backend__ = True + + @testing.emits_warning(r".*does \*not\* support Decimal objects natively") + @testing.provide_metadata + def _do_test(self, type_, input_, output, + filter_=None, check_scale=False): + metadata = self.metadata + t = Table('t', metadata, Column('x', type_)) + t.create() + t.insert().execute([{'x': x} for x in input_]) + + result = set([row[0] for row in t.select().execute()]) + output = set(output) + if filter_: + result = set(filter_(x) for x in result) + output = set(filter_(x) for x in output) + eq_(result, output) + if check_scale: + eq_( + [str(x) for x in result], + [str(x) for x in output], + ) + 
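+    # Numeric returns decimal.Decimal by default; asdecimal=False switches
+    # the return type to float. A minimal sketch, assuming a scratch SQLite
+    # engine (SQLite warns that it lacks native Decimal support, but the
+    # round trip works):
+    #
+    #     import decimal
+    #     from sqlalchemy import create_engine, MetaData, Table, Column, Numeric
+    #     engine = create_engine('sqlite://')
+    #     meta = MetaData()
+    #     t = Table('nums', meta, Column('x', Numeric(8, 4)))
+    #     meta.create_all(engine)
+    #     engine.execute(t.insert(), x=decimal.Decimal('15.7563'))
+    #     engine.execute(t.select()).scalar()   # -> Decimal('15.7563')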
+ @testing.emits_warning(r".*does \*not\* support Decimal objects natively") + def test_render_literal_numeric(self): + self._literal_round_trip( + Numeric(precision=8, scale=4), + [15.7563, decimal.Decimal("15.7563")], + [decimal.Decimal("15.7563")], + ) + + @testing.emits_warning(r".*does \*not\* support Decimal objects natively") + def test_render_literal_numeric_asfloat(self): + self._literal_round_trip( + Numeric(precision=8, scale=4, asdecimal=False), + [15.7563, decimal.Decimal("15.7563")], + [15.7563], + ) + + def test_render_literal_float(self): + self._literal_round_trip( + Float(4), + [15.7563, decimal.Decimal("15.7563")], + [15.7563, ], + filter_=lambda n: n is not None and round(n, 5) or None + ) + + @testing.requires.precision_generic_float_type + def test_float_custom_scale(self): + self._do_test( + Float(None, decimal_return_scale=7, asdecimal=True), + [15.7563827, decimal.Decimal("15.7563827")], + [decimal.Decimal("15.7563827"), ], + check_scale=True + ) + + def test_numeric_as_decimal(self): + self._do_test( + Numeric(precision=8, scale=4), + [15.7563, decimal.Decimal("15.7563")], + [decimal.Decimal("15.7563")], + ) + + def test_numeric_as_float(self): + self._do_test( + Numeric(precision=8, scale=4, asdecimal=False), + [15.7563, decimal.Decimal("15.7563")], + [15.7563], + ) + + @testing.requires.fetch_null_from_numeric + def test_numeric_null_as_decimal(self): + self._do_test( + Numeric(precision=8, scale=4), + [None], + [None], + ) + + @testing.requires.fetch_null_from_numeric + def test_numeric_null_as_float(self): + self._do_test( + Numeric(precision=8, scale=4, asdecimal=False), + [None], + [None], + ) + + @testing.requires.floats_to_four_decimals + def test_float_as_decimal(self): + self._do_test( + Float(precision=8, asdecimal=True), + [15.7563, decimal.Decimal("15.7563"), None], + [decimal.Decimal("15.7563"), None], + ) + + def test_float_as_float(self): + self._do_test( + Float(precision=8), + [15.7563, decimal.Decimal("15.7563")], + [15.7563], + filter_=lambda n: n is not None and round(n, 5) or None + ) + + @testing.requires.precision_numerics_general + def test_precision_decimal(self): + numbers = set([ + decimal.Decimal("54.234246451650"), + decimal.Decimal("0.004354"), + decimal.Decimal("900.0"), + ]) + + self._do_test( + Numeric(precision=18, scale=12), + numbers, + numbers, + ) + + @testing.requires.precision_numerics_enotation_large + def test_enotation_decimal(self): + """test exceedingly small decimals. + + Decimal reports values with E notation when the exponent + is greater than 6. + + """ + + numbers = set([ + decimal.Decimal('1E-2'), + decimal.Decimal('1E-3'), + decimal.Decimal('1E-4'), + decimal.Decimal('1E-5'), + decimal.Decimal('1E-6'), + decimal.Decimal('1E-7'), + decimal.Decimal('1E-8'), + decimal.Decimal("0.01000005940696"), + decimal.Decimal("0.00000005940696"), + decimal.Decimal("0.00000000000696"), + decimal.Decimal("0.70000000000696"), + decimal.Decimal("696E-12"), + ]) + self._do_test( + Numeric(precision=18, scale=14), + numbers, + numbers + ) + + @testing.requires.precision_numerics_enotation_large + def test_enotation_decimal_large(self): + """test exceedingly large decimals. 
+ + """ + + numbers = set([ + decimal.Decimal('4E+8'), + decimal.Decimal("5748E+15"), + decimal.Decimal('1.521E+15'), + decimal.Decimal('00000000000000.1E+12'), + ]) + self._do_test( + Numeric(precision=25, scale=2), + numbers, + numbers + ) + + @testing.requires.precision_numerics_many_significant_digits + def test_many_significant_digits(self): + numbers = set([ + decimal.Decimal("31943874831932418390.01"), + decimal.Decimal("319438950232418390.273596"), + decimal.Decimal("87673.594069654243"), + ]) + self._do_test( + Numeric(precision=38, scale=12), + numbers, + numbers + ) + + @testing.requires.precision_numerics_retains_significant_digits + def test_numeric_no_decimal(self): + numbers = set([ + decimal.Decimal("1.000") + ]) + self._do_test( + Numeric(precision=5, scale=3), + numbers, + numbers, + check_scale=True + ) + + +class BooleanTest(_LiteralRoundTripFixture, fixtures.TablesTest): + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table('boolean_table', metadata, + Column('id', Integer, primary_key=True, autoincrement=False), + Column('value', Boolean), + Column('unconstrained_value', Boolean(create_constraint=False)), + ) + + def test_render_literal_bool(self): + self._literal_round_trip( + Boolean(), + [True, False], + [True, False] + ) + + def test_round_trip(self): + boolean_table = self.tables.boolean_table + + config.db.execute( + boolean_table.insert(), + { + 'id': 1, + 'value': True, + 'unconstrained_value': False + } + ) + + row = config.db.execute( + select([ + boolean_table.c.value, + boolean_table.c.unconstrained_value + ]) + ).first() + + eq_( + row, + (True, False) + ) + assert isinstance(row[0], bool) + + def test_null(self): + boolean_table = self.tables.boolean_table + + config.db.execute( + boolean_table.insert(), + { + 'id': 1, + 'value': None, + 'unconstrained_value': None + } + ) + + row = config.db.execute( + select([ + boolean_table.c.value, + boolean_table.c.unconstrained_value + ]) + ).first() + + eq_( + row, + (None, None) + ) + + +class JSONTest(_LiteralRoundTripFixture, fixtures.TablesTest): + __requires__ = 'json_type', + __backend__ = True + + datatype = JSON + + data1 = { + "key1": "value1", + "key2": "value2" + } + + data2 = { + "Key 'One'": "value1", + "key two": "value2", + "key three": "value ' three '" + } + + data3 = { + "key1": [1, 2, 3], + "key2": ["one", "two", "three"], + "key3": [{"four": "five"}, {"six": "seven"}] + } + + data4 = ["one", "two", "three"] + + data5 = { + "nested": { + "elem1": [ + {"a": "b", "c": "d"}, + {"e": "f", "g": "h"} + ], + "elem2": { + "elem3": {"elem4": "elem5"} + } + } + } + + data6 = { + "a": 5, + "b": "some value", + "c": {"foo": "bar"} + } + + @classmethod + def define_tables(cls, metadata): + Table('data_table', metadata, + Column('id', Integer, primary_key=True), + Column('name', String(30), nullable=False), + Column('data', cls.datatype), + Column('nulldata', cls.datatype(none_as_null=True)) + ) + + def test_round_trip_data1(self): + self._test_round_trip(self.data1) + + def _test_round_trip(self, data_element): + data_table = self.tables.data_table + + config.db.execute( + data_table.insert(), + {'name': 'row1', 'data': data_element} + ) + + row = config.db.execute( + select([ + data_table.c.data, + ]) + ).first() + + eq_(row, (data_element, )) + + def test_round_trip_none_as_sql_null(self): + col = self.tables.data_table.c['nulldata'] + + with config.db.connect() as conn: + conn.execute( + self.tables.data_table.insert(), + {"name": "r1", "data": None} + ) + + eq_( + 
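+                # With none_as_null=True (the 'nulldata' column), Python None
+                # is persisted as SQL NULL and matched by col.is_(null());
+                # on the plain JSON column, None becomes the JSON 'null'
+                # value, and JSON.NULL requests that explicitly, as the
+                # neighboring tests show.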
conn.scalar( + select([self.tables.data_table.c.name]). + where(col.is_(null())) + ), + "r1" + ) + + eq_( + conn.scalar( + select([col]) + ), + None + ) + + def test_round_trip_json_null_as_json_null(self): + col = self.tables.data_table.c['data'] + + with config.db.connect() as conn: + conn.execute( + self.tables.data_table.insert(), + {"name": "r1", "data": JSON.NULL} + ) + + eq_( + conn.scalar( + select([self.tables.data_table.c.name]). + where(cast(col, String) == 'null') + ), + "r1" + ) + + eq_( + conn.scalar( + select([col]) + ), + None + ) + + def test_round_trip_none_as_json_null(self): + col = self.tables.data_table.c['data'] + + with config.db.connect() as conn: + conn.execute( + self.tables.data_table.insert(), + {"name": "r1", "data": None} + ) + + eq_( + conn.scalar( + select([self.tables.data_table.c.name]). + where(cast(col, String) == 'null') + ), + "r1" + ) + + eq_( + conn.scalar( + select([col]) + ), + None + ) + + def _criteria_fixture(self): + config.db.execute( + self.tables.data_table.insert(), + [{"name": "r1", "data": self.data1}, + {"name": "r2", "data": self.data2}, + {"name": "r3", "data": self.data3}, + {"name": "r4", "data": self.data4}, + {"name": "r5", "data": self.data5}, + {"name": "r6", "data": self.data6}] + ) + + def _test_index_criteria(self, crit, expected, test_literal=True): + self._criteria_fixture() + with config.db.connect() as conn: + stmt = select([self.tables.data_table.c.name]).where(crit) + + eq_( + conn.scalar(stmt), + expected + ) + + if test_literal: + literal_sql = str(stmt.compile( + config.db, compile_kwargs={"literal_binds": True})) + + eq_(conn.scalar(literal_sql), expected) + + def test_crit_spaces_in_key(self): + name = self.tables.data_table.c.name + col = self.tables.data_table.c['data'] + + # limit the rows here to avoid PG error + # "cannot extract field from a non-object", which is + # fixed in 9.4 but may exist in 9.3 + self._test_index_criteria( + and_( + name.in_(["r1", "r2", "r3"]), + cast(col["key two"], String) == '"value2"' + ), + "r2" + ) + + @config.requirements.json_array_indexes + def test_crit_simple_int(self): + name = self.tables.data_table.c.name + col = self.tables.data_table.c['data'] + + # limit the rows here to avoid PG error + # "cannot extract array element from a non-array", which is + # fixed in 9.4 but may exist in 9.3 + self._test_index_criteria( + and_(name == 'r4', cast(col[1], String) == '"two"'), + "r4" + ) + + def test_crit_mixed_path(self): + col = self.tables.data_table.c['data'] + self._test_index_criteria( + cast(col[("key3", 1, "six")], String) == '"seven"', + "r3" + ) + + def test_crit_string_path(self): + col = self.tables.data_table.c['data'] + self._test_index_criteria( + cast(col[("nested", "elem2", "elem3", "elem4")], String) + == '"elem5"', + "r5" + ) + + def test_crit_against_string_basic(self): + name = self.tables.data_table.c.name + col = self.tables.data_table.c['data'] + + self._test_index_criteria( + and_(name == 'r6', cast(col["b"], String) == '"some value"'), + "r6" + ) + + def test_crit_against_string_coerce_type(self): + name = self.tables.data_table.c.name + col = self.tables.data_table.c['data'] + + self._test_index_criteria( + and_(name == 'r6', + cast(col["b"], String) == type_coerce("some value", JSON)), + "r6", + test_literal=False + ) + + def test_crit_against_int_basic(self): + name = self.tables.data_table.c.name + col = self.tables.data_table.c['data'] + + self._test_index_criteria( + and_(name == 'r6', cast(col["a"], String) == '5'), + "r6" + ) + + def 
test_crit_against_int_coerce_type(self): + name = self.tables.data_table.c.name + col = self.tables.data_table.c['data'] + + self._test_index_criteria( + and_(name == 'r6', cast(col["a"], String) == type_coerce(5, JSON)), + "r6", + test_literal=False + ) + + def test_unicode_round_trip(self): + s = select([ + cast( + { + util.u('réveillé'): util.u('réveillé'), + "data": {"k1": util.u('drôle')} + }, + self.datatype + ) + ]) + eq_( + config.db.scalar(s), + { + util.u('réveillé'): util.u('réveillé'), + "data": {"k1": util.u('drôle')} + }, + ) + + def test_eval_none_flag_orm(self): + from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy.orm import Session + + Base = declarative_base() + + class Data(Base): + __table__ = self.tables.data_table + + s = Session(testing.db) + + d1 = Data(name='d1', data=None, nulldata=None) + s.add(d1) + s.commit() + + s.bulk_insert_mappings( + Data, [{"name": "d2", "data": None, "nulldata": None}] + ) + eq_( + s.query( + cast(self.tables.data_table.c.data, String(convert_unicode="force")), + cast(self.tables.data_table.c.nulldata, String) + ).filter(self.tables.data_table.c.name == 'd1').first(), + ("null", None) + ) + eq_( + s.query( + cast(self.tables.data_table.c.data, String(convert_unicode="force")), + cast(self.tables.data_table.c.nulldata, String) + ).filter(self.tables.data_table.c.name == 'd2').first(), + ("null", None) + ) + + +__all__ = ('UnicodeVarcharTest', 'UnicodeTextTest', 'JSONTest', + 'DateTest', 'DateTimeTest', 'TextTest', + 'NumericTest', 'IntegerTest', + 'DateTimeHistoricTest', 'DateTimeCoercedToDateTimeTest', + 'TimeMicrosecondsTest', 'TimeTest', 'DateTimeMicrosecondsTest', + 'DateHistoricTest', 'StringTest', 'BooleanTest') diff --git a/app/lib/sqlalchemy/testing/suite/test_update_delete.py b/app/lib/sqlalchemy/testing/suite/test_update_delete.py new file mode 100644 index 0000000..e4c61e7 --- /dev/null +++ b/app/lib/sqlalchemy/testing/suite/test_update_delete.py @@ -0,0 +1,63 @@ +from .. 
import fixtures, config +from ..assertions import eq_ + +from sqlalchemy import Integer, String +from ..schema import Table, Column + + +class SimpleUpdateDeleteTest(fixtures.TablesTest): + run_deletes = 'each' + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table('plain_pk', metadata, + Column('id', Integer, primary_key=True), + Column('data', String(50)) + ) + + @classmethod + def insert_data(cls): + config.db.execute( + cls.tables.plain_pk.insert(), + [ + {"id": 1, "data": "d1"}, + {"id": 2, "data": "d2"}, + {"id": 3, "data": "d3"}, + ] + ) + + def test_update(self): + t = self.tables.plain_pk + r = config.db.execute( + t.update().where(t.c.id == 2), + data="d2_new" + ) + assert not r.is_insert + assert not r.returns_rows + + eq_( + config.db.execute(t.select().order_by(t.c.id)).fetchall(), + [ + (1, "d1"), + (2, "d2_new"), + (3, "d3") + ] + ) + + def test_delete(self): + t = self.tables.plain_pk + r = config.db.execute( + t.delete().where(t.c.id == 2) + ) + assert not r.is_insert + assert not r.returns_rows + eq_( + config.db.execute(t.select().order_by(t.c.id)).fetchall(), + [ + (1, "d1"), + (3, "d3") + ] + ) + +__all__ = ('SimpleUpdateDeleteTest', ) diff --git a/app/lib/sqlalchemy/testing/util.py b/app/lib/sqlalchemy/testing/util.py new file mode 100644 index 0000000..8f91f31 --- /dev/null +++ b/app/lib/sqlalchemy/testing/util.py @@ -0,0 +1,280 @@ +# testing/util.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from ..util import jython, pypy, defaultdict, decorator, py2k +import decimal +import gc +import time +import random +import sys +import types + +if jython: + def jython_gc_collect(*args): + """aggressive gc.collect for tests.""" + gc.collect() + time.sleep(0.1) + gc.collect() + gc.collect() + return 0 + + # "lazy" gc, for VM's that don't GC on refcount == 0 + gc_collect = lazy_gc = jython_gc_collect +elif pypy: + def pypy_gc_collect(*args): + gc.collect() + gc.collect() + gc_collect = lazy_gc = pypy_gc_collect +else: + # assume CPython - straight gc.collect, lazy_gc() is a pass + gc_collect = gc.collect + + def lazy_gc(): + pass + + +def picklers(): + picklers = set() + if py2k: + try: + import cPickle + picklers.add(cPickle) + except ImportError: + pass + + import pickle + picklers.add(pickle) + + # yes, this thing needs this much testing + for pickle_ in picklers: + for protocol in -1, 0, 1, 2: + yield pickle_.loads, lambda d: pickle_.dumps(d, protocol) + + +def round_decimal(value, prec): + if isinstance(value, float): + return round(value, prec) + + # can also use shift() here but that is 2.6 only + return (value * decimal.Decimal("1" + "0" * prec) + ).to_integral(decimal.ROUND_FLOOR) / \ + pow(10, prec) + + +class RandomSet(set): + def __iter__(self): + l = list(set.__iter__(self)) + random.shuffle(l) + return iter(l) + + def pop(self): + index = random.randint(0, len(self) - 1) + item = list(set.__iter__(self))[index] + self.remove(item) + return item + + def union(self, other): + return RandomSet(set.union(self, other)) + + def difference(self, other): + return RandomSet(set.difference(self, other)) + + def intersection(self, other): + return RandomSet(set.intersection(self, other)) + + def copy(self): + return RandomSet(self) + + +def conforms_partial_ordering(tuples, sorted_elements): + """True if the given sorting conforms to the given partial ordering.""" + + deps = 
defaultdict(set) + for parent, child in tuples: + deps[parent].add(child) + for i, node in enumerate(sorted_elements): + for n in sorted_elements[i:]: + if node in deps[n]: + return False + else: + return True + + +def all_partial_orderings(tuples, elements): + edges = defaultdict(set) + for parent, child in tuples: + edges[child].add(parent) + + def _all_orderings(elements): + + if len(elements) == 1: + yield list(elements) + else: + for elem in elements: + subset = set(elements).difference([elem]) + if not subset.intersection(edges[elem]): + for sub_ordering in _all_orderings(subset): + yield [elem] + sub_ordering + + return iter(_all_orderings(elements)) + + +def function_named(fn, name): + """Return a function with a given __name__. + + Will assign to __name__ and return the original function if possible on + the Python implementation, otherwise a new function will be constructed. + + This function should be phased out as much as possible + in favor of @decorator. Tests that "generate" many named tests + should be modernized. + + """ + try: + fn.__name__ = name + except TypeError: + fn = types.FunctionType(fn.__code__, fn.__globals__, name, + fn.__defaults__, fn.__closure__) + return fn + + +def run_as_contextmanager(ctx, fn, *arg, **kw): + """Run the given function under the given contextmanager, + simulating the behavior of 'with' to support older + Python versions. + + This is not necessary anymore as we have placed 2.6 + as minimum Python version, however some tests are still using + this structure. + + """ + + obj = ctx.__enter__() + try: + result = fn(obj, *arg, **kw) + ctx.__exit__(None, None, None) + return result + except: + exc_info = sys.exc_info() + raise_ = ctx.__exit__(*exc_info) + if raise_ is None: + raise + else: + return raise_ + + +def rowset(results): + """Converts the results of sql execution into a plain set of column tuples. + + Useful for asserting the results of an unordered query. + """ + + return set([tuple(row) for row in results]) + + +def fail(msg): + assert False, msg + + +@decorator +def provide_metadata(fn, *args, **kw): + """Provide bound MetaData for a single test, dropping afterwards.""" + + from . import config + from . import engines + from sqlalchemy import schema + + metadata = schema.MetaData(config.db) + self = args[0] + prev_meta = getattr(self, 'metadata', None) + self.metadata = metadata + try: + return fn(*args, **kw) + finally: + engines.drop_all_tables(metadata, config.db) + self.metadata = prev_meta + + +def force_drop_names(*names): + """Force the given table names to be dropped after test complete, + isolating for foreign key cycles + + """ + from . import config + from sqlalchemy import inspect + + @decorator + def go(fn, *args, **kw): + + try: + return fn(*args, **kw) + finally: + drop_all_tables( + config.db, inspect(config.db), include_names=names) + return go + + +class adict(dict): + """Dict keys available as attributes. 
Shadows.""" + + def __getattribute__(self, key): + try: + return self[key] + except KeyError: + return dict.__getattribute__(self, key) + + def __call__(self, *keys): + return tuple([self[key] for key in keys]) + + get_all = __call__ + + +def drop_all_tables(engine, inspector, schema=None, include_names=None): + from sqlalchemy import Column, Table, Integer, MetaData, \ + ForeignKeyConstraint + from sqlalchemy.schema import DropTable, DropConstraint + + if include_names is not None: + include_names = set(include_names) + + with engine.connect() as conn: + for tname, fkcs in reversed( + inspector.get_sorted_table_and_fkc_names(schema=schema)): + if tname: + if include_names is not None and tname not in include_names: + continue + conn.execute(DropTable( + Table(tname, MetaData(), schema=schema) + )) + elif fkcs: + if not engine.dialect.supports_alter: + continue + for tname, fkc in fkcs: + if include_names is not None and \ + tname not in include_names: + continue + tb = Table( + tname, MetaData(), + Column('x', Integer), + Column('y', Integer), + schema=schema + ) + conn.execute(DropConstraint( + ForeignKeyConstraint( + [tb.c.x], [tb.c.y], name=fkc) + )) + + +def teardown_events(event_cls): + @decorator + def decorate(fn, *arg, **kw): + try: + return fn(*arg, **kw) + finally: + event_cls._clear() + return decorate + diff --git a/app/lib/sqlalchemy/testing/warnings.py b/app/lib/sqlalchemy/testing/warnings.py new file mode 100644 index 0000000..3d1c098 --- /dev/null +++ b/app/lib/sqlalchemy/testing/warnings.py @@ -0,0 +1,41 @@ +# testing/warnings.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from __future__ import absolute_import + +import warnings +from .. import exc as sa_exc +from . import assertions + + +def setup_filters(): + """Set global warning behavior for the test suite.""" + + warnings.filterwarnings('ignore', + category=sa_exc.SAPendingDeprecationWarning) + warnings.filterwarnings('error', category=sa_exc.SADeprecationWarning) + warnings.filterwarnings('error', category=sa_exc.SAWarning) + + # some selected deprecations... + warnings.filterwarnings('error', category=DeprecationWarning) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message=".*StopIteration") + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message=".*inspect.getargspec") + + +def assert_warnings(fn, warning_msgs, regex=False): + """Assert that each of the given warnings are emitted by fn. + + Deprecated. Please use assertions.expect_warnings(). + + """ + + with assertions._expect_warnings( + sa_exc.SAWarning, warning_msgs, regex=regex): + return fn() + diff --git a/app/lib/sqlalchemy/types.py b/app/lib/sqlalchemy/types.py new file mode 100644 index 0000000..ea07b91 --- /dev/null +++ b/app/lib/sqlalchemy/types.py @@ -0,0 +1,81 @@ +# types.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Compatibility namespace for sqlalchemy.sql.types. 
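+For example, the public names re-exported here (see ``__all__`` below) can be
+imported from this module directly; a minimal illustrative sketch::
+
+    from sqlalchemy.types import Integer, String, JSON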
+ +""" + +__all__ = ['TypeEngine', 'TypeDecorator', 'UserDefinedType', + 'INT', 'CHAR', 'VARCHAR', 'NCHAR', 'NVARCHAR', 'TEXT', 'Text', + 'FLOAT', 'NUMERIC', 'REAL', 'DECIMAL', 'TIMESTAMP', 'DATETIME', + 'CLOB', 'BLOB', 'BINARY', 'VARBINARY', 'BOOLEAN', 'BIGINT', + 'SMALLINT', 'INTEGER', 'DATE', 'TIME', 'String', 'Integer', + 'SmallInteger', 'BigInteger', 'Numeric', 'Float', 'DateTime', + 'Date', 'Time', 'LargeBinary', 'Binary', 'Boolean', 'Unicode', + 'Concatenable', 'UnicodeText', 'PickleType', 'Interval', 'Enum', + 'Indexable', 'ARRAY', 'JSON'] + +from .sql.type_api import ( + adapt_type, + TypeEngine, + TypeDecorator, + Variant, + to_instance, + UserDefinedType +) +from .sql.sqltypes import ( + ARRAY, + BIGINT, + BINARY, + BLOB, + BOOLEAN, + BigInteger, + Binary, + _Binary, + Boolean, + CHAR, + CLOB, + Concatenable, + DATE, + DATETIME, + DECIMAL, + Date, + DateTime, + Enum, + FLOAT, + Float, + Indexable, + INT, + INTEGER, + Integer, + Interval, + JSON, + LargeBinary, + MatchType, + NCHAR, + NVARCHAR, + NullType, + NULLTYPE, + NUMERIC, + Numeric, + PickleType, + REAL, + SchemaType, + SMALLINT, + SmallInteger, + String, + STRINGTYPE, + TEXT, + TIME, + TIMESTAMP, + Text, + Time, + Unicode, + UnicodeText, + VARBINARY, + VARCHAR, + ) diff --git a/app/lib/sqlalchemy/util/__init__.py b/app/lib/sqlalchemy/util/__init__.py new file mode 100644 index 0000000..ea1d925 --- /dev/null +++ b/app/lib/sqlalchemy/util/__init__.py @@ -0,0 +1,49 @@ +# util/__init__.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from .compat import callable, cmp, reduce, \ + threading, py3k, py33, py36, py2k, jython, pypy, cpython, win32, \ + pickle, dottedgetter, parse_qsl, namedtuple, next, reraise, \ + raise_from_cause, text_type, safe_kwarg, string_types, int_types, \ + binary_type, nested, \ + quote_plus, with_metaclass, print_, itertools_filterfalse, u, ue, b,\ + unquote_plus, unquote, b64decode, b64encode, byte_buffer, itertools_filter,\ + iterbytes, StringIO, inspect_getargspec, zip_longest + +from ._collections import KeyedTuple, ImmutableContainer, immutabledict, \ + Properties, OrderedProperties, ImmutableProperties, OrderedDict, \ + OrderedSet, IdentitySet, OrderedIdentitySet, column_set, \ + column_dict, ordered_column_set, populate_column_dict, unique_list, \ + UniqueAppender, PopulateDict, EMPTY_SET, to_list, to_set, \ + to_column_set, update_copy, flatten_iterator, has_intersection, \ + LRUCache, ScopedRegistry, ThreadLocalRegistry, WeakSequence, \ + coerce_generator_arg, lightweight_named_tuple + +from .langhelpers import iterate_attributes, class_hierarchy, \ + portable_instancemethod, unbound_method_to_callable, \ + getargspec_init, format_argspec_init, format_argspec_plus, \ + get_func_kwargs, get_cls_kwargs, decorator, as_interface, \ + memoized_property, memoized_instancemethod, md5_hex, \ + group_expirable_memoized_property, dependencies, decode_slice, \ + monkeypatch_proxied_specials, asbool, bool_or_str, coerce_kw_type,\ + duck_type_collection, assert_arg_type, symbol, dictlike_iteritems,\ + classproperty, set_creation_order, warn_exception, warn, NoneType,\ + constructor_copy, methods_equivalent, chop_traceback, asint,\ + generic_repr, counter, PluginLoader, hybridproperty, hybridmethod, \ + safe_reraise,\ + get_callable_argspec, only_once, attrsetter, ellipses_string, \ + warn_limited, map_bits, MemoizedSlots, EnsureKWArgType, 
wrap_callable + +from .deprecations import warn_deprecated, warn_pending_deprecation, \ + deprecated, pending_deprecation, inject_docstring_text + +# things that used to be not always available, +# but are now as of currently supported Python versions +from collections import defaultdict +from functools import partial +from functools import update_wrapper +from contextlib import contextmanager diff --git a/app/lib/sqlalchemy/util/_collections.py b/app/lib/sqlalchemy/util/_collections.py new file mode 100644 index 0000000..e3e1d71 --- /dev/null +++ b/app/lib/sqlalchemy/util/_collections.py @@ -0,0 +1,1045 @@ +# util/_collections.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Collection classes and helpers.""" + +from __future__ import absolute_import +import weakref +import operator +from .compat import threading, itertools_filterfalse, string_types, \ + binary_types +from . import py2k +import types +import collections + +EMPTY_SET = frozenset() + + +class AbstractKeyedTuple(tuple): + __slots__ = () + + def keys(self): + """Return a list of string key names for this :class:`.KeyedTuple`. + + .. seealso:: + + :attr:`.KeyedTuple._fields` + + """ + + return list(self._fields) + + +class KeyedTuple(AbstractKeyedTuple): + """``tuple`` subclass that adds labeled names. + + E.g.:: + + >>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"]) + >>> k.one + 1 + >>> k.two + 2 + + Result rows returned by :class:`.Query` that contain multiple + ORM entities and/or column expressions make use of this + class to return rows. + + The :class:`.KeyedTuple` exhibits similar behavior to the + ``collections.namedtuple()`` construct provided in the Python + standard library, however is architected very differently. + Unlike ``collections.namedtuple()``, :class:`.KeyedTuple` + does not rely on creation of custom subtypes in order to represent + a new series of keys, instead each :class:`.KeyedTuple` instance + receives its list of keys in place. The subtype approach + of ``collections.namedtuple()`` introduces significant complexity + and performance overhead, which is not necessary for the + :class:`.Query` object's use case. + + .. versionchanged:: 0.8 + Compatibility methods with ``collections.namedtuple()`` have been + added including :attr:`.KeyedTuple._fields` and + :meth:`.KeyedTuple._asdict`. + + .. seealso:: + + :ref:`ormtutorial_querying` + + """ + + def __new__(cls, vals, labels=None): + t = tuple.__new__(cls, vals) + if labels: + t.__dict__.update(zip(labels, vals)) + else: + labels = [] + t.__dict__['_labels'] = labels + return t + + @property + def _fields(self): + """Return a tuple of string key names for this :class:`.KeyedTuple`. + + This method provides compatibility with ``collections.namedtuple()``. + + .. versionadded:: 0.8 + + .. seealso:: + + :meth:`.KeyedTuple.keys` + + """ + return tuple([l for l in self._labels if l is not None]) + + def __setattr__(self, key, value): + raise AttributeError("Can't set attribute: %s" % key) + + def _asdict(self): + """Return the contents of this :class:`.KeyedTuple` as a dictionary. + + This method provides compatibility with ``collections.namedtuple()``, + with the exception that the dictionary returned is **not** ordered. + + .. 
versionadded:: 0.8 + + """ + return dict((key, self.__dict__[key]) for key in self.keys()) + + +class _LW(AbstractKeyedTuple): + __slots__ = () + + def __new__(cls, vals): + return tuple.__new__(cls, vals) + + def __reduce__(self): + # for pickling, degrade down to the regular + # KeyedTuple, thus avoiding anonymous class pickling + # difficulties + return KeyedTuple, (list(self), self._real_fields) + + def _asdict(self): + """Return the contents of this :class:`.KeyedTuple` as a dictionary.""" + + d = dict(zip(self._real_fields, self)) + d.pop(None, None) + return d + + +class ImmutableContainer(object): + def _immutable(self, *arg, **kw): + raise TypeError("%s object is immutable" % self.__class__.__name__) + + __delitem__ = __setitem__ = __setattr__ = _immutable + + +class immutabledict(ImmutableContainer, dict): + + clear = pop = popitem = setdefault = \ + update = ImmutableContainer._immutable + + def __new__(cls, *args): + new = dict.__new__(cls) + dict.__init__(new, *args) + return new + + def __init__(self, *args): + pass + + def __reduce__(self): + return immutabledict, (dict(self), ) + + def union(self, d): + if not d: + return self + elif not self: + if isinstance(d, immutabledict): + return d + else: + return immutabledict(d) + else: + d2 = immutabledict(self) + dict.update(d2, d) + return d2 + + def __repr__(self): + return "immutabledict(%s)" % dict.__repr__(self) + + +class Properties(object): + """Provide a __getattr__/__setattr__ interface over a dict.""" + + __slots__ = '_data', + + def __init__(self, data): + object.__setattr__(self, '_data', data) + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(list(self._data.values())) + + def __add__(self, other): + return list(self) + list(other) + + def __setitem__(self, key, object): + self._data[key] = object + + def __getitem__(self, key): + return self._data[key] + + def __delitem__(self, key): + del self._data[key] + + def __setattr__(self, key, obj): + self._data[key] = obj + + def __getstate__(self): + return {'_data': self._data} + + def __setstate__(self, state): + object.__setattr__(self, '_data', state['_data']) + + def __getattr__(self, key): + try: + return self._data[key] + except KeyError: + raise AttributeError(key) + + def __contains__(self, key): + return key in self._data + + def as_immutable(self): + """Return an immutable proxy for this :class:`.Properties`.""" + + return ImmutableProperties(self._data) + + def update(self, value): + self._data.update(value) + + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + def keys(self): + return list(self._data) + + def values(self): + return list(self._data.values()) + + def items(self): + return list(self._data.items()) + + def has_key(self, key): + return key in self._data + + def clear(self): + self._data.clear() + + +class OrderedProperties(Properties): + """Provide a __getattr__/__setattr__ interface with an OrderedDict + as backing store.""" + + __slots__ = () + + def __init__(self): + Properties.__init__(self, OrderedDict()) + + +class ImmutableProperties(ImmutableContainer, Properties): + """Provide immutable dict/object attribute to an underlying dictionary.""" + + __slots__ = () + + +class OrderedDict(dict): + """A dict that returns keys/values/items in the order they were added.""" + + __slots__ = '_list', + + def __reduce__(self): + return OrderedDict, (self.items(),) + + def __init__(self, ____sequence=None, **kwargs): + self._list = [] + if ____sequence is None: + if 
kwargs: + self.update(**kwargs) + else: + self.update(____sequence, **kwargs) + + def clear(self): + self._list = [] + dict.clear(self) + + def copy(self): + return self.__copy__() + + def __copy__(self): + return OrderedDict(self) + + def sort(self, *arg, **kw): + self._list.sort(*arg, **kw) + + def update(self, ____sequence=None, **kwargs): + if ____sequence is not None: + if hasattr(____sequence, 'keys'): + for key in ____sequence.keys(): + self.__setitem__(key, ____sequence[key]) + else: + for key, value in ____sequence: + self[key] = value + if kwargs: + self.update(kwargs) + + def setdefault(self, key, value): + if key not in self: + self.__setitem__(key, value) + return value + else: + return self.__getitem__(key) + + def __iter__(self): + return iter(self._list) + + def keys(self): + return list(self) + + def values(self): + return [self[key] for key in self._list] + + def items(self): + return [(key, self[key]) for key in self._list] + + if py2k: + def itervalues(self): + return iter(self.values()) + + def iterkeys(self): + return iter(self) + + def iteritems(self): + return iter(self.items()) + + def __setitem__(self, key, object): + if key not in self: + try: + self._list.append(key) + except AttributeError: + # work around Python pickle loads() with + # dict subclass (seems to ignore __setstate__?) + self._list = [key] + dict.__setitem__(self, key, object) + + def __delitem__(self, key): + dict.__delitem__(self, key) + self._list.remove(key) + + def pop(self, key, *default): + present = key in self + value = dict.pop(self, key, *default) + if present: + self._list.remove(key) + return value + + def popitem(self): + item = dict.popitem(self) + self._list.remove(item[0]) + return item + + +class OrderedSet(set): + def __init__(self, d=None): + set.__init__(self) + self._list = [] + if d is not None: + self._list = unique_list(d) + set.update(self, self._list) + else: + self._list = [] + + def add(self, element): + if element not in self: + self._list.append(element) + set.add(self, element) + + def remove(self, element): + set.remove(self, element) + self._list.remove(element) + + def insert(self, pos, element): + if element not in self: + self._list.insert(pos, element) + set.add(self, element) + + def discard(self, element): + if element in self: + self._list.remove(element) + set.remove(self, element) + + def clear(self): + set.clear(self) + self._list = [] + + def __getitem__(self, key): + return self._list[key] + + def __iter__(self): + return iter(self._list) + + def __add__(self, other): + return self.union(other) + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, self._list) + + __str__ = __repr__ + + def update(self, iterable): + for e in iterable: + if e not in self: + self._list.append(e) + set.add(self, e) + return self + + __ior__ = update + + def union(self, other): + result = self.__class__(self) + result.update(other) + return result + + __or__ = union + + def intersection(self, other): + other = set(other) + return self.__class__(a for a in self if a in other) + + __and__ = intersection + + def symmetric_difference(self, other): + other = set(other) + result = self.__class__(a for a in self if a not in other) + result.update(a for a in other if a not in self) + return result + + __xor__ = symmetric_difference + + def difference(self, other): + other = set(other) + return self.__class__(a for a in self if a not in other) + + __sub__ = difference + + def intersection_update(self, other): + other = set(other) + set.intersection_update(self, other) 
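+        # rebuild the ordered backing list, keeping only elements that
+        # survived the set intersection above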
+ self._list = [a for a in self._list if a in other] + return self + + __iand__ = intersection_update + + def symmetric_difference_update(self, other): + set.symmetric_difference_update(self, other) + self._list = [a for a in self._list if a in self] + self._list += [a for a in other._list if a in self] + return self + + __ixor__ = symmetric_difference_update + + def difference_update(self, other): + set.difference_update(self, other) + self._list = [a for a in self._list if a in self] + return self + + __isub__ = difference_update + + +class IdentitySet(object): + """A set that considers only object id() for uniqueness. + + This strategy has edge cases for builtin types- it's possible to have + two 'foo' strings in one of these sets, for example. Use sparingly. + + """ + + _working_set = set + + def __init__(self, iterable=None): + self._members = dict() + if iterable: + for o in iterable: + self.add(o) + + def add(self, value): + self._members[id(value)] = value + + def __contains__(self, value): + return id(value) in self._members + + def remove(self, value): + del self._members[id(value)] + + def discard(self, value): + try: + self.remove(value) + except KeyError: + pass + + def pop(self): + try: + pair = self._members.popitem() + return pair[1] + except KeyError: + raise KeyError('pop from an empty set') + + def clear(self): + self._members.clear() + + def __cmp__(self, other): + raise TypeError('cannot compare sets using cmp()') + + def __eq__(self, other): + if isinstance(other, IdentitySet): + return self._members == other._members + else: + return False + + def __ne__(self, other): + if isinstance(other, IdentitySet): + return self._members != other._members + else: + return True + + def issubset(self, iterable): + other = type(self)(iterable) + + if len(self) > len(other): + return False + for m in itertools_filterfalse(other._members.__contains__, + iter(self._members.keys())): + return False + return True + + def __le__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + return self.issubset(other) + + def __lt__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + return len(self) < len(other) and self.issubset(other) + + def issuperset(self, iterable): + other = type(self)(iterable) + + if len(self) < len(other): + return False + + for m in itertools_filterfalse(self._members.__contains__, + iter(other._members.keys())): + return False + return True + + def __ge__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + return self.issuperset(other) + + def __gt__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + return len(self) > len(other) and self.issuperset(other) + + def union(self, iterable): + result = type(self)() + # testlib.pragma exempt:__hash__ + members = self._member_id_tuples() + other = _iter_id(iterable) + result._members.update(self._working_set(members).union(other)) + return result + + def __or__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + return self.union(other) + + def update(self, iterable): + self._members = self.union(iterable)._members + + def __ior__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + self.update(other) + return self + + def difference(self, iterable): + result = type(self)() + # testlib.pragma exempt:__hash__ + members = self._member_id_tuples() + other = _iter_id(iterable) + result._members.update(self._working_set(members).difference(other)) + return 
result + + def __sub__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + return self.difference(other) + + def difference_update(self, iterable): + self._members = self.difference(iterable)._members + + def __isub__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + self.difference_update(other) + return self + + def intersection(self, iterable): + result = type(self)() + # testlib.pragma exempt:__hash__ + members = self._member_id_tuples() + other = _iter_id(iterable) + result._members.update(self._working_set(members).intersection(other)) + return result + + def __and__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + return self.intersection(other) + + def intersection_update(self, iterable): + self._members = self.intersection(iterable)._members + + def __iand__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + self.intersection_update(other) + return self + + def symmetric_difference(self, iterable): + result = type(self)() + # testlib.pragma exempt:__hash__ + members = self._member_id_tuples() + other = _iter_id(iterable) + result._members.update( + self._working_set(members).symmetric_difference(other)) + return result + + def _member_id_tuples(self): + return ((id(v), v) for v in self._members.values()) + + def __xor__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + return self.symmetric_difference(other) + + def symmetric_difference_update(self, iterable): + self._members = self.symmetric_difference(iterable)._members + + def __ixor__(self, other): + if not isinstance(other, IdentitySet): + return NotImplemented + self.symmetric_difference_update(other) + return self + + def copy(self): + return type(self)(iter(self._members.values())) + + __copy__ = copy + + def __len__(self): + return len(self._members) + + def __iter__(self): + return iter(self._members.values()) + + def __hash__(self): + raise TypeError('set objects are unhashable') + + def __repr__(self): + return '%s(%r)' % (type(self).__name__, list(self._members.values())) + + +class WeakSequence(object): + def __init__(self, __elements=()): + self._storage = [ + weakref.ref(element, self._remove) for element in __elements + ] + + def append(self, item): + self._storage.append(weakref.ref(item, self._remove)) + + def _remove(self, ref): + self._storage.remove(ref) + + def __len__(self): + return len(self._storage) + + def __iter__(self): + return (obj for obj in + (ref() for ref in self._storage) if obj is not None) + + def __getitem__(self, index): + try: + obj = self._storage[index] + except IndexError: + raise IndexError("Index %s out of range" % index) + else: + return obj() + + +class OrderedIdentitySet(IdentitySet): + class _working_set(OrderedSet): + # a testing pragma: exempt the OIDS working set from the test suite's + # "never call the user's __hash__" assertions. this is a big hammer, + # but it's safe here: IDS operates on (id, instance) tuples in the + # working set. + __sa_hash_exempt__ = True + + def __init__(self, iterable=None): + IdentitySet.__init__(self) + self._members = OrderedDict() + if iterable: + for o in iterable: + self.add(o) + + +class PopulateDict(dict): + """A dict which populates missing values via a creation function. + + Note the creation function takes a key, unlike + collections.defaultdict. 
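+
+    A minimal illustration (editor's sketch, not from the upstream file)::
+
+        lengths = PopulateDict(len)
+        lengths["abc"]    # creator is called with the key -> 3, then cached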
+ + """ + + def __init__(self, creator): + self.creator = creator + + def __missing__(self, key): + self[key] = val = self.creator(key) + return val + +# Define collections that are capable of storing +# ColumnElement objects as hashable keys/elements. +# At this point, these are mostly historical, things +# used to be more complicated. +column_set = set +column_dict = dict +ordered_column_set = OrderedSet +populate_column_dict = PopulateDict + + +_getters = PopulateDict(operator.itemgetter) + +_property_getters = PopulateDict( + lambda idx: property(operator.itemgetter(idx))) + + +def unique_list(seq, hashfunc=None): + seen = set() + seen_add = seen.add + if not hashfunc: + return [x for x in seq + if x not in seen + and not seen_add(x)] + else: + return [x for x in seq + if hashfunc(x) not in seen + and not seen_add(hashfunc(x))] + + +class UniqueAppender(object): + """Appends items to a collection ensuring uniqueness. + + Additional appends() of the same object are ignored. Membership is + determined by identity (``is a``) not equality (``==``). + """ + + def __init__(self, data, via=None): + self.data = data + self._unique = {} + if via: + self._data_appender = getattr(data, via) + elif hasattr(data, 'append'): + self._data_appender = data.append + elif hasattr(data, 'add'): + self._data_appender = data.add + + def append(self, item): + id_ = id(item) + if id_ not in self._unique: + self._data_appender(item) + self._unique[id_] = True + + def __iter__(self): + return iter(self.data) + + +def coerce_generator_arg(arg): + if len(arg) == 1 and isinstance(arg[0], types.GeneratorType): + return list(arg[0]) + else: + return arg + + +def to_list(x, default=None): + if x is None: + return default + if not isinstance(x, collections.Iterable) or \ + isinstance(x, string_types + binary_types): + return [x] + elif isinstance(x, list): + return x + else: + return list(x) + + +def has_intersection(set_, iterable): + """return True if any items of set_ are present in iterable. + + Goes through special effort to ensure __hash__ is not called + on items in iterable that don't support it. + + """ + # TODO: optimize, write in C, etc. + return bool( + set_.intersection([i for i in iterable if i.__hash__]) + ) + + +def to_set(x): + if x is None: + return set() + if not isinstance(x, set): + return set(to_list(x)) + else: + return x + + +def to_column_set(x): + if x is None: + return column_set() + if not isinstance(x, column_set): + return column_set(to_list(x)) + else: + return x + + +def update_copy(d, _new=None, **kw): + """Copy the given dict and update with the given values.""" + + d = d.copy() + if _new: + d.update(_new) + d.update(**kw) + return d + + +def flatten_iterator(x): + """Given an iterator of which further sub-elements may also be + iterators, flatten the sub-elements into a single iterator. + + """ + for elem in x: + if not isinstance(elem, str) and hasattr(elem, '__iter__'): + for y in flatten_iterator(elem): + yield y + else: + yield elem + + +class LRUCache(dict): + """Dictionary with 'squishy' removal of least + recently used items. + + Note that either get() or [] should be used here, but + generally its not safe to do an "in" check first as the dictionary + can change subsequent to that call. 
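+
+    A minimal illustration (editor's sketch, not from the upstream file)::
+
+        cache = LRUCache(capacity=100)
+        cache["k"] = "v"               # stores [key, value, use counter]
+        assert cache.get("k") == "v"   # access bumps the use counter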
+ + """ + + def __init__(self, capacity=100, threshold=.5): + self.capacity = capacity + self.threshold = threshold + self._counter = 0 + self._mutex = threading.Lock() + + def _inc_counter(self): + self._counter += 1 + return self._counter + + def get(self, key, default=None): + item = dict.get(self, key, default) + if item is not default: + item[2] = self._inc_counter() + return item[1] + else: + return default + + def __getitem__(self, key): + item = dict.__getitem__(self, key) + item[2] = self._inc_counter() + return item[1] + + def values(self): + return [i[1] for i in dict.values(self)] + + def setdefault(self, key, value): + if key in self: + return self[key] + else: + self[key] = value + return value + + def __setitem__(self, key, value): + item = dict.get(self, key) + if item is None: + item = [key, value, self._inc_counter()] + dict.__setitem__(self, key, item) + else: + item[1] = value + self._manage_size() + + def _manage_size(self): + if not self._mutex.acquire(False): + return + try: + while len(self) > self.capacity + self.capacity * self.threshold: + by_counter = sorted(dict.values(self), + key=operator.itemgetter(2), + reverse=True) + for item in by_counter[self.capacity:]: + try: + del self[item[0]] + except KeyError: + # deleted elsewhere; skip + continue + finally: + self._mutex.release() + + +_lw_tuples = LRUCache(100) + + +def lightweight_named_tuple(name, fields): + hash_ = (name, ) + tuple(fields) + tp_cls = _lw_tuples.get(hash_) + if tp_cls: + return tp_cls + + tp_cls = type( + name, (_LW,), + dict([ + (field, _property_getters[idx]) + for idx, field in enumerate(fields) if field is not None + ] + [('__slots__', ())]) + ) + + tp_cls._real_fields = fields + tp_cls._fields = tuple([f for f in fields if f is not None]) + + _lw_tuples[hash_] = tp_cls + return tp_cls + + +class ScopedRegistry(object): + """A Registry that can store one or multiple instances of a single + class on the basis of a "scope" function. + + The object implements ``__call__`` as the "getter", so by + calling ``myregistry()`` the contained object is returned + for the current scope. + + :param createfunc: + a callable that returns a new object to be placed in the registry + + :param scopefunc: + a callable that will return a key to store/retrieve an object. + """ + + def __init__(self, createfunc, scopefunc): + """Construct a new :class:`.ScopedRegistry`. + + :param createfunc: A creation function that will generate + a new value for the current scope, if none is present. + + :param scopefunc: A function that returns a hashable + token representing the current scope (such as, current + thread identifier). + + """ + self.createfunc = createfunc + self.scopefunc = scopefunc + self.registry = {} + + def __call__(self): + key = self.scopefunc() + try: + return self.registry[key] + except KeyError: + return self.registry.setdefault(key, self.createfunc()) + + def has(self): + """Return True if an object is present in the current scope.""" + + return self.scopefunc() in self.registry + + def set(self, obj): + """Set the value for the current scope.""" + + self.registry[self.scopefunc()] = obj + + def clear(self): + """Clear the current scope, if any.""" + + try: + del self.registry[self.scopefunc()] + except KeyError: + pass + + +class ThreadLocalRegistry(ScopedRegistry): + """A :class:`.ScopedRegistry` that uses a ``threading.local()`` + variable for storage. 
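+
+    A minimal illustration (editor's sketch; ``make_session`` stands in for
+    any zero-argument factory)::
+
+        registry = ThreadLocalRegistry(make_session)
+        obj = registry()   # one instance per thread, created on first call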
+ + """ + + def __init__(self, createfunc): + self.createfunc = createfunc + self.registry = threading.local() + + def __call__(self): + try: + return self.registry.value + except AttributeError: + val = self.registry.value = self.createfunc() + return val + + def has(self): + return hasattr(self.registry, "value") + + def set(self, obj): + self.registry.value = obj + + def clear(self): + try: + del self.registry.value + except AttributeError: + pass + + +def _iter_id(iterable): + """Generator: ((id(o), o) for o in iterable).""" + + for item in iterable: + yield id(item), item diff --git a/app/lib/sqlalchemy/util/compat.py b/app/lib/sqlalchemy/util/compat.py new file mode 100644 index 0000000..5c615b0 --- /dev/null +++ b/app/lib/sqlalchemy/util/compat.py @@ -0,0 +1,269 @@ +# util/compat.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Handle Python version/platform incompatibilities.""" + +import sys +from contextlib import contextmanager + +try: + import threading +except ImportError: + import dummy_threading as threading + +py36 = sys.version_info >= (3, 6) +py33 = sys.version_info >= (3, 3) +py32 = sys.version_info >= (3, 2) +py3k = sys.version_info >= (3, 0) +py2k = sys.version_info < (3, 0) +py265 = sys.version_info >= (2, 6, 5) +jython = sys.platform.startswith('java') +pypy = hasattr(sys, 'pypy_version_info') +win32 = sys.platform.startswith('win') +cpython = not pypy and not jython # TODO: something better for this ? + +import collections +next = next + +if py3k: + import pickle +else: + try: + import cPickle as pickle + except ImportError: + import pickle + +# work around http://bugs.python.org/issue2646 +if py265: + safe_kwarg = lambda arg: arg +else: + safe_kwarg = str + +ArgSpec = collections.namedtuple("ArgSpec", + ["args", "varargs", "keywords", "defaults"]) + +if py3k: + import builtins + + from inspect import getfullargspec as inspect_getfullargspec + from urllib.parse import (quote_plus, unquote_plus, + parse_qsl, quote, unquote) + import configparser + from io import StringIO + + from io import BytesIO as byte_buffer + + def inspect_getargspec(func): + return ArgSpec( + *inspect_getfullargspec(func)[0:4] + ) + + string_types = str, + binary_types = bytes, + binary_type = bytes + text_type = str + int_types = int, + iterbytes = iter + + def u(s): + return s + + def ue(s): + return s + + def b(s): + return s.encode("latin-1") + + if py32: + callable = callable + else: + def callable(fn): + return hasattr(fn, '__call__') + + def cmp(a, b): + return (a > b) - (a < b) + + from functools import reduce + + print_ = getattr(builtins, "print") + + import_ = getattr(builtins, '__import__') + + import itertools + itertools_filterfalse = itertools.filterfalse + itertools_filter = filter + itertools_imap = map + from itertools import zip_longest + + import base64 + + def b64encode(x): + return base64.b64encode(x).decode('ascii') + + def b64decode(x): + return base64.b64decode(x.encode('ascii')) + +else: + from inspect import getargspec as inspect_getfullargspec + inspect_getargspec = inspect_getfullargspec + from urllib import quote_plus, unquote_plus, quote, unquote + from urlparse import parse_qsl + import ConfigParser as configparser + from StringIO import StringIO + from cStringIO import StringIO as byte_buffer + + string_types = basestring, + binary_types = bytes, + binary_type = str + text_type = unicode + int_types = 
int, long + + def iterbytes(buf): + return (ord(byte) for byte in buf) + + def u(s): + # this differs from what six does, which doesn't support non-ASCII + # strings - we only use u() with + # literal source strings, and all our source files with non-ascii + # in them (all are tests) are utf-8 encoded. + return unicode(s, "utf-8") + + def ue(s): + return unicode(s, "unicode_escape") + + def b(s): + return s + + def import_(*args): + if len(args) == 4: + args = args[0:3] + ([str(arg) for arg in args[3]],) + return __import__(*args) + + callable = callable + cmp = cmp + reduce = reduce + + import base64 + b64encode = base64.b64encode + b64decode = base64.b64decode + + def print_(*args, **kwargs): + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + for arg in args: + if not isinstance(arg, basestring): + arg = str(arg) + fp.write(arg) + + import itertools + itertools_filterfalse = itertools.ifilterfalse + itertools_filter = itertools.ifilter + itertools_imap = itertools.imap + from itertools import izip_longest as zip_longest + + +import time +if win32 or jython: + time_func = time.clock +else: + time_func = time.time + +from collections import namedtuple +from operator import attrgetter as dottedgetter + + +if py3k: + def reraise(tp, value, tb=None, cause=None): + if cause is not None: + assert cause is not value, "Same cause emitted" + value.__cause__ = cause + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + # not as nice as that of Py3K, but at least preserves + # the code line where the issue occurred + exec("def reraise(tp, value, tb=None, cause=None):\n" + " if cause is not None:\n" + " assert cause is not value, 'Same cause emitted'\n" + " raise tp, value, tb\n") + + +def raise_from_cause(exception, exc_info=None): + if exc_info is None: + exc_info = sys.exc_info() + exc_type, exc_value, exc_tb = exc_info + cause = exc_value if exc_value is not exception else None + reraise(type(exception), exception, tb=exc_tb, cause=cause) + +if py3k: + exec_ = getattr(builtins, 'exec') +else: + def exec_(func_text, globals_, lcl=None): + if lcl is None: + exec('exec func_text in globals_') + else: + exec('exec func_text in globals_, lcl') + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass. + + Drops the middle class upon creation. + + Source: http://lucumr.pocoo.org/2013/5/21/porting-to-python-3-redux/ + + """ + + class metaclass(meta): + __call__ = type.__call__ + __init__ = type.__init__ + + def __new__(cls, name, this_bases, d): + if this_bases is None: + return type.__new__(cls, name, (), d) + return meta(name, bases, d) + return metaclass('temporary_class', None, {}) + + + + +@contextmanager +def nested(*managers): + """Implement contextlib.nested, mostly for unit tests. + + As tests still need to run on py2.6 we can't use multiple-with yet. + + Function is removed in py3k but also emits deprecation warning in 2.7 + so just roll it here for everyone. 
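+
+    A minimal illustration (editor's sketch; the file names are hypothetical)::
+
+        with nested(open("a.txt"), open("b.txt")) as (fa, fb):
+            pass   # both files are closed on exit, even if an error occurs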
+ + """ + + exits = [] + vars = [] + exc = (None, None, None) + try: + for mgr in managers: + exit = mgr.__exit__ + enter = mgr.__enter__ + vars.append(enter()) + exits.append(exit) + yield vars + except: + exc = sys.exc_info() + finally: + while exits: + exit = exits.pop() + try: + if exit(*exc): + exc = (None, None, None) + except: + exc = sys.exc_info() + if exc != (None, None, None): + reraise(exc[0], exc[1], exc[2]) diff --git a/app/lib/sqlalchemy/util/deprecations.py b/app/lib/sqlalchemy/util/deprecations.py new file mode 100644 index 0000000..4da3d45 --- /dev/null +++ b/app/lib/sqlalchemy/util/deprecations.py @@ -0,0 +1,146 @@ +# util/deprecations.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Helpers related to deprecation of functions, methods, classes, other +functionality.""" + +from .. import exc +import warnings +import re +from .langhelpers import decorator + + +def warn_deprecated(msg, stacklevel=3): + warnings.warn(msg, exc.SADeprecationWarning, stacklevel=stacklevel) + + +def warn_pending_deprecation(msg, stacklevel=3): + warnings.warn(msg, exc.SAPendingDeprecationWarning, stacklevel=stacklevel) + + +def deprecated(version, message=None, add_deprecation_to_docstring=True): + """Decorates a function and issues a deprecation warning on use. + + :param message: + If provided, issue message in the warning. A sensible default + is used if not provided. + + :param add_deprecation_to_docstring: + Default True. If False, the wrapped function's __doc__ is left + as-is. If True, the 'message' is prepended to the docs if + provided, or sensible default if message is omitted. + + """ + + if add_deprecation_to_docstring: + header = ".. deprecated:: %s %s" % \ + (version, (message or '')) + else: + header = None + + if message is None: + message = "Call to deprecated function %(func)s" + + def decorate(fn): + return _decorate_with_warning( + fn, exc.SADeprecationWarning, + message % dict(func=fn.__name__), header) + return decorate + + +def pending_deprecation(version, message=None, + add_deprecation_to_docstring=True): + """Decorates a function and issues a pending deprecation warning on use. + + :param version: + An approximate future version at which point the pending deprecation + will become deprecated. Not used in messaging. + + :param message: + If provided, issue message in the warning. A sensible default + is used if not provided. + + :param add_deprecation_to_docstring: + Default True. If False, the wrapped function's __doc__ is left + as-is. If True, the 'message' is prepended to the docs if + provided, or sensible default if message is omitted. + """ + + if add_deprecation_to_docstring: + header = ".. 
deprecated:: %s (pending) %s" % \ + (version, (message or '')) + else: + header = None + + if message is None: + message = "Call to deprecated function %(func)s" + + def decorate(fn): + return _decorate_with_warning( + fn, exc.SAPendingDeprecationWarning, + message % dict(func=fn.__name__), header) + return decorate + + +def _sanitize_restructured_text(text): + def repl(m): + type_, name = m.group(1, 2) + if type_ in ("func", "meth"): + name += "()" + return name + return re.sub(r'\:(\w+)\:`~?\.?(.+?)`', repl, text) + + +def _decorate_with_warning(func, wtype, message, docstring_header=None): + """Wrap a function with a warnings.warn and augmented docstring.""" + + message = _sanitize_restructured_text(message) + + @decorator + def warned(fn, *args, **kwargs): + warnings.warn(message, wtype, stacklevel=3) + return fn(*args, **kwargs) + + doc = func.__doc__ is not None and func.__doc__ or '' + if docstring_header is not None: + docstring_header %= dict(func=func.__name__) + + doc = inject_docstring_text(doc, docstring_header, 1) + + decorated = warned(func) + decorated.__doc__ = doc + return decorated + +import textwrap + + +def _dedent_docstring(text): + split_text = text.split("\n", 1) + if len(split_text) == 1: + return text + else: + firstline, remaining = split_text + if not firstline.startswith(" "): + return firstline + "\n" + textwrap.dedent(remaining) + else: + return textwrap.dedent(text) + + +def inject_docstring_text(doctext, injecttext, pos): + doctext = _dedent_docstring(doctext or "") + lines = doctext.split('\n') + injectlines = textwrap.dedent(injecttext).split("\n") + if injectlines[0]: + injectlines.insert(0, "") + + blanks = [num for num, line in enumerate(lines) if not line.strip()] + blanks.insert(0, 0) + + inject_pos = blanks[min(pos, len(blanks) - 1)] + + lines = lines[0:inject_pos] + injectlines + lines[inject_pos:] + return "\n".join(lines) diff --git a/app/lib/sqlalchemy/util/langhelpers.py b/app/lib/sqlalchemy/util/langhelpers.py new file mode 100644 index 0000000..66a06c5 --- /dev/null +++ b/app/lib/sqlalchemy/util/langhelpers.py @@ -0,0 +1,1419 @@ +# util/langhelpers.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Routines to help with the creation, loading and introspection of +modules, classes, hierarchies, attributes, functions, and methods. + +""" +import itertools +import inspect +import operator +import re +import sys +import types +import warnings +from functools import update_wrapper +from .. import exc +import hashlib +from . import compat +from . import _collections + + +def md5_hex(x): + if compat.py3k: + x = x.encode('utf-8') + m = hashlib.md5() + m.update(x) + return m.hexdigest() + + +class safe_reraise(object): + """Reraise an exception after invoking some + handler code. + + Stores the existing exception info before + invoking so that it is maintained across a potential + coroutine context switch. 
+ + e.g.:: + + try: + sess.commit() + except: + with safe_reraise(): + sess.rollback() + + """ + + __slots__ = ('warn_only', '_exc_info') + + def __init__(self, warn_only=False): + self.warn_only = warn_only + + def __enter__(self): + self._exc_info = sys.exc_info() + + def __exit__(self, type_, value, traceback): + # see #2703 for notes + if type_ is None: + exc_type, exc_value, exc_tb = self._exc_info + self._exc_info = None # remove potential circular references + if not self.warn_only: + compat.reraise(exc_type, exc_value, exc_tb) + else: + if not compat.py3k and self._exc_info and self._exc_info[1]: + # emulate Py3K's behavior of telling us when an exception + # occurs in an exception handler. + warn( + "An exception has occurred during handling of a " + "previous exception. The previous exception " + "is:\n %s %s\n" % (self._exc_info[0], self._exc_info[1])) + self._exc_info = None # remove potential circular references + compat.reraise(type_, value, traceback) + + +def decode_slice(slc): + """decode a slice object as sent to __getitem__. + + takes into account the 2.5 __index__() method, basically. + + """ + ret = [] + for x in slc.start, slc.stop, slc.step: + if hasattr(x, '__index__'): + x = x.__index__() + ret.append(x) + return tuple(ret) + + +def _unique_symbols(used, *bases): + used = set(used) + for base in bases: + pool = itertools.chain((base,), + compat.itertools_imap(lambda i: base + str(i), + range(1000))) + for sym in pool: + if sym not in used: + used.add(sym) + yield sym + break + else: + raise NameError("exhausted namespace for symbol base %s" % base) + + +def map_bits(fn, n): + """Call the given function given each nonzero bit from n.""" + + while n: + b = n & (~n + 1) + yield fn(b) + n ^= b + + +def decorator(target): + """A signature-matching decorator factory.""" + + def decorate(fn): + if not inspect.isfunction(fn): + raise Exception("not a decoratable function") + spec = compat.inspect_getfullargspec(fn) + names = tuple(spec[0]) + spec[1:3] + (fn.__name__,) + targ_name, fn_name = _unique_symbols(names, 'target', 'fn') + + metadata = dict(target=targ_name, fn=fn_name) + metadata.update(format_argspec_plus(spec, grouped=False)) + metadata['name'] = fn.__name__ + code = """\ +def %(name)s(%(args)s): + return %(target)s(%(fn)s, %(apply_kw)s) +""" % metadata + decorated = _exec_code_in_env(code, + {targ_name: target, fn_name: fn}, + fn.__name__) + decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__ + decorated.__wrapped__ = fn + return update_wrapper(decorated, fn) + return update_wrapper(decorate, target) + + +def _exec_code_in_env(code, env, fn_name): + exec(code, env) + return env[fn_name] + + +def public_factory(target, location): + """Produce a wrapping function for the given cls or classmethod. + + Rationale here is so that the __init__ method of the + class can serve as documentation for the function. + + """ + if isinstance(target, type): + fn = target.__init__ + callable_ = target + doc = "Construct a new :class:`.%s` object. \n\n"\ + "This constructor is mirrored as a public API function; "\ + "see :func:`~%s` "\ + "for a full usage and argument description." % ( + target.__name__, location, ) + else: + fn = callable_ = target + doc = "This function is mirrored; see :func:`~%s` "\ + "for a description of arguments." 
% location + + location_name = location.split(".")[-1] + spec = compat.inspect_getfullargspec(fn) + del spec[0][0] + metadata = format_argspec_plus(spec, grouped=False) + metadata['name'] = location_name + code = """\ +def %(name)s(%(args)s): + return cls(%(apply_kw)s) +""" % metadata + env = {'cls': callable_, 'symbol': symbol} + exec(code, env) + decorated = env[location_name] + decorated.__doc__ = fn.__doc__ + decorated.__module__ = "sqlalchemy" + location.rsplit(".", 1)[0] + if compat.py2k or hasattr(fn, '__func__'): + fn.__func__.__doc__ = doc + else: + fn.__doc__ = doc + return decorated + + +class PluginLoader(object): + + def __init__(self, group, auto_fn=None): + self.group = group + self.impls = {} + self.auto_fn = auto_fn + + def load(self, name): + if name in self.impls: + return self.impls[name]() + + if self.auto_fn: + loader = self.auto_fn(name) + if loader: + self.impls[name] = loader + return loader() + + try: + import pkg_resources + except ImportError: + pass + else: + for impl in pkg_resources.iter_entry_points( + self.group, name): + self.impls[name] = impl.load + return impl.load() + + raise exc.NoSuchModuleError( + "Can't load plugin: %s:%s" % + (self.group, name)) + + def register(self, name, modulepath, objname): + def load(): + mod = compat.import_(modulepath) + for token in modulepath.split(".")[1:]: + mod = getattr(mod, token) + return getattr(mod, objname) + self.impls[name] = load + + +def get_cls_kwargs(cls, _set=None): + r"""Return the full set of inherited kwargs for the given `cls`. + + Probes a class's __init__ method, collecting all named arguments. If the + __init__ defines a \**kwargs catch-all, then the constructor is presumed + to pass along unrecognized keywords to its base classes, and the + collection process is repeated recursively on each of the bases. + + Uses a subset of inspect.getargspec() to cut down on method overhead. + No anonymous tuple arguments please ! + + """ + toplevel = _set is None + if toplevel: + _set = set() + + ctr = cls.__dict__.get('__init__', False) + + has_init = ctr and isinstance(ctr, types.FunctionType) and \ + isinstance(ctr.__code__, types.CodeType) + + if has_init: + names, has_kw = inspect_func_args(ctr) + _set.update(names) + + if not has_kw and not toplevel: + return None + + if not has_init or has_kw: + for c in cls.__bases__: + if get_cls_kwargs(c, _set) is None: + break + + _set.discard('self') + return _set + + +try: + # TODO: who doesn't have this constant? + from inspect import CO_VARKEYWORDS + + def inspect_func_args(fn): + co = fn.__code__ + nargs = co.co_argcount + names = co.co_varnames + args = list(names[:nargs]) + has_kw = bool(co.co_flags & CO_VARKEYWORDS) + return args, has_kw + +except ImportError: + def inspect_func_args(fn): + names, _, has_kw, _ = inspect.getargspec(fn) + return names, bool(has_kw) + + +def get_func_kwargs(func): + """Return the set of legal kwargs for the given `func`. + + Uses getargspec so is safe to call for methods, functions, + etc. + + """ + + return compat.inspect_getargspec(func)[0] + + +def get_callable_argspec(fn, no_self=False, _is_init=False): + """Return the argument signature for any callable. + + All pure-Python callables are accepted, including + functions, methods, classes, objects with __call__; + builtins and other edge cases like functools.partial() objects + raise a TypeError. 
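+
+    A minimal illustration (editor's sketch)::
+
+        >>> get_callable_argspec(lambda a, b=1: None)
+        ArgSpec(args=['a', 'b'], varargs=None, keywords=None, defaults=(1,))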
+ + """ + if inspect.isbuiltin(fn): + raise TypeError("Can't inspect builtin: %s" % fn) + elif inspect.isfunction(fn): + if _is_init and no_self: + spec = compat.inspect_getargspec(fn) + return compat.ArgSpec(spec.args[1:], spec.varargs, + spec.keywords, spec.defaults) + else: + return compat.inspect_getargspec(fn) + elif inspect.ismethod(fn): + if no_self and (_is_init or fn.__self__): + spec = compat.inspect_getargspec(fn.__func__) + return compat.ArgSpec(spec.args[1:], spec.varargs, + spec.keywords, spec.defaults) + else: + return compat.inspect_getargspec(fn.__func__) + elif inspect.isclass(fn): + return get_callable_argspec( + fn.__init__, no_self=no_self, _is_init=True) + elif hasattr(fn, '__func__'): + return compat.inspect_getargspec(fn.__func__) + elif hasattr(fn, '__call__'): + if inspect.ismethod(fn.__call__): + return get_callable_argspec(fn.__call__, no_self=no_self) + else: + raise TypeError("Can't inspect callable: %s" % fn) + else: + raise TypeError("Can't inspect callable: %s" % fn) + + +def format_argspec_plus(fn, grouped=True): + """Returns a dictionary of formatted, introspected function arguments. + + A enhanced variant of inspect.formatargspec to support code generation. + + fn + An inspectable callable or tuple of inspect getargspec() results. + grouped + Defaults to True; include (parens, around, argument) lists + + Returns: + + args + Full inspect.formatargspec for fn + self_arg + The name of the first positional argument, varargs[0], or None + if the function defines no positional arguments. + apply_pos + args, re-written in calling rather than receiving syntax. Arguments are + passed positionally. + apply_kw + Like apply_pos, except keyword-ish args are passed as keywords. + + Example:: + + >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123) + {'args': '(self, a, b, c=3, **d)', + 'self_arg': 'self', + 'apply_kw': '(self, a, b, c=c, **d)', + 'apply_pos': '(self, a, b, c, **d)'} + + """ + if compat.callable(fn): + spec = compat.inspect_getfullargspec(fn) + else: + # we accept an existing argspec... 
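+        # (a tuple in inspect.getargspec() form:
+        #  (args, varargs, varkw, defaults, ...))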
+ spec = fn + args = inspect.formatargspec(*spec) + if spec[0]: + self_arg = spec[0][0] + elif spec[1]: + self_arg = '%s[0]' % spec[1] + else: + self_arg = None + + if compat.py3k: + apply_pos = inspect.formatargspec(spec[0], spec[1], + spec[2], None, spec[4]) + num_defaults = 0 + if spec[3]: + num_defaults += len(spec[3]) + if spec[4]: + num_defaults += len(spec[4]) + name_args = spec[0] + spec[4] + else: + apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2]) + num_defaults = 0 + if spec[3]: + num_defaults += len(spec[3]) + name_args = spec[0] + + if num_defaults: + defaulted_vals = name_args[0 - num_defaults:] + else: + defaulted_vals = () + + apply_kw = inspect.formatargspec(name_args, spec[1], spec[2], + defaulted_vals, + formatvalue=lambda x: '=' + x) + if grouped: + return dict(args=args, self_arg=self_arg, + apply_pos=apply_pos, apply_kw=apply_kw) + else: + return dict(args=args[1:-1], self_arg=self_arg, + apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1]) + + +def format_argspec_init(method, grouped=True): + """format_argspec_plus with considerations for typical __init__ methods + + Wraps format_argspec_plus with error handling strategies for typical + __init__ cases:: + + object.__init__ -> (self) + other unreflectable (usually C) -> (self, *args, **kwargs) + + """ + if method is object.__init__: + args = grouped and '(self)' or 'self' + else: + try: + return format_argspec_plus(method, grouped=grouped) + except TypeError: + args = (grouped and '(self, *args, **kwargs)' + or 'self, *args, **kwargs') + return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args) + + +def getargspec_init(method): + """inspect.getargspec with considerations for typical __init__ methods + + Wraps inspect.getargspec with error handling for typical __init__ cases:: + + object.__init__ -> (self) + other unreflectable (usually C) -> (self, *args, **kwargs) + + """ + try: + return compat.inspect_getargspec(method) + except TypeError: + if method is object.__init__: + return (['self'], None, None, None) + else: + return (['self'], 'args', 'kwargs', None) + + +def unbound_method_to_callable(func_or_cls): + """Adjust the incoming callable such that a 'self' argument is not + required. + + """ + + if isinstance(func_or_cls, types.MethodType) and not func_or_cls.__self__: + return func_or_cls.__func__ + else: + return func_or_cls + + +def generic_repr(obj, additional_kw=(), to_inspect=None, omit_kwarg=()): + """Produce a __repr__() based on direct association of the __init__() + specification vs. same-named attributes present. 
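+
+    For example (``Point`` is a hypothetical class, shown only to
+    illustrate the pairing of constructor arguments with attributes)::
+
+        >>> class Point(object):
+        ...     def __init__(self, x, y=0):
+        ...         self.x, self.y = x, y
+        ...     def __repr__(self):
+        ...         return generic_repr(self)
+        >>> Point(4, 5)
+        Point(4, y=5)
+        >>> Point(4)    # y left at its default is omitted
+        Point(4)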
+ + """ + if to_inspect is None: + to_inspect = [obj] + else: + to_inspect = _collections.to_list(to_inspect) + + missing = object() + + pos_args = [] + kw_args = _collections.OrderedDict() + vargs = None + for i, insp in enumerate(to_inspect): + try: + (_args, _vargs, vkw, defaults) = \ + compat.inspect_getargspec(insp.__init__) + except TypeError: + continue + else: + default_len = defaults and len(defaults) or 0 + if i == 0: + if _vargs: + vargs = _vargs + if default_len: + pos_args.extend(_args[1:-default_len]) + else: + pos_args.extend(_args[1:]) + else: + kw_args.update([ + (arg, missing) for arg in _args[1:-default_len] + ]) + + if default_len: + kw_args.update([ + (arg, default) + for arg, default + in zip(_args[-default_len:], defaults) + ]) + output = [] + + output.extend(repr(getattr(obj, arg, None)) for arg in pos_args) + + if vargs is not None and hasattr(obj, vargs): + output.extend([repr(val) for val in getattr(obj, vargs)]) + + for arg, defval in kw_args.items(): + if arg in omit_kwarg: + continue + try: + val = getattr(obj, arg, missing) + if val is not missing and val != defval: + output.append('%s=%r' % (arg, val)) + except Exception: + pass + + if additional_kw: + for arg, defval in additional_kw: + try: + val = getattr(obj, arg, missing) + if val is not missing and val != defval: + output.append('%s=%r' % (arg, val)) + except Exception: + pass + + return "%s(%s)" % (obj.__class__.__name__, ", ".join(output)) + + +class portable_instancemethod(object): + """Turn an instancemethod into a (parent, name) pair + to produce a serializable callable. + + """ + + __slots__ = 'target', 'name', 'kwargs', '__weakref__' + + def __getstate__(self): + return {'target': self.target, 'name': self.name, + 'kwargs': self.kwargs} + + def __setstate__(self, state): + self.target = state['target'] + self.name = state['name'] + self.kwargs = state.get('kwargs', ()) + + def __init__(self, meth, kwargs=()): + self.target = meth.__self__ + self.name = meth.__name__ + self.kwargs = kwargs + + def __call__(self, *arg, **kw): + kw.update(self.kwargs) + return getattr(self.target, self.name)(*arg, **kw) + + +def class_hierarchy(cls): + """Return an unordered sequence of all classes related to cls. + + Traverses diamond hierarchies. + + Fibs slightly: subclasses of builtin types are not returned. Thus + class_hierarchy(class A(object)) returns (A, object), not A plus every + class systemwide that derives from object. + + Old-style classes are discarded and hierarchies rooted on them + will not be descended. + + """ + if compat.py2k: + if isinstance(cls, types.ClassType): + return list() + + hier = set([cls]) + process = list(cls.__mro__) + while process: + c = process.pop() + if compat.py2k: + if isinstance(c, types.ClassType): + continue + bases = (_ for _ in c.__bases__ + if _ not in hier and not isinstance(_, types.ClassType)) + else: + bases = (_ for _ in c.__bases__ if _ not in hier) + + for b in bases: + process.append(b) + hier.add(b) + + if compat.py3k: + if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'): + continue + else: + if c.__module__ == '__builtin__' or not hasattr( + c, '__subclasses__'): + continue + + for s in [_ for _ in c.__subclasses__() if _ not in hier]: + process.append(s) + hier.add(s) + return list(hier) + + +def iterate_attributes(cls): + """iterate all the keys and attributes associated + with a class, without using getattr(). + + Does not use getattr() so that class-sensitive + descriptors (i.e. property.__get__()) are not called. 
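+
+    For example, a property is yielded as the raw descriptor rather
+    than being invoked (hypothetical class for illustration)::
+
+        >>> class A(object):
+        ...     @property
+        ...     def x(self):
+        ...         raise RuntimeError("never reached")
+        >>> dict(iterate_attributes(A))['x']  # doctest: +ELLIPSIS
+        <property object at 0x...>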
+ + """ + keys = dir(cls) + for key in keys: + for c in cls.__mro__: + if key in c.__dict__: + yield (key, c.__dict__[key]) + break + + +def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None, + name='self.proxy', from_instance=None): + """Automates delegation of __specials__ for a proxying type.""" + + if only: + dunders = only + else: + if skip is None: + skip = ('__slots__', '__del__', '__getattribute__', + '__metaclass__', '__getstate__', '__setstate__') + dunders = [m for m in dir(from_cls) + if (m.startswith('__') and m.endswith('__') and + not hasattr(into_cls, m) and m not in skip)] + + for method in dunders: + try: + fn = getattr(from_cls, method) + if not hasattr(fn, '__call__'): + continue + fn = getattr(fn, 'im_func', fn) + except AttributeError: + continue + try: + spec = compat.inspect_getargspec(fn) + fn_args = inspect.formatargspec(spec[0]) + d_args = inspect.formatargspec(spec[0][1:]) + except TypeError: + fn_args = '(self, *args, **kw)' + d_args = '(*args, **kw)' + + py = ("def %(method)s%(fn_args)s: " + "return %(name)s.%(method)s%(d_args)s" % locals()) + + env = from_instance is not None and {name: from_instance} or {} + compat.exec_(py, env) + try: + env[method].__defaults__ = fn.__defaults__ + except AttributeError: + pass + setattr(into_cls, method, env[method]) + + +def methods_equivalent(meth1, meth2): + """Return True if the two methods are the same implementation.""" + + return getattr(meth1, '__func__', meth1) is getattr( + meth2, '__func__', meth2) + + +def as_interface(obj, cls=None, methods=None, required=None): + """Ensure basic interface compliance for an instance or dict of callables. + + Checks that ``obj`` implements public methods of ``cls`` or has members + listed in ``methods``. If ``required`` is not supplied, implementing at + least one interface method is sufficient. Methods present on ``obj`` that + are not in the interface are ignored. + + If ``obj`` is a dict and ``dict`` does not meet the interface + requirements, the keys of the dictionary are inspected. Keys present in + ``obj`` that are not in the interface will raise TypeErrors. + + Raises TypeError if ``obj`` does not meet the interface criteria. + + In all passing cases, an object with callable members is returned. In the + simple case, ``obj`` is returned as-is; if dict processing kicks in then + an anonymous class is returned. + + obj + A type, instance, or dictionary of callables. + cls + Optional, a type. All public methods of cls are considered the + interface. An ``obj`` instance of cls will always pass, ignoring + ``required``.. + methods + Optional, a sequence of method names to consider as the interface. + required + Optional, a sequence of mandatory implementations. If omitted, an + ``obj`` that provides at least one interface method is considered + sufficient. As a convenience, required may be a type, in which case + all public methods of the type are required. + + """ + if not cls and not methods: + raise TypeError('a class or collection of method names are required') + + if isinstance(cls, type) and isinstance(obj, cls): + return obj + + interface = set(methods or [m for m in dir(cls) if not m.startswith('_')]) + implemented = set(dir(obj)) + + complies = operator.ge + if isinstance(required, type): + required = interface + elif not required: + required = set() + complies = operator.gt + else: + required = set(required) + + if complies(implemented.intersection(interface), required): + return obj + + # No dict duck typing here. 
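+    # A plain object has failed the membership test by this point; a
+    # dict of callables, e.g. (sketch; ``some_fn`` is hypothetical)
+    #     as_interface({'execute': some_fn}, methods=('execute', 'close'))
+    # falls through to the handling below, which returns an anonymous
+    # class with ``execute`` attached as a staticmethod.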
+    if not isinstance(obj, dict):
+        qualifier = complies is operator.gt and 'any of' or 'all of'
+        raise TypeError("%r does not implement %s: %s" % (
+            obj, qualifier, ', '.join(interface)))
+
+    class AnonymousInterface(object):
+        """A callable-holding shell."""
+
+    if cls:
+        AnonymousInterface.__name__ = 'Anonymous' + cls.__name__
+    found = set()
+
+    for method, impl in dictlike_iteritems(obj):
+        if method not in interface:
+            raise TypeError("%r: unknown in this interface" % method)
+        if not compat.callable(impl):
+            raise TypeError("%r=%r is not callable" % (method, impl))
+        setattr(AnonymousInterface, method, staticmethod(impl))
+        found.add(method)
+
+    if complies(found, required):
+        return AnonymousInterface
+
+    raise TypeError("dictionary does not contain required keys %s" %
+                    ', '.join(required - found))
+
+
+class memoized_property(object):
+    """A read-only @property that is only evaluated once."""
+
+    def __init__(self, fget, doc=None):
+        self.fget = fget
+        self.__doc__ = doc or fget.__doc__
+        self.__name__ = fget.__name__
+
+    def __get__(self, obj, cls):
+        if obj is None:
+            return self
+        obj.__dict__[self.__name__] = result = self.fget(obj)
+        return result
+
+    def _reset(self, obj):
+        memoized_property.reset(obj, self.__name__)
+
+    @classmethod
+    def reset(cls, obj, name):
+        obj.__dict__.pop(name, None)
+
+
+def memoized_instancemethod(fn):
+    """Decorate a method to memoize its return value.
+
+    Best applied to no-arg methods: memoization is not sensitive to
+    argument values, and will always return the same value even when
+    called with different arguments.
+
+    """
+
+    def oneshot(self, *args, **kw):
+        result = fn(self, *args, **kw)
+        memo = lambda *a, **kw: result
+        memo.__name__ = fn.__name__
+        memo.__doc__ = fn.__doc__
+        self.__dict__[fn.__name__] = memo
+        return result
+    return update_wrapper(oneshot, fn)
+
+
+class group_expirable_memoized_property(object):
+    """A family of @memoized_properties that can be expired in tandem."""
+
+    def __init__(self, attributes=()):
+        self.attributes = []
+        if attributes:
+            self.attributes.extend(attributes)
+
+    def expire_instance(self, instance):
+        """Expire all memoized properties for *instance*."""
+        stash = instance.__dict__
+        for attribute in self.attributes:
+            stash.pop(attribute, None)
+
+    def __call__(self, fn):
+        self.attributes.append(fn.__name__)
+        return memoized_property(fn)
+
+    def method(self, fn):
+        self.attributes.append(fn.__name__)
+        return memoized_instancemethod(fn)
+
+
+class MemoizedSlots(object):
+    """Apply memoized items to an object using a __getattr__ scheme.
+
+    This allows the functionality of memoized_property and
+    memoized_instancemethod to be available to a class using __slots__.
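+
+    A minimal sketch (``load_config`` is a hypothetical loader)::
+
+        class Config(MemoizedSlots):
+            __slots__ = ('settings',)
+
+            def _memoized_attr_settings(self):
+                # computed on first access, then stored in the slot
+                return load_config()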
+ + """ + + __slots__ = () + + def _fallback_getattr(self, key): + raise AttributeError(key) + + def __getattr__(self, key): + if key.startswith('_memoized'): + raise AttributeError(key) + elif hasattr(self, '_memoized_attr_%s' % key): + value = getattr(self, '_memoized_attr_%s' % key)() + setattr(self, key, value) + return value + elif hasattr(self, '_memoized_method_%s' % key): + fn = getattr(self, '_memoized_method_%s' % key) + + def oneshot(*args, **kw): + result = fn(*args, **kw) + memo = lambda *a, **kw: result + memo.__name__ = fn.__name__ + memo.__doc__ = fn.__doc__ + setattr(self, key, memo) + return result + oneshot.__doc__ = fn.__doc__ + return oneshot + else: + return self._fallback_getattr(key) + + +def dependency_for(modulename): + def decorate(obj): + # TODO: would be nice to improve on this import silliness, + # unfortunately importlib doesn't work that great either + tokens = modulename.split(".") + mod = compat.import_( + ".".join(tokens[0:-1]), globals(), locals(), tokens[-1]) + mod = getattr(mod, tokens[-1]) + setattr(mod, obj.__name__, obj) + return obj + return decorate + + +class dependencies(object): + """Apply imported dependencies as arguments to a function. + + E.g.:: + + @util.dependencies( + "sqlalchemy.sql.widget", + "sqlalchemy.engine.default" + ); + def some_func(self, widget, default, arg1, arg2, **kw): + # ... + + Rationale is so that the impact of a dependency cycle can be + associated directly with the few functions that cause the cycle, + and not pollute the module-level namespace. + + """ + + def __init__(self, *deps): + self.import_deps = [] + for dep in deps: + tokens = dep.split(".") + self.import_deps.append( + dependencies._importlater( + ".".join(tokens[0:-1]), + tokens[-1] + ) + ) + + def __call__(self, fn): + import_deps = self.import_deps + spec = compat.inspect_getfullargspec(fn) + + spec_zero = list(spec[0]) + hasself = spec_zero[0] in ('self', 'cls') + + for i in range(len(import_deps)): + spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i + + inner_spec = format_argspec_plus(spec, grouped=False) + + for impname in import_deps: + del spec_zero[1 if hasself else 0] + spec[0][:] = spec_zero + + outer_spec = format_argspec_plus(spec, grouped=False) + + code = 'lambda %(args)s: fn(%(apply_kw)s)' % { + "args": outer_spec['args'], + "apply_kw": inner_spec['apply_kw'] + } + + decorated = eval(code, locals()) + decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__ + return update_wrapper(decorated, fn) + + @classmethod + def resolve_all(cls, path): + for m in list(dependencies._unresolved): + if m._full_path.startswith(path): + m._resolve() + + _unresolved = set() + _by_key = {} + + class _importlater(object): + _unresolved = set() + + _by_key = {} + + def __new__(cls, path, addtl): + key = path + "." + addtl + if key in dependencies._by_key: + return dependencies._by_key[key] + else: + dependencies._by_key[key] = imp = object.__new__(cls) + return imp + + def __init__(self, path, addtl): + self._il_path = path + self._il_addtl = addtl + dependencies._unresolved.add(self) + + @property + def _full_path(self): + return self._il_path + "." 
+ self._il_addtl + + @memoized_property + def module(self): + if self in dependencies._unresolved: + raise ImportError( + "importlater.resolve_all() hasn't " + "been called (this is %s %s)" + % (self._il_path, self._il_addtl)) + + return getattr(self._initial_import, self._il_addtl) + + def _resolve(self): + dependencies._unresolved.discard(self) + self._initial_import = compat.import_( + self._il_path, globals(), locals(), + [self._il_addtl]) + + def __getattr__(self, key): + if key == 'module': + raise ImportError("Could not resolve module %s" + % self._full_path) + try: + attr = getattr(self.module, key) + except AttributeError: + raise AttributeError( + "Module %s has no attribute '%s'" % + (self._full_path, key) + ) + self.__dict__[key] = attr + return attr + + +# from paste.deploy.converters +def asbool(obj): + if isinstance(obj, compat.string_types): + obj = obj.strip().lower() + if obj in ['true', 'yes', 'on', 'y', 't', '1']: + return True + elif obj in ['false', 'no', 'off', 'n', 'f', '0']: + return False + else: + raise ValueError("String is not true/false: %r" % obj) + return bool(obj) + + +def bool_or_str(*text): + """Return a callable that will evaluate a string as + boolean, or one of a set of "alternate" string values. + + """ + def bool_or_value(obj): + if obj in text: + return obj + else: + return asbool(obj) + return bool_or_value + + +def asint(value): + """Coerce to integer.""" + + if value is None: + return value + return int(value) + + +def coerce_kw_type(kw, key, type_, flexi_bool=True): + r"""If 'key' is present in dict 'kw', coerce its value to type 'type\_' if + necessary. If 'flexi_bool' is True, the string '0' is considered false + when coercing to boolean. + """ + + if key in kw and not isinstance(kw[key], type_) and kw[key] is not None: + if type_ is bool and flexi_bool: + kw[key] = asbool(kw[key]) + else: + kw[key] = type_(kw[key]) + + +def constructor_copy(obj, cls, *args, **kw): + """Instantiate cls using the __dict__ of obj as constructor arguments. + + Uses inspect to match the named arguments of ``cls``. + + """ + + names = get_cls_kwargs(cls) + kw.update( + (k, obj.__dict__[k]) for k in names.difference(kw) + if k in obj.__dict__) + return cls(*args, **kw) + + +def counter(): + """Return a threadsafe counter function.""" + + lock = compat.threading.Lock() + counter = itertools.count(1) + + # avoid the 2to3 "next" transformation... + def _next(): + lock.acquire() + try: + return next(counter) + finally: + lock.release() + + return _next + + +def duck_type_collection(specimen, default=None): + """Given an instance or class, guess if it is or is acting as one of + the basic collection types: list, set and dict. If the __emulates__ + property is present, return that preferentially. 
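+
+    For example (sketch)::
+
+        duck_type_collection([])          # -> list
+        duck_type_collection(dict)        # -> dict; classes work too
+
+        class Bag(object):
+            __emulates__ = set
+
+        duck_type_collection(Bag())       # -> set, via __emulates__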
+ """ + + if hasattr(specimen, '__emulates__'): + # canonicalize set vs sets.Set to a standard: the builtin set + if (specimen.__emulates__ is not None and + issubclass(specimen.__emulates__, set)): + return set + else: + return specimen.__emulates__ + + isa = isinstance(specimen, type) and issubclass or isinstance + if isa(specimen, list): + return list + elif isa(specimen, set): + return set + elif isa(specimen, dict): + return dict + + if hasattr(specimen, 'append'): + return list + elif hasattr(specimen, 'add'): + return set + elif hasattr(specimen, 'set'): + return dict + else: + return default + + +def assert_arg_type(arg, argtype, name): + if isinstance(arg, argtype): + return arg + else: + if isinstance(argtype, tuple): + raise exc.ArgumentError( + "Argument '%s' is expected to be one of type %s, got '%s'" % + (name, ' or '.join("'%s'" % a for a in argtype), type(arg))) + else: + raise exc.ArgumentError( + "Argument '%s' is expected to be of type '%s', got '%s'" % + (name, argtype, type(arg))) + + +def dictlike_iteritems(dictlike): + """Return a (key, value) iterator for almost any dict-like object.""" + + if compat.py3k: + if hasattr(dictlike, 'items'): + return list(dictlike.items()) + else: + if hasattr(dictlike, 'iteritems'): + return dictlike.iteritems() + elif hasattr(dictlike, 'items'): + return iter(dictlike.items()) + + getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None)) + if getter is None: + raise TypeError( + "Object '%r' is not dict-like" % dictlike) + + if hasattr(dictlike, 'iterkeys'): + def iterator(): + for key in dictlike.iterkeys(): + yield key, getter(key) + return iterator() + elif hasattr(dictlike, 'keys'): + return iter((key, getter(key)) for key in dictlike.keys()) + else: + raise TypeError( + "Object '%r' is not dict-like" % dictlike) + + +class classproperty(property): + """A decorator that behaves like @property except that operates + on classes rather than instances. + + The decorator is currently special when using the declarative + module, but note that the + :class:`~.sqlalchemy.ext.declarative.declared_attr` + decorator should be used for this purpose with declarative. + + """ + + def __init__(self, fget, *arg, **kw): + super(classproperty, self).__init__(fget, *arg, **kw) + self.__doc__ = fget.__doc__ + + def __get__(desc, self, cls): + return desc.fget(cls) + + +class hybridproperty(object): + def __init__(self, func): + self.func = func + + def __get__(self, instance, owner): + if instance is None: + clsval = self.func(owner) + clsval.__doc__ = self.func.__doc__ + return clsval + else: + return self.func(instance) + + +class hybridmethod(object): + """Decorate a function as cls- or instance- level.""" + + def __init__(self, func): + self.func = func + + def __get__(self, instance, owner): + if instance is None: + return self.func.__get__(owner, owner.__class__) + else: + return self.func.__get__(instance, owner) + + +class _symbol(int): + def __new__(self, name, doc=None, canonical=None): + """Construct a new named symbol.""" + assert isinstance(name, compat.string_types) + if canonical is None: + canonical = hash(name) + v = int.__new__(_symbol, canonical) + v.name = name + if doc: + v.__doc__ = doc + return v + + def __reduce__(self): + return symbol, (self.name, "x", int(self)) + + def __str__(self): + return repr(self) + + def __repr__(self): + return "symbol(%r)" % self.name + +_symbol.__name__ = 'symbol' + + +class symbol(object): + """A constant symbol. 
+
+    >>> symbol('foo') is symbol('foo')
+    True
+    >>> symbol('foo')
+    symbol('foo')
+
+    A slight refinement of the MAGICCOOKIE=object() pattern.  The primary
+    advantage of symbol() is its repr().  They are also singletons.
+
+    Repeated calls of symbol('name') will all return the same instance.
+
+    The optional ``doc`` argument assigns to ``__doc__``.  This
+    is strictly so that Sphinx autoattr picks up the docstring we want
+    (it doesn't appear to pick up the in-module docstring if the datamember
+    is in a different module - autoattribute also blows up completely).
+    If Sphinx fixes/improves this then we would no longer need
+    ``doc`` here.
+
+    """
+    symbols = {}
+    _lock = compat.threading.Lock()
+
+    def __new__(cls, name, doc=None, canonical=None):
+        cls._lock.acquire()
+        try:
+            sym = cls.symbols.get(name)
+            if sym is None:
+                cls.symbols[name] = sym = _symbol(name, doc, canonical)
+            return sym
+        finally:
+            symbol._lock.release()
+
+
+_creation_order = 1
+
+
+def set_creation_order(instance):
+    """Assign a '_creation_order' sequence to the given instance.
+
+    This allows multiple instances to be sorted in order of creation
+    (typically within a single thread; the counter is not particularly
+    threadsafe).
+
+    """
+    global _creation_order
+    instance._creation_order = _creation_order
+    _creation_order += 1
+
+
+def warn_exception(func, *args, **kwargs):
+    """Executes the given function, catches all exceptions and converts
+    them to a warning.
+
+    """
+    try:
+        return func(*args, **kwargs)
+    except Exception:
+        warn("%s('%s') ignored" % sys.exc_info()[0:2])
+
+
+def ellipses_string(value, len_=25):
+    try:
+        if len(value) > len_:
+            return "%s..." % value[0:len_]
+        else:
+            return value
+    except TypeError:
+        return value
+
+
+class _hash_limit_string(compat.text_type):
+    """A string subclass that can only be hashed on a maximum number
+    of unique values.
+
+    This is used for warnings so that we can send out parameterized warnings
+    without the __warningregistry__ of the module, or the non-overridable
+    "once" registry within warnings.py, overloading memory.
+
+    """
+    def __new__(cls, value, num, args):
+        interpolated = (value % args) + \
+            (" (this warning may be suppressed after %d occurrences)" % num)
+        self = super(_hash_limit_string, cls).__new__(cls, interpolated)
+        self._hash = hash("%s_%d" % (value, hash(interpolated) % num))
+        return self
+
+    def __hash__(self):
+        return self._hash
+
+    def __eq__(self, other):
+        return hash(self) == hash(other)
+
+
+def warn(msg):
+    """Issue a warning.
+
+    If msg is a string, :class:`.exc.SAWarning` is used as
+    the category.
+
+    """
+    warnings.warn(msg, exc.SAWarning, stacklevel=2)
+
+
+def warn_limited(msg, args):
+    """Issue a warning with a parameterized string, limiting the number
+    of registrations.
+
+    """
+    if args:
+        msg = _hash_limit_string(msg, 10, args)
+    warnings.warn(msg, exc.SAWarning, stacklevel=2)
+
+
+def only_once(fn):
+    """Decorate the given function to be a no-op after it is called exactly
+    once."""
+
+    once = [fn]
+
+    def go(*arg, **kw):
+        if once:
+            once_fn = once.pop()
+            return once_fn(*arg, **kw)
+
+    return go
+
+
+_SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py')
+_UNITTEST_RE = re.compile(r'unit(?:2|test2?/)')
+
+
+def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
+    """Chop extraneous lines off beginning and end of a traceback.
+ + :param tb: + a list of traceback lines as returned by ``traceback.format_stack()`` + + :param exclude_prefix: + a regular expression object matching lines to skip at beginning of + ``tb`` + + :param exclude_suffix: + a regular expression object matching lines to skip at end of ``tb`` + """ + start = 0 + end = len(tb) - 1 + while start <= end and exclude_prefix.search(tb[start]): + start += 1 + while start <= end and exclude_suffix.search(tb[end]): + end -= 1 + return tb[start:end + 1] + +NoneType = type(None) + + +def attrsetter(attrname): + code = \ + "def set(obj, value):"\ + " obj.%s = value" % attrname + env = locals().copy() + exec(code, env) + return env['set'] + + +class EnsureKWArgType(type): + """Apply translation of functions to accept **kw arguments if they + don't already. + + """ + def __init__(cls, clsname, bases, clsdict): + fn_reg = cls.ensure_kwarg + if fn_reg: + for key in clsdict: + m = re.match(fn_reg, key) + if m: + fn = clsdict[key] + spec = compat.inspect_getargspec(fn) + if not spec.keywords: + clsdict[key] = wrapped = cls._wrap_w_kw(fn) + setattr(cls, key, wrapped) + super(EnsureKWArgType, cls).__init__(clsname, bases, clsdict) + + def _wrap_w_kw(self, fn): + + def wrap(*arg, **kw): + return fn(*arg) + return update_wrapper(wrap, fn) + + +def wrap_callable(wrapper, fn): + """Augment functools.update_wrapper() to work with objects with + a ``__call__()`` method. + + :param fn: + object with __call__ method + + """ + if hasattr(fn, '__name__'): + return update_wrapper(wrapper, fn) + else: + _f = wrapper + _f.__name__ = fn.__class__.__name__ + if hasattr(fn, '__module__'): + _f.__module__ = fn.__module__ + + if hasattr(fn.__call__, '__doc__') and fn.__call__.__doc__: + _f.__doc__ = fn.__call__.__doc__ + elif fn.__doc__: + _f.__doc__ = fn.__doc__ + + return _f diff --git a/app/lib/sqlalchemy/util/queue.py b/app/lib/sqlalchemy/util/queue.py new file mode 100644 index 0000000..fc13f2b --- /dev/null +++ b/app/lib/sqlalchemy/util/queue.py @@ -0,0 +1,199 @@ +# util/queue.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""An adaptation of Py2.3/2.4's Queue module which supports reentrant +behavior, using RLock instead of Lock for its mutex object. The +Queue object is used exclusively by the sqlalchemy.pool.QueuePool +class. + +This is to support the connection pool's usage of weakref callbacks to return +connections to the underlying Queue, which can in extremely +rare cases be invoked within the ``get()`` method of the Queue itself, +producing a ``put()`` inside the ``get()`` and therefore a reentrant +condition. + +""" + +from collections import deque +from time import time as _time +from .compat import threading + + +__all__ = ['Empty', 'Full', 'Queue'] + + +class Empty(Exception): + "Exception raised by Queue.get(block=0)/get_nowait()." + + pass + + +class Full(Exception): + "Exception raised by Queue.put(block=0)/put_nowait()." + + pass + + +class Queue: + def __init__(self, maxsize=0): + """Initialize a queue object with a given maximum size. + + If `maxsize` is <= 0, the queue size is infinite. + """ + + self._init(maxsize) + # mutex must be held whenever the queue is mutating. All methods + # that acquire mutex must release it before returning. mutex + # is shared between the two conditions, so acquiring and + # releasing the conditions also acquires and releases mutex. 
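+        # An RLock rather than a plain Lock is what makes the reentrant
+        # case described in the module docstring safe: a weakref
+        # callback firing inside get() may call put() on the same
+        # thread and re-acquire the mutex without deadlocking.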
+ self.mutex = threading.RLock() + # Notify not_empty whenever an item is added to the queue; a + # thread waiting to get is notified then. + self.not_empty = threading.Condition(self.mutex) + # Notify not_full whenever an item is removed from the queue; + # a thread waiting to put is notified then. + self.not_full = threading.Condition(self.mutex) + + def qsize(self): + """Return the approximate size of the queue (not reliable!).""" + + self.mutex.acquire() + n = self._qsize() + self.mutex.release() + return n + + def empty(self): + """Return True if the queue is empty, False otherwise (not + reliable!).""" + + self.mutex.acquire() + n = self._empty() + self.mutex.release() + return n + + def full(self): + """Return True if the queue is full, False otherwise (not + reliable!).""" + + self.mutex.acquire() + n = self._full() + self.mutex.release() + return n + + def put(self, item, block=True, timeout=None): + """Put an item into the queue. + + If optional args `block` is True and `timeout` is None (the + default), block if necessary until a free slot is + available. If `timeout` is a positive number, it blocks at + most `timeout` seconds and raises the ``Full`` exception if no + free slot was available within that time. Otherwise (`block` + is false), put an item on the queue if a free slot is + immediately available, else raise the ``Full`` exception + (`timeout` is ignored in that case). + """ + + self.not_full.acquire() + try: + if not block: + if self._full(): + raise Full + elif timeout is None: + while self._full(): + self.not_full.wait() + else: + if timeout < 0: + raise ValueError("'timeout' must be a positive number") + endtime = _time() + timeout + while self._full(): + remaining = endtime - _time() + if remaining <= 0.0: + raise Full + self.not_full.wait(remaining) + self._put(item) + self.not_empty.notify() + finally: + self.not_full.release() + + def put_nowait(self, item): + """Put an item into the queue without blocking. + + Only enqueue the item if a free slot is immediately available. + Otherwise raise the ``Full`` exception. + """ + return self.put(item, False) + + def get(self, block=True, timeout=None): + """Remove and return an item from the queue. + + If optional args `block` is True and `timeout` is None (the + default), block if necessary until an item is available. If + `timeout` is a positive number, it blocks at most `timeout` + seconds and raises the ``Empty`` exception if no item was + available within that time. Otherwise (`block` is false), + return an item if one is immediately available, else raise the + ``Empty`` exception (`timeout` is ignored in that case). + """ + self.not_empty.acquire() + try: + if not block: + if self._empty(): + raise Empty + elif timeout is None: + while self._empty(): + self.not_empty.wait() + else: + if timeout < 0: + raise ValueError("'timeout' must be a positive number") + endtime = _time() + timeout + while self._empty(): + remaining = endtime - _time() + if remaining <= 0.0: + raise Empty + self.not_empty.wait(remaining) + item = self._get() + self.not_full.notify() + return item + finally: + self.not_empty.release() + + def get_nowait(self): + """Remove and return an item from the queue without blocking. + + Only get an item if one is immediately available. Otherwise + raise the ``Empty`` exception. + """ + + return self.get(False) + + # Override these methods to implement other queue organizations + # (e.g. stack or priority queue). 
+ # These will only be called with appropriate locks held + + # Initialize the queue representation + def _init(self, maxsize): + self.maxsize = maxsize + self.queue = deque() + + def _qsize(self): + return len(self.queue) + + # Check whether the queue is empty + def _empty(self): + return not self.queue + + # Check whether the queue is full + def _full(self): + return self.maxsize > 0 and len(self.queue) == self.maxsize + + # Put a new item in the queue + def _put(self, item): + self.queue.append(item) + + # Get an item from the queue + def _get(self): + return self.queue.popleft() diff --git a/app/lib/sqlalchemy/util/topological.py b/app/lib/sqlalchemy/util/topological.py new file mode 100644 index 0000000..18476fc --- /dev/null +++ b/app/lib/sqlalchemy/util/topological.py @@ -0,0 +1,100 @@ +# util/topological.py +# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Topological sorting algorithms.""" + +from ..exc import CircularDependencyError +from .. import util + +__all__ = ['sort', 'sort_as_subsets', 'find_cycles'] + + +def sort_as_subsets(tuples, allitems, deterministic_order=False): + + edges = util.defaultdict(set) + for parent, child in tuples: + edges[child].add(parent) + + Set = util.OrderedSet if deterministic_order else set + + todo = Set(allitems) + + while todo: + output = Set() + for node in todo: + if todo.isdisjoint(edges[node]): + output.add(node) + + if not output: + raise CircularDependencyError( + "Circular dependency detected.", + find_cycles(tuples, allitems), + _gen_edges(edges) + ) + + todo.difference_update(output) + yield output + + +def sort(tuples, allitems, deterministic_order=False): + """sort the given list of items by dependency. + + 'tuples' is a list of tuples representing a partial ordering. + 'deterministic_order' keeps items within a dependency tier in list order. + """ + + for set_ in sort_as_subsets(tuples, allitems, deterministic_order): + for s in set_: + yield s + + +def find_cycles(tuples, allitems): + # adapted from: + # http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html + + edges = util.defaultdict(set) + for parent, child in tuples: + edges[parent].add(child) + nodes_to_test = set(edges) + + output = set() + + # we'd like to find all nodes that are + # involved in cycles, so we do the full + # pass through the whole thing for each + # node in the original list. + + # we can go just through parent edge nodes. + # if a node is only a child and never a parent, + # by definition it can't be part of a cycle. same + # if it's not in the edges at all. 
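+    # e.g. for tuples [(a, b), (b, a), (b, c)] this returns {a, b};
+    # c is reachable from the cycle but never re-enters the stack, so
+    # it is not reported as part of one (illustrative sketch).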
+ for node in nodes_to_test: + stack = [node] + todo = nodes_to_test.difference(stack) + while stack: + top = stack[-1] + for node in edges[top]: + if node in stack: + cyc = stack[stack.index(node):] + todo.difference_update(cyc) + output.update(cyc) + + if node in todo: + stack.append(node) + todo.remove(node) + break + else: + node = stack.pop() + return output + + +def _gen_edges(edges): + return set([ + (right, left) + for left in edges + for right in edges[left] + ]) diff --git a/app/lib/virtualenv-15.1.0.dist-info/DESCRIPTION.rst b/app/lib/virtualenv-15.1.0.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..ad8ff24 --- /dev/null +++ b/app/lib/virtualenv-15.1.0.dist-info/DESCRIPTION.rst @@ -0,0 +1,66 @@ +Virtualenv +========== + +`Mailing list `_ | +`Issues `_ | +`Github `_ | +`PyPI `_ | +User IRC: #pypa +Dev IRC: #pypa-dev + +Introduction +------------ + +``virtualenv`` is a tool to create isolated Python environments. + +The basic problem being addressed is one of dependencies and versions, +and indirectly permissions. Imagine you have an application that +needs version 1 of LibFoo, but another application requires version +2. How can you use both these applications? If you install +everything into ``/usr/lib/python2.7/site-packages`` (or whatever your +platform's standard location is), it's easy to end up in a situation +where you unintentionally upgrade an application that shouldn't be +upgraded. + +Or more generally, what if you want to install an application *and +leave it be*? If an application works, any change in its libraries or +the versions of those libraries can break the application. + +Also, what if you can't install packages into the global +``site-packages`` directory? For instance, on a shared host. + +In all these cases, ``virtualenv`` can help you. It creates an +environment that has its own installation directories, that doesn't +share libraries with other virtualenv environments (and optionally +doesn't access the globally installed libraries either). + +.. comment: + +Release History +=============== + +15.1.0 (2016-11-15) +------------------- + +* Support Python 3.6. + +* Upgrade setuptools to 28.0.0. + +* Upgrade pip to 9.0.1. + +* Don't install pre-release versions of pip, setuptools, or wheel from PyPI. + + +15.0.3 (2016-08-05) +------------------- + +* Test for given python path actually being an executable *file*, #939 + +* Only search for copy actual existing Tcl/Tk directories (PR #937) + +* Generically search for correct Tcl/Tk version (PR #926, PR #933) + +* Upgrade setuptools to 22.0.5 + +`Full Changelog `_. 
+ diff --git a/app/lib/virtualenv-15.1.0.dist-info/METADATA b/app/lib/virtualenv-15.1.0.dist-info/METADATA new file mode 100644 index 0000000..2484167 --- /dev/null +++ b/app/lib/virtualenv-15.1.0.dist-info/METADATA @@ -0,0 +1,87 @@ +Metadata-Version: 2.0 +Name: virtualenv +Version: 15.1.0 +Summary: Virtual Python Environment builder +Home-page: https://virtualenv.pypa.io/ +Author: Jannis Leidel, Carl Meyer and Brian Rosner +Author-email: python-virtualenv@groups.google.com +License: MIT +Keywords: setuptools deployment installation distutils +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 + +Virtualenv +========== + +`Mailing list `_ | +`Issues `_ | +`Github `_ | +`PyPI `_ | +User IRC: #pypa +Dev IRC: #pypa-dev + +Introduction +------------ + +``virtualenv`` is a tool to create isolated Python environments. + +The basic problem being addressed is one of dependencies and versions, +and indirectly permissions. Imagine you have an application that +needs version 1 of LibFoo, but another application requires version +2. How can you use both these applications? If you install +everything into ``/usr/lib/python2.7/site-packages`` (or whatever your +platform's standard location is), it's easy to end up in a situation +where you unintentionally upgrade an application that shouldn't be +upgraded. + +Or more generally, what if you want to install an application *and +leave it be*? If an application works, any change in its libraries or +the versions of those libraries can break the application. + +Also, what if you can't install packages into the global +``site-packages`` directory? For instance, on a shared host. + +In all these cases, ``virtualenv`` can help you. It creates an +environment that has its own installation directories, that doesn't +share libraries with other virtualenv environments (and optionally +doesn't access the globally installed libraries either). + +.. comment: + +Release History +=============== + +15.1.0 (2016-11-15) +------------------- + +* Support Python 3.6. + +* Upgrade setuptools to 28.0.0. + +* Upgrade pip to 9.0.1. + +* Don't install pre-release versions of pip, setuptools, or wheel from PyPI. + + +15.0.3 (2016-08-05) +------------------- + +* Test for given python path actually being an executable *file*, #939 + +* Only search for copy actual existing Tcl/Tk directories (PR #937) + +* Generically search for correct Tcl/Tk version (PR #926, PR #933) + +* Upgrade setuptools to 22.0.5 + +`Full Changelog `_. 
+ diff --git a/app/lib/virtualenv-15.1.0.dist-info/RECORD b/app/lib/virtualenv-15.1.0.dist-info/RECORD new file mode 100644 index 0000000..67af4b7 --- /dev/null +++ b/app/lib/virtualenv-15.1.0.dist-info/RECORD @@ -0,0 +1,16 @@ +virtualenv.py,sha256=RLUdapOdsMdI08LNZpNQKHs0_Og2HlW7Eg5l2loaqwI,99021 +virtualenv-15.1.0.dist-info/DESCRIPTION.rst,sha256=dZqgZ9h9pM5HRQ2_sT6pGEfkRNJHGEganV-rqxllSaQ,2037 +virtualenv-15.1.0.dist-info/METADATA,sha256=voSnmgASVBxq3aLdXwfYx5aqt02HQS8c_BlNo55G8nY,2860 +virtualenv-15.1.0.dist-info/RECORD,, +virtualenv-15.1.0.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 +virtualenv-15.1.0.dist-info/entry_points.txt,sha256=9dyCcxYzN9Qg_2rMGt3qrWqQcGmFoUr4bGIlTxHJaZY,48 +virtualenv-15.1.0.dist-info/metadata.json,sha256=Qh5IZHl-kW5tlAb93-8umlpXCEfxwhQ5cqrLNKrHcdk,1151 +virtualenv-15.1.0.dist-info/top_level.txt,sha256=S3o2o0EDuQgoAzL5fq3yJvmfaSzlnF8tVfqgTLxqj1k,30 +virtualenv_support/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +virtualenv_support/argparse-1.4.0-py2.py3-none-any.whl,sha256=wxZH7baf09RlqEfqMVfTe-0flfGXYLEaR6qRwEtmYxQ,23000 +virtualenv_support/pip-9.0.1-py2.py3-none-any.whl,sha256=aQt2LAqEYMMDwInV0L4DT7FaXqK3W99WX0BCH1Qv77A,1254803 +virtualenv_support/setuptools-28.8.0-py2.py3-none-any.whl,sha256=LlnQaseY784pxWfuDgaHUU78pSmmZbjzZM9JfNENIbI,472830 +virtualenv_support/wheel-0.29.0-py2.py3-none-any.whl,sha256=6oAz_JkFgE5lL3VHTTNBCgdATBp43TyUmmaGO9EFDr0,66878 +/var/folders/z7/wt4t05zs4q743qnrmn9c0wt80000gn/T/tmpndp9fnzu/bin/virtualenv,sha256=o8vBD1L202vfGMfDZPzzJlnmF6vG7CxkcN1Dhtr_kMw,257 +virtualenv_support/__pycache__/__init__.cpython-34.pyc,, +__pycache__/virtualenv.cpython-34.pyc,, diff --git a/app/lib/virtualenv-15.1.0.dist-info/WHEEL b/app/lib/virtualenv-15.1.0.dist-info/WHEEL new file mode 100644 index 0000000..8b6dd1b --- /dev/null +++ b/app/lib/virtualenv-15.1.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/app/lib/virtualenv-15.1.0.dist-info/entry_points.txt b/app/lib/virtualenv-15.1.0.dist-info/entry_points.txt new file mode 100644 index 0000000..60fab42 --- /dev/null +++ b/app/lib/virtualenv-15.1.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +virtualenv = virtualenv:main + diff --git a/app/lib/virtualenv-15.1.0.dist-info/metadata.json b/app/lib/virtualenv-15.1.0.dist-info/metadata.json new file mode 100644 index 0000000..aa8c602 --- /dev/null +++ b/app/lib/virtualenv-15.1.0.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"], "extensions": {"python.commands": {"wrap_console": {"virtualenv": "virtualenv:main"}}, "python.details": {"contacts": [{"email": "python-virtualenv@groups.google.com", "name": "Jannis Leidel, Carl Meyer and Brian Rosner", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://virtualenv.pypa.io/"}}, "python.exports": {"console_scripts": {"virtualenv": "virtualenv:main"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["setuptools", "deployment", "installation", "distutils"], "license": "MIT", 
"metadata_version": "2.0", "name": "virtualenv", "summary": "Virtual Python Environment builder", "test_requires": [{"requires": ["mock", "pytest"]}], "version": "15.1.0"} \ No newline at end of file diff --git a/app/lib/virtualenv-15.1.0.dist-info/top_level.txt b/app/lib/virtualenv-15.1.0.dist-info/top_level.txt new file mode 100644 index 0000000..5b49e4c --- /dev/null +++ b/app/lib/virtualenv-15.1.0.dist-info/top_level.txt @@ -0,0 +1,2 @@ +virtualenv +virtualenv_support diff --git a/app/lib/virtualenv.py b/app/lib/virtualenv.py new file mode 100644 index 0000000..42cd1f4 --- /dev/null +++ b/app/lib/virtualenv.py @@ -0,0 +1,2332 @@ +#!/usr/bin/env python +"""Create a "virtual" Python installation""" + +import os +import sys + +# If we are running in a new interpreter to create a virtualenv, +# we do NOT want paths from our existing location interfering with anything, +# So we remove this file's directory from sys.path - most likely to be +# the previous interpreter's site-packages. Solves #705, #763, #779 +if os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'): + for path in sys.path[:]: + if os.path.realpath(os.path.dirname(__file__)) == os.path.realpath(path): + sys.path.remove(path) + +import base64 +import codecs +import optparse +import re +import shutil +import logging +import zlib +import errno +import glob +import distutils.sysconfig +import struct +import subprocess +import pkgutil +import tempfile +import textwrap +from distutils.util import strtobool +from os.path import join + +try: + import ConfigParser +except ImportError: + import configparser as ConfigParser + +__version__ = "15.1.0" +virtualenv_version = __version__ # legacy + +if sys.version_info < (2, 6): + print('ERROR: %s' % sys.exc_info()[1]) + print('ERROR: this script requires Python 2.6 or greater.') + sys.exit(101) + +try: + basestring +except NameError: + basestring = str + +py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1]) + +is_jython = sys.platform.startswith('java') +is_pypy = hasattr(sys, 'pypy_version_info') +is_win = (sys.platform == 'win32') +is_cygwin = (sys.platform == 'cygwin') +is_darwin = (sys.platform == 'darwin') +abiflags = getattr(sys, 'abiflags', '') + +user_dir = os.path.expanduser('~') +if is_win: + default_storage_dir = os.path.join(user_dir, 'virtualenv') +else: + default_storage_dir = os.path.join(user_dir, '.virtualenv') +default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini') + +if is_pypy: + expected_exe = 'pypy' +elif is_jython: + expected_exe = 'jython' +else: + expected_exe = 'python' + +# Return a mapping of version -> Python executable +# Only provided for Windows, where the information in the registry is used +if not is_win: + def get_installed_pythons(): + return {} +else: + try: + import winreg + except ImportError: + import _winreg as winreg + + def get_installed_pythons(): + try: + python_core = winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE, + "Software\\Python\\PythonCore") + except WindowsError: + # No registered Python installations + return {} + i = 0 + versions = [] + while True: + try: + versions.append(winreg.EnumKey(python_core, i)) + i = i + 1 + except WindowsError: + break + exes = dict() + for ver in versions: + try: + path = winreg.QueryValue(python_core, "%s\\InstallPath" % ver) + except WindowsError: + continue + exes[ver] = join(path, "python.exe") + + winreg.CloseKey(python_core) + + # Add the major versions + # Sort the keys, then repeatedly update the major version entry + # Last executable (i.e., highest version) wins with 
this approach + for ver in sorted(exes): + exes[ver[0]] = exes[ver] + + return exes + +REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath', + 'fnmatch', 'locale', 'encodings', 'codecs', + 'stat', 'UserDict', 'readline', 'copy_reg', 'types', + 're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile', + 'zlib'] + +REQUIRED_FILES = ['lib-dynload', 'config'] + +majver, minver = sys.version_info[:2] +if majver == 2: + if minver >= 6: + REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc']) + if minver >= 7: + REQUIRED_MODULES.extend(['_weakrefset']) +elif majver == 3: + # Some extra modules are needed for Python 3, but different ones + # for different versions. + REQUIRED_MODULES.extend([ + '_abcoll', 'warnings', 'linecache', 'abc', 'io', '_weakrefset', + 'copyreg', 'tempfile', 'random', '__future__', 'collections', + 'keyword', 'tarfile', 'shutil', 'struct', 'copy', 'tokenize', + 'token', 'functools', 'heapq', 'bisect', 'weakref', 'reprlib' + ]) + if minver >= 2: + REQUIRED_FILES[-1] = 'config-%s' % majver + if minver >= 3: + import sysconfig + platdir = sysconfig.get_config_var('PLATDIR') + REQUIRED_FILES.append(platdir) + REQUIRED_MODULES.extend([ + 'base64', '_dummy_thread', 'hashlib', 'hmac', + 'imp', 'importlib', 'rlcompleter' + ]) + if minver >= 4: + REQUIRED_MODULES.extend([ + 'operator', + '_collections_abc', + '_bootlocale', + ]) + if minver >= 6: + REQUIRED_MODULES.extend(['enum']) + +if is_pypy: + # these are needed to correctly display the exceptions that may happen + # during the bootstrap + REQUIRED_MODULES.extend(['traceback', 'linecache']) + + if majver == 3: + # _functools is needed to import locale during stdio initialization and + # needs to be copied on PyPy because it's not built in + REQUIRED_MODULES.append('_functools') + + +class Logger(object): + + """ + Logging object for use in command-line script. Allows ranges of + levels, to avoid some redundancy of displayed information. 
+ """ + + DEBUG = logging.DEBUG + INFO = logging.INFO + NOTIFY = (logging.INFO+logging.WARN)/2 + WARN = WARNING = logging.WARN + ERROR = logging.ERROR + FATAL = logging.FATAL + + LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL] + + def __init__(self, consumers): + self.consumers = consumers + self.indent = 0 + self.in_progress = None + self.in_progress_hanging = False + + def debug(self, msg, *args, **kw): + self.log(self.DEBUG, msg, *args, **kw) + + def info(self, msg, *args, **kw): + self.log(self.INFO, msg, *args, **kw) + + def notify(self, msg, *args, **kw): + self.log(self.NOTIFY, msg, *args, **kw) + + def warn(self, msg, *args, **kw): + self.log(self.WARN, msg, *args, **kw) + + def error(self, msg, *args, **kw): + self.log(self.ERROR, msg, *args, **kw) + + def fatal(self, msg, *args, **kw): + self.log(self.FATAL, msg, *args, **kw) + + def log(self, level, msg, *args, **kw): + if args: + if kw: + raise TypeError( + "You may give positional or keyword arguments, not both") + args = args or kw + rendered = None + for consumer_level, consumer in self.consumers: + if self.level_matches(level, consumer_level): + if (self.in_progress_hanging + and consumer in (sys.stdout, sys.stderr)): + self.in_progress_hanging = False + sys.stdout.write('\n') + sys.stdout.flush() + if rendered is None: + if args: + rendered = msg % args + else: + rendered = msg + rendered = ' '*self.indent + rendered + if hasattr(consumer, 'write'): + consumer.write(rendered+'\n') + else: + consumer(rendered) + + def start_progress(self, msg): + assert not self.in_progress, ( + "Tried to start_progress(%r) while in_progress %r" + % (msg, self.in_progress)) + if self.level_matches(self.NOTIFY, self._stdout_level()): + sys.stdout.write(msg) + sys.stdout.flush() + self.in_progress_hanging = True + else: + self.in_progress_hanging = False + self.in_progress = msg + + def end_progress(self, msg='done.'): + assert self.in_progress, ( + "Tried to end_progress without start_progress") + if self.stdout_level_matches(self.NOTIFY): + if not self.in_progress_hanging: + # Some message has been printed out since start_progress + sys.stdout.write('...' 
+ self.in_progress + msg + '\n') + sys.stdout.flush() + else: + sys.stdout.write(msg + '\n') + sys.stdout.flush() + self.in_progress = None + self.in_progress_hanging = False + + def show_progress(self): + """If we are in a progress scope, and no log messages have been + shown, write out another '.'""" + if self.in_progress_hanging: + sys.stdout.write('.') + sys.stdout.flush() + + def stdout_level_matches(self, level): + """Returns true if a message at this level will go to stdout""" + return self.level_matches(level, self._stdout_level()) + + def _stdout_level(self): + """Returns the level that stdout runs at""" + for level, consumer in self.consumers: + if consumer is sys.stdout: + return level + return self.FATAL + + def level_matches(self, level, consumer_level): + """ + >>> l = Logger([]) + >>> l.level_matches(3, 4) + False + >>> l.level_matches(3, 2) + True + >>> l.level_matches(slice(None, 3), 3) + False + >>> l.level_matches(slice(None, 3), 2) + True + >>> l.level_matches(slice(1, 3), 1) + True + >>> l.level_matches(slice(2, 3), 1) + False + """ + if isinstance(level, slice): + start, stop = level.start, level.stop + if start is not None and start > consumer_level: + return False + if stop is not None and stop <= consumer_level: + return False + return True + else: + return level >= consumer_level + + @classmethod + def level_for_integer(cls, level): + levels = cls.LEVELS + if level < 0: + return levels[0] + if level >= len(levels): + return levels[-1] + return levels[level] + +# create a silent logger just to prevent this from being undefined +# will be overridden with requested verbosity main() is called. +logger = Logger([(Logger.LEVELS[-1], sys.stdout)]) + +def mkdir(path): + if not os.path.exists(path): + logger.info('Creating %s', path) + os.makedirs(path) + else: + logger.info('Directory %s already exists', path) + +def copyfileordir(src, dest, symlink=True): + if os.path.isdir(src): + shutil.copytree(src, dest, symlink) + else: + shutil.copy2(src, dest) + +def copyfile(src, dest, symlink=True): + if not os.path.exists(src): + # Some bad symlink in the src + logger.warn('Cannot find file %s (bad symlink)', src) + return + if os.path.exists(dest): + logger.debug('File %s already exists', dest) + return + if not os.path.exists(os.path.dirname(dest)): + logger.info('Creating parent directories for %s', os.path.dirname(dest)) + os.makedirs(os.path.dirname(dest)) + if not os.path.islink(src): + srcpath = os.path.abspath(src) + else: + srcpath = os.readlink(src) + if symlink and hasattr(os, 'symlink') and not is_win: + logger.info('Symlinking %s', dest) + try: + os.symlink(srcpath, dest) + except (OSError, NotImplementedError): + logger.info('Symlinking failed, copying to %s', dest) + copyfileordir(src, dest, symlink) + else: + logger.info('Copying to %s', dest) + copyfileordir(src, dest, symlink) + +def writefile(dest, content, overwrite=True): + if not os.path.exists(dest): + logger.info('Writing %s', dest) + with open(dest, 'wb') as f: + f.write(content.encode('utf-8')) + return + else: + with open(dest, 'rb') as f: + c = f.read() + if c != content.encode("utf-8"): + if not overwrite: + logger.notify('File %s exists with different content; not overwriting', dest) + return + logger.notify('Overwriting %s with new content', dest) + with open(dest, 'wb') as f: + f.write(content.encode('utf-8')) + else: + logger.info('Content %s already in place', dest) + +def rmtree(dir): + if os.path.exists(dir): + logger.notify('Deleting tree %s', dir) + shutil.rmtree(dir) + else: + 
logger.info('Do not need to delete %s; already gone', dir) + +def make_exe(fn): + if hasattr(os, 'chmod'): + oldmode = os.stat(fn).st_mode & 0xFFF # 0o7777 + newmode = (oldmode | 0x16D) & 0xFFF # 0o555, 0o7777 + os.chmod(fn, newmode) + logger.info('Changed mode of %s to %s', fn, oct(newmode)) + +def _find_file(filename, dirs): + for dir in reversed(dirs): + files = glob.glob(os.path.join(dir, filename)) + if files and os.path.isfile(files[0]): + return True, files[0] + return False, filename + +def file_search_dirs(): + here = os.path.dirname(os.path.abspath(__file__)) + dirs = [here, join(here, 'virtualenv_support')] + if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv': + # Probably some boot script; just in case virtualenv is installed... + try: + import virtualenv + except ImportError: + pass + else: + dirs.append(os.path.join( + os.path.dirname(virtualenv.__file__), 'virtualenv_support')) + return [d for d in dirs if os.path.isdir(d)] + + +class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter): + """ + Custom help formatter for use in ConfigOptionParser that updates + the defaults before expanding them, allowing them to show up correctly + in the help listing + """ + def expand_default(self, option): + if self.parser is not None: + self.parser.update_defaults(self.parser.defaults) + return optparse.IndentedHelpFormatter.expand_default(self, option) + + +class ConfigOptionParser(optparse.OptionParser): + """ + Custom option parser which updates its defaults by checking the + configuration files and environmental variables + """ + def __init__(self, *args, **kwargs): + self.config = ConfigParser.RawConfigParser() + self.files = self.get_config_files() + self.config.read(self.files) + optparse.OptionParser.__init__(self, *args, **kwargs) + + def get_config_files(self): + config_file = os.environ.get('VIRTUALENV_CONFIG_FILE', False) + if config_file and os.path.exists(config_file): + return [config_file] + return [default_config_file] + + def update_defaults(self, defaults): + """ + Updates the given defaults with values from the config files and + the environ. Does a little special handling for certain types of + options (lists). + """ + # Then go and look for the other sources of configuration: + config = {} + # 1. config files + config.update(dict(self.get_config_section('virtualenv'))) + # 2. 
environment variables + config.update(dict(self.get_environ_vars())) + # Then set the options with those values + for key, val in config.items(): + key = key.replace('_', '-') + if not key.startswith('--'): + key = '--%s' % key # only prefer long opts + option = self.get_option(key) + if option is not None: + # ignore empty values + if not val: + continue + # handle multiline configs + if option.action == 'append': + val = val.split() + else: + option.nargs = 1 + if option.action == 'store_false': + val = not strtobool(val) + elif option.action in ('store_true', 'count'): + val = strtobool(val) + try: + val = option.convert_value(key, val) + except optparse.OptionValueError: + e = sys.exc_info()[1] + print("An error occurred during configuration: %s" % e) + sys.exit(3) + defaults[option.dest] = val + return defaults + + def get_config_section(self, name): + """ + Get a section of a configuration + """ + if self.config.has_section(name): + return self.config.items(name) + return [] + + def get_environ_vars(self, prefix='VIRTUALENV_'): + """ + Returns a generator with all environment variables with prefix VIRTUALENV_ + """ + for key, val in os.environ.items(): + if key.startswith(prefix): + yield (key.replace(prefix, '').lower(), val) + + def get_default_values(self): + """ + Overriding to make it possible to update the defaults after instantiation + of the option parser; update_defaults() does the dirty work. + """ + if not self.process_default_values: + # Old, pre-Optik 1.5 behaviour. + return optparse.Values(self.defaults) + + defaults = self.update_defaults(self.defaults.copy()) # ours + for option in self._get_all_options(): + default = defaults.get(option.dest) + if isinstance(default, basestring): + opt_str = option.get_opt_string() + defaults[option.dest] = option.check_value(opt_str, default) + return optparse.Values(defaults) + + +def main(): + parser = ConfigOptionParser( + version=virtualenv_version, + usage="%prog [OPTIONS] DEST_DIR", + formatter=UpdatingDefaultsHelpFormatter()) + + parser.add_option( + '-v', '--verbose', + action='count', + dest='verbose', + default=0, + help="Increase verbosity.") + + parser.add_option( + '-q', '--quiet', + action='count', + dest='quiet', + default=0, + help='Decrease verbosity.') + + parser.add_option( + '-p', '--python', + dest='python', + metavar='PYTHON_EXE', + help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 ' + 'interpreter to create the new environment. The default is the interpreter that ' + 'virtualenv was installed with (%s)' % sys.executable) + + parser.add_option( + '--clear', + dest='clear', + action='store_true', + help="Clear out the non-root install and start from scratch.") + + parser.set_defaults(system_site_packages=False) + parser.add_option( + '--no-site-packages', + dest='system_site_packages', + action='store_false', + help="DEPRECATED. Retained only for backward compatibility. 
" + "Not having access to global site-packages is now the default behavior.") + + parser.add_option( + '--system-site-packages', + dest='system_site_packages', + action='store_true', + help="Give the virtual environment access to the global site-packages.") + + parser.add_option( + '--always-copy', + dest='symlink', + action='store_false', + default=True, + help="Always copy files rather than symlinking.") + + parser.add_option( + '--unzip-setuptools', + dest='unzip_setuptools', + action='store_true', + help="Unzip Setuptools when installing it.") + + parser.add_option( + '--relocatable', + dest='relocatable', + action='store_true', + help='Make an EXISTING virtualenv environment relocatable. ' + 'This fixes up scripts and makes all .pth files relative.') + + parser.add_option( + '--no-setuptools', + dest='no_setuptools', + action='store_true', + help='Do not install setuptools in the new virtualenv.') + + parser.add_option( + '--no-pip', + dest='no_pip', + action='store_true', + help='Do not install pip in the new virtualenv.') + + parser.add_option( + '--no-wheel', + dest='no_wheel', + action='store_true', + help='Do not install wheel in the new virtualenv.') + + default_search_dirs = file_search_dirs() + parser.add_option( + '--extra-search-dir', + dest="search_dirs", + action="append", + metavar='DIR', + default=default_search_dirs, + help="Directory to look for setuptools/pip distributions in. " + "This option can be used multiple times.") + + parser.add_option( + "--download", + dest="download", + default=True, + action="store_true", + help="Download preinstalled packages from PyPI.", + ) + + parser.add_option( + "--no-download", + '--never-download', + dest="download", + action="store_false", + help="Do not download preinstalled packages from PyPI.", + ) + + parser.add_option( + '--prompt', + dest='prompt', + help='Provides an alternative prompt prefix for this environment.') + + parser.add_option( + '--setuptools', + dest='setuptools', + action='store_true', + help="DEPRECATED. Retained only for backward compatibility. This option has no effect.") + + parser.add_option( + '--distribute', + dest='distribute', + action='store_true', + help="DEPRECATED. Retained only for backward compatibility. 
This option has no effect.") + + if 'extend_parser' in globals(): + extend_parser(parser) + + options, args = parser.parse_args() + + global logger + + if 'adjust_options' in globals(): + adjust_options(options, args) + + verbosity = options.verbose - options.quiet + logger = Logger([(Logger.level_for_integer(2 - verbosity), sys.stdout)]) + + if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'): + env = os.environ.copy() + interpreter = resolve_interpreter(options.python) + if interpreter == sys.executable: + logger.warn('Already using interpreter %s' % interpreter) + else: + logger.notify('Running virtualenv with interpreter %s' % interpreter) + env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true' + file = __file__ + if file.endswith('.pyc'): + file = file[:-1] + popen = subprocess.Popen([interpreter, file] + sys.argv[1:], env=env) + raise SystemExit(popen.wait()) + + if not args: + print('You must provide a DEST_DIR') + parser.print_help() + sys.exit(2) + if len(args) > 1: + print('There must be only one argument: DEST_DIR (you gave %s)' % ( + ' '.join(args))) + parser.print_help() + sys.exit(2) + + home_dir = args[0] + + if os.path.exists(home_dir) and os.path.isfile(home_dir): + logger.fatal('ERROR: File already exists and is not a directory.') + logger.fatal('Please provide a different path or delete the file.') + sys.exit(3) + + if os.environ.get('WORKING_ENV'): + logger.fatal('ERROR: you cannot run virtualenv while in a workingenv') + logger.fatal('Please deactivate your workingenv, then re-run this script') + sys.exit(3) + + if 'PYTHONHOME' in os.environ: + logger.warn('PYTHONHOME is set. You *must* activate the virtualenv before using it') + del os.environ['PYTHONHOME'] + + if options.relocatable: + make_environment_relocatable(home_dir) + return + + create_environment(home_dir, + site_packages=options.system_site_packages, + clear=options.clear, + unzip_setuptools=options.unzip_setuptools, + prompt=options.prompt, + search_dirs=options.search_dirs, + download=options.download, + no_setuptools=options.no_setuptools, + no_pip=options.no_pip, + no_wheel=options.no_wheel, + symlink=options.symlink) + if 'after_install' in globals(): + after_install(options, home_dir) + +def call_subprocess(cmd, show_stdout=True, + filter_stdout=None, cwd=None, + raise_on_returncode=True, extra_env=None, + remove_from_env=None, stdin=None): + cmd_parts = [] + for part in cmd: + if len(part) > 45: + part = part[:20]+"..."+part[-20:] + if ' ' in part or '\n' in part or '"' in part or "'" in part: + part = '"%s"' % part.replace('"', '\\"') + if hasattr(part, 'decode'): + try: + part = part.decode(sys.getdefaultencoding()) + except UnicodeDecodeError: + part = part.decode(sys.getfilesystemencoding()) + cmd_parts.append(part) + cmd_desc = ' '.join(cmd_parts) + if show_stdout: + stdout = None + else: + stdout = subprocess.PIPE + logger.debug("Running command %s" % cmd_desc) + if extra_env or remove_from_env: + env = os.environ.copy() + if extra_env: + env.update(extra_env) + if remove_from_env: + for varname in remove_from_env: + env.pop(varname, None) + else: + env = None + try: + proc = subprocess.Popen( + cmd, stderr=subprocess.STDOUT, + stdin=None if stdin is None else subprocess.PIPE, + stdout=stdout, + cwd=cwd, env=env) + except Exception: + e = sys.exc_info()[1] + logger.fatal( + "Error %s while executing command %s" % (e, cmd_desc)) + raise + all_output = [] + if stdout is not None: + if stdin is not None: + proc.stdin.write(stdin) + proc.stdin.close() + + stdout = proc.stdout + 
encoding = sys.getdefaultencoding() + fs_encoding = sys.getfilesystemencoding() + while 1: + line = stdout.readline() + try: + line = line.decode(encoding) + except UnicodeDecodeError: + line = line.decode(fs_encoding) + if not line: + break + line = line.rstrip() + all_output.append(line) + if filter_stdout: + level = filter_stdout(line) + if isinstance(level, tuple): + level, line = level + logger.log(level, line) + if not logger.stdout_level_matches(level): + logger.show_progress() + else: + logger.info(line) + else: + proc.communicate(stdin) + proc.wait() + if proc.returncode: + if raise_on_returncode: + if all_output: + logger.notify('Complete output from command %s:' % cmd_desc) + logger.notify('\n'.join(all_output) + '\n----------------------------------------') + raise OSError( + "Command %s failed with error code %s" + % (cmd_desc, proc.returncode)) + else: + logger.warn( + "Command %s had error code %s" + % (cmd_desc, proc.returncode)) + +def filter_install_output(line): + if line.strip().startswith('running'): + return Logger.INFO + return Logger.DEBUG + +def find_wheels(projects, search_dirs): + """Find wheels from which we can import PROJECTS. + + Scan through SEARCH_DIRS for a wheel for each PROJECT in turn. Return + a list of the first wheel found for each PROJECT + """ + + wheels = [] + + # Look through SEARCH_DIRS for the first suitable wheel. Don't bother + # about version checking here, as this is simply to get something we can + # then use to install the correct version. + for project in projects: + for dirname in search_dirs: + # This relies on only having "universal" wheels available. + # The pattern could be tightened to require -py2.py3-none-any.whl. + files = glob.glob(os.path.join(dirname, project + '-*.whl')) + if files: + wheels.append(os.path.abspath(files[0])) + break + else: + # We're out of luck, so quit with a suitable error + logger.fatal('Cannot find a wheel for %s' % (project,)) + + return wheels + +def install_wheel(project_names, py_executable, search_dirs=None, + download=False): + if search_dirs is None: + search_dirs = file_search_dirs() + + wheels = find_wheels(['setuptools', 'pip'], search_dirs) + pythonpath = os.pathsep.join(wheels) + + # PIP_FIND_LINKS uses space as the path separator and thus cannot have paths + # with spaces in them. Convert any of those to local file:// URL form. + try: + from urlparse import urljoin + from urllib import pathname2url + except ImportError: + from urllib.parse import urljoin + from urllib.request import pathname2url + def space_path2url(p): + if ' ' not in p: + return p + return urljoin('file:', pathname2url(os.path.abspath(p))) + findlinks = ' '.join(space_path2url(d) for d in search_dirs) + + SCRIPT = textwrap.dedent(""" + import sys + import pkgutil + import tempfile + import os + + import pip + + cert_data = pkgutil.get_data("pip._vendor.requests", "cacert.pem") + if cert_data is not None: + cert_file = tempfile.NamedTemporaryFile(delete=False) + cert_file.write(cert_data) + cert_file.close() + else: + cert_file = None + + try: + args = ["install", "--ignore-installed"] + if cert_file is not None: + args += ["--cert", cert_file.name] + args += sys.argv[1:] + + sys.exit(pip.main(args)) + finally: + if cert_file is not None: + os.remove(cert_file.name) + """).encode("utf8") + + cmd = [py_executable, '-'] + project_names + logger.start_progress('Installing %s...' 
% (', '.join(project_names))) + logger.indent += 2 + + env = { + "PYTHONPATH": pythonpath, + "JYTHONPATH": pythonpath, # for Jython < 3.x + "PIP_FIND_LINKS": findlinks, + "PIP_USE_WHEEL": "1", + "PIP_ONLY_BINARY": ":all:", + "PIP_USER": "0", + } + + if not download: + env["PIP_NO_INDEX"] = "1" + + try: + call_subprocess(cmd, show_stdout=False, extra_env=env, stdin=SCRIPT) + finally: + logger.indent -= 2 + logger.end_progress() + + +def create_environment(home_dir, site_packages=False, clear=False, + unzip_setuptools=False, + prompt=None, search_dirs=None, download=False, + no_setuptools=False, no_pip=False, no_wheel=False, + symlink=True): + """ + Creates a new environment in ``home_dir``. + + If ``site_packages`` is true, then the global ``site-packages/`` + directory will be on the path. + + If ``clear`` is true (default False) then the environment will + first be cleared. + """ + home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir) + + py_executable = os.path.abspath(install_python( + home_dir, lib_dir, inc_dir, bin_dir, + site_packages=site_packages, clear=clear, symlink=symlink)) + + install_distutils(home_dir) + + to_install = [] + + if not no_setuptools: + to_install.append('setuptools') + + if not no_pip: + to_install.append('pip') + + if not no_wheel: + to_install.append('wheel') + + if to_install: + install_wheel( + to_install, + py_executable, + search_dirs, + download=download, + ) + + install_activate(home_dir, bin_dir, prompt) + + install_python_config(home_dir, bin_dir, prompt) + +def is_executable_file(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + +def path_locations(home_dir): + """Return the path locations for the environment (where libraries are, + where scripts go, etc)""" + home_dir = os.path.abspath(home_dir) + # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its + # prefix arg is broken: http://bugs.python.org/issue3386 + if is_win: + # Windows has lots of problems with executables with spaces in + # the name; this function will remove them (using the ~1 + # format): + mkdir(home_dir) + if ' ' in home_dir: + import ctypes + GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW + size = max(len(home_dir)+1, 256) + buf = ctypes.create_unicode_buffer(size) + try: + u = unicode + except NameError: + u = str + ret = GetShortPathName(u(home_dir), buf, size) + if not ret: + print('Error: the path "%s" has a space in it' % home_dir) + print('We could not determine the short pathname for it.') + print('Exiting.') + sys.exit(3) + home_dir = str(buf.value) + lib_dir = join(home_dir, 'Lib') + inc_dir = join(home_dir, 'Include') + bin_dir = join(home_dir, 'Scripts') + if is_jython: + lib_dir = join(home_dir, 'Lib') + inc_dir = join(home_dir, 'Include') + bin_dir = join(home_dir, 'bin') + elif is_pypy: + lib_dir = home_dir + inc_dir = join(home_dir, 'include') + bin_dir = join(home_dir, 'bin') + elif not is_win: + lib_dir = join(home_dir, 'lib', py_version) + inc_dir = join(home_dir, 'include', py_version + abiflags) + bin_dir = join(home_dir, 'bin') + return home_dir, lib_dir, inc_dir, bin_dir + + +def change_prefix(filename, dst_prefix): + prefixes = [sys.prefix] + + if is_darwin: + prefixes.extend(( + os.path.join("/Library/Python", sys.version[:3], "site-packages"), + os.path.join(sys.prefix, "Extras", "lib", "python"), + os.path.join("~", "Library", "Python", sys.version[:3], "site-packages"), + # Python 2.6 no-frameworks + os.path.join("~", ".local", "lib","python", sys.version[:3], "site-packages"), + # System Python 2.7 on 
OSX Mountain Lion + os.path.join("~", "Library", "Python", sys.version[:3], "lib", "python", "site-packages"))) + + if hasattr(sys, 'real_prefix'): + prefixes.append(sys.real_prefix) + if hasattr(sys, 'base_prefix'): + prefixes.append(sys.base_prefix) + prefixes = list(map(os.path.expanduser, prefixes)) + prefixes = list(map(os.path.abspath, prefixes)) + # Check longer prefixes first so we don't split in the middle of a filename + prefixes = sorted(prefixes, key=len, reverse=True) + filename = os.path.abspath(filename) + # On Windows, make sure drive letter is uppercase + if is_win and filename[0] in 'abcdefghijklmnopqrstuvwxyz': + filename = filename[0].upper() + filename[1:] + for i, prefix in enumerate(prefixes): + if is_win and prefix[0] in 'abcdefghijklmnopqrstuvwxyz': + prefixes[i] = prefix[0].upper() + prefix[1:] + for src_prefix in prefixes: + if filename.startswith(src_prefix): + _, relpath = filename.split(src_prefix, 1) + if src_prefix != os.sep: # sys.prefix == "/" + assert relpath[0] == os.sep + relpath = relpath[1:] + return join(dst_prefix, relpath) + assert False, "Filename %s does not start with any of these prefixes: %s" % \ + (filename, prefixes) + +def copy_required_modules(dst_prefix, symlink): + import imp + + for modname in REQUIRED_MODULES: + if modname in sys.builtin_module_names: + logger.info("Ignoring built-in bootstrap module: %s" % modname) + continue + try: + f, filename, _ = imp.find_module(modname) + except ImportError: + logger.info("Cannot import bootstrap module: %s" % modname) + else: + if f is not None: + f.close() + # special-case custom readline.so on OS X, but not for pypy: + if modname == 'readline' and sys.platform == 'darwin' and not ( + is_pypy or filename.endswith(join('lib-dynload', 'readline.so'))): + dst_filename = join(dst_prefix, 'lib', 'python%s' % sys.version[:3], 'readline.so') + elif modname == 'readline' and sys.platform == 'win32': + # special-case for Windows, where readline is not a + # standard module, though it may have been installed in + # site-packages by a third-party package + pass + else: + dst_filename = change_prefix(filename, dst_prefix) + copyfile(filename, dst_filename, symlink) + if filename.endswith('.pyc'): + pyfile = filename[:-1] + if os.path.exists(pyfile): + copyfile(pyfile, dst_filename[:-1], symlink) + +def copy_tcltk(src, dest, symlink): + """ copy tcl/tk libraries on Windows (issue #93) """ + for libversion in '8.5', '8.6': + for libname in 'tcl', 'tk': + srcdir = join(src, 'tcl', libname + libversion) + destdir = join(dest, 'tcl', libname + libversion) + # Only copy the dirs from the above combinations that exist + if os.path.exists(srcdir) and not os.path.exists(destdir): + copyfileordir(srcdir, destdir, symlink) + + +def subst_path(prefix_path, prefix, home_dir): + prefix_path = os.path.normpath(prefix_path) + prefix = os.path.normpath(prefix) + home_dir = os.path.normpath(home_dir) + if not prefix_path.startswith(prefix): + logger.warn('Path not in prefix %r %r', prefix_path, prefix) + return + return prefix_path.replace(prefix, home_dir, 1) + + +def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear, symlink=True): + """Install just the base environment, no distutils patches etc""" + if sys.executable.startswith(bin_dir): + print('Please use the *system* python to run this script') + return + + if clear: + rmtree(lib_dir) + ## FIXME: why not delete it? 
+ ## Maybe it should delete everything with #!/path/to/venv/python in it + logger.notify('Not deleting %s', bin_dir) + + if hasattr(sys, 'real_prefix'): + logger.notify('Using real prefix %r' % sys.real_prefix) + prefix = sys.real_prefix + elif hasattr(sys, 'base_prefix'): + logger.notify('Using base prefix %r' % sys.base_prefix) + prefix = sys.base_prefix + else: + prefix = sys.prefix + mkdir(lib_dir) + fix_lib64(lib_dir, symlink) + stdlib_dirs = [os.path.dirname(os.__file__)] + if is_win: + stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs')) + elif is_darwin: + stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages')) + if hasattr(os, 'symlink'): + logger.info('Symlinking Python bootstrap modules') + else: + logger.info('Copying Python bootstrap modules') + logger.indent += 2 + try: + # copy required files... + for stdlib_dir in stdlib_dirs: + if not os.path.isdir(stdlib_dir): + continue + for fn in os.listdir(stdlib_dir): + bn = os.path.splitext(fn)[0] + if fn != 'site-packages' and bn in REQUIRED_FILES: + copyfile(join(stdlib_dir, fn), join(lib_dir, fn), symlink) + # ...and modules + copy_required_modules(home_dir, symlink) + finally: + logger.indent -= 2 + # ...copy tcl/tk + if is_win: + copy_tcltk(prefix, home_dir, symlink) + mkdir(join(lib_dir, 'site-packages')) + import site + site_filename = site.__file__ + if site_filename.endswith('.pyc') or site_filename.endswith('.pyo'): + site_filename = site_filename[:-1] + elif site_filename.endswith('$py.class'): + site_filename = site_filename.replace('$py.class', '.py') + site_filename_dst = change_prefix(site_filename, home_dir) + site_dir = os.path.dirname(site_filename_dst) + writefile(site_filename_dst, SITE_PY) + writefile(join(site_dir, 'orig-prefix.txt'), prefix) + site_packages_filename = join(site_dir, 'no-global-site-packages.txt') + if not site_packages: + writefile(site_packages_filename, '') + + if is_pypy or is_win: + stdinc_dir = join(prefix, 'include') + else: + stdinc_dir = join(prefix, 'include', py_version + abiflags) + if os.path.exists(stdinc_dir): + copyfile(stdinc_dir, inc_dir, symlink) + else: + logger.debug('No include dir %s' % stdinc_dir) + + platinc_dir = distutils.sysconfig.get_python_inc(plat_specific=1) + if platinc_dir != stdinc_dir: + platinc_dest = distutils.sysconfig.get_python_inc( + plat_specific=1, prefix=home_dir) + if platinc_dir == platinc_dest: + # Do platinc_dest manually due to a CPython bug; + # not http://bugs.python.org/issue3386 but a close cousin + platinc_dest = subst_path(platinc_dir, prefix, home_dir) + if platinc_dest: + # PyPy's stdinc_dir and prefix are relative to the original binary + # (traversing virtualenvs), whereas the platinc_dir is relative to + # the inner virtualenv and ignores the prefix argument. + # This seems more evolved than designed. 
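+            # Editor's illustration (hypothetical paths, not part of the original
+            # commit): subst_path() above rebases a path from the old prefix onto
+            # the new environment, e.g.
+            #   subst_path('/usr/include/python2.7', '/usr', '/env')
+            #   -> '/env/include/python2.7'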
+ copyfile(platinc_dir, platinc_dest, symlink) + + # pypy never uses exec_prefix, just ignore it + if sys.exec_prefix != prefix and not is_pypy: + if is_win: + exec_dir = join(sys.exec_prefix, 'lib') + elif is_jython: + exec_dir = join(sys.exec_prefix, 'Lib') + else: + exec_dir = join(sys.exec_prefix, 'lib', py_version) + for fn in os.listdir(exec_dir): + copyfile(join(exec_dir, fn), join(lib_dir, fn), symlink) + + if is_jython: + # Jython has either jython-dev.jar and javalib/ dir, or just + # jython.jar + for name in 'jython-dev.jar', 'javalib', 'jython.jar': + src = join(prefix, name) + if os.path.exists(src): + copyfile(src, join(home_dir, name), symlink) + # XXX: registry should always exist after Jython 2.5rc1 + src = join(prefix, 'registry') + if os.path.exists(src): + copyfile(src, join(home_dir, 'registry'), symlink=False) + copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'), + symlink=False) + + mkdir(bin_dir) + py_executable = join(bin_dir, os.path.basename(sys.executable)) + if 'Python.framework' in prefix: + # OS X framework builds cause validation to break + # https://github.com/pypa/virtualenv/issues/322 + if os.environ.get('__PYVENV_LAUNCHER__'): + del os.environ["__PYVENV_LAUNCHER__"] + if re.search(r'/Python(?:-32|-64)*$', py_executable): + # The name of the python executable is not quite what + # we want, rename it. + py_executable = os.path.join( + os.path.dirname(py_executable), 'python') + + logger.notify('New %s executable in %s', expected_exe, py_executable) + pcbuild_dir = os.path.dirname(sys.executable) + pyd_pth = os.path.join(lib_dir, 'site-packages', 'virtualenv_builddir_pyd.pth') + if is_win and os.path.exists(os.path.join(pcbuild_dir, 'build.bat')): + logger.notify('Detected python running from build directory %s', pcbuild_dir) + logger.notify('Writing .pth file linking to build directory for *.pyd files') + writefile(pyd_pth, pcbuild_dir) + else: + pcbuild_dir = None + if os.path.exists(pyd_pth): + logger.info('Deleting %s (not Windows env or not build directory python)' % pyd_pth) + os.unlink(pyd_pth) + + if sys.executable != py_executable: + ## FIXME: could I just hard link? + executable = sys.executable + shutil.copyfile(executable, py_executable) + make_exe(py_executable) + if is_win or is_cygwin: + pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe') + if os.path.exists(pythonw): + logger.info('Also created pythonw.exe') + shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe')) + python_d = os.path.join(os.path.dirname(sys.executable), 'python_d.exe') + python_d_dest = os.path.join(os.path.dirname(py_executable), 'python_d.exe') + if os.path.exists(python_d): + logger.info('Also created python_d.exe') + shutil.copyfile(python_d, python_d_dest) + elif os.path.exists(python_d_dest): + logger.info('Removed python_d.exe as it is no longer at the source') + os.unlink(python_d_dest) + # we need to copy the DLL to enforce that windows will load the correct one. + # may not exist if we are cygwin. 
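+            # Editor's illustration: on a CPython 2.7 host the names computed
+            # below resolve to 'python27.dll' and 'python27_d.dll' (debug build),
+            # looked up next to the source interpreter.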
+ py_executable_dll = 'python%s%s.dll' % ( + sys.version_info[0], sys.version_info[1]) + py_executable_dll_d = 'python%s%s_d.dll' % ( + sys.version_info[0], sys.version_info[1]) + pythondll = os.path.join(os.path.dirname(sys.executable), py_executable_dll) + pythondll_d = os.path.join(os.path.dirname(sys.executable), py_executable_dll_d) + pythondll_d_dest = os.path.join(os.path.dirname(py_executable), py_executable_dll_d) + if os.path.exists(pythondll): + logger.info('Also created %s' % py_executable_dll) + shutil.copyfile(pythondll, os.path.join(os.path.dirname(py_executable), py_executable_dll)) + if os.path.exists(pythondll_d): + logger.info('Also created %s' % py_executable_dll_d) + shutil.copyfile(pythondll_d, pythondll_d_dest) + elif os.path.exists(pythondll_d_dest): + logger.info('Removed %s as the source does not exist' % pythondll_d_dest) + os.unlink(pythondll_d_dest) + if is_pypy: + # make a symlink python --> pypy-c + python_executable = os.path.join(os.path.dirname(py_executable), 'python') + if sys.platform in ('win32', 'cygwin'): + python_executable += '.exe' + logger.info('Also created executable %s' % python_executable) + copyfile(py_executable, python_executable, symlink) + + if is_win: + for name in ['libexpat.dll', 'libpypy.dll', 'libpypy-c.dll', + 'libeay32.dll', 'ssleay32.dll', 'sqlite3.dll', + 'tcl85.dll', 'tk85.dll']: + src = join(prefix, name) + if os.path.exists(src): + copyfile(src, join(bin_dir, name), symlink) + + for d in sys.path: + if d.endswith('lib_pypy'): + break + else: + logger.fatal('Could not find lib_pypy in sys.path') + raise SystemExit(3) + logger.info('Copying lib_pypy') + copyfile(d, os.path.join(home_dir, 'lib_pypy'), symlink) + + if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe: + secondary_exe = os.path.join(os.path.dirname(py_executable), + expected_exe) + py_executable_ext = os.path.splitext(py_executable)[1] + if py_executable_ext.lower() == '.exe': + # python2.4 gives an extension of '.4' :P + secondary_exe += py_executable_ext + if os.path.exists(secondary_exe): + logger.warn('Not overwriting existing %s script %s (you must use %s)' + % (expected_exe, secondary_exe, py_executable)) + else: + logger.notify('Also creating executable in %s' % secondary_exe) + shutil.copyfile(sys.executable, secondary_exe) + make_exe(secondary_exe) + + if '.framework' in prefix: + if 'Python.framework' in prefix: + logger.debug('MacOSX Python framework detected') + # Make sure we use the embedded interpreter inside + # the framework, even if sys.executable points to + # the stub executable in ${sys.prefix}/bin + # See http://groups.google.com/group/python-virtualenv/ + # browse_thread/thread/17cab2f85da75951 + original_python = os.path.join( + prefix, 'Resources/Python.app/Contents/MacOS/Python') + if 'EPD' in prefix: + logger.debug('EPD framework detected') + original_python = os.path.join(prefix, 'bin/python') + shutil.copy(original_python, py_executable) + + # Copy the framework's dylib into the virtual + # environment + virtual_lib = os.path.join(home_dir, '.Python') + + if os.path.exists(virtual_lib): + os.unlink(virtual_lib) + copyfile( + os.path.join(prefix, 'Python'), + virtual_lib, + symlink) + + # And then change the install_name of the copied python executable + try: + mach_o_change(py_executable, + os.path.join(prefix, 'Python'), + '@executable_path/../.Python') + except: + e = sys.exc_info()[1] + logger.warn("Could not call mach_o_change: %s. " + "Trying to call install_name_tool instead." 
% e) + try: + call_subprocess( + ["install_name_tool", "-change", + os.path.join(prefix, 'Python'), + '@executable_path/../.Python', + py_executable]) + except: + logger.fatal("Could not call install_name_tool -- you must " + "have Apple's development tools installed") + raise + + if not is_win: + # Ensure that 'python', 'pythonX' and 'pythonX.Y' all exist + py_exe_version_major = 'python%s' % sys.version_info[0] + py_exe_version_major_minor = 'python%s.%s' % ( + sys.version_info[0], sys.version_info[1]) + py_exe_no_version = 'python' + required_symlinks = [ py_exe_no_version, py_exe_version_major, + py_exe_version_major_minor ] + + py_executable_base = os.path.basename(py_executable) + + if py_executable_base in required_symlinks: + # Don't try to symlink to yourself. + required_symlinks.remove(py_executable_base) + + for pth in required_symlinks: + full_pth = join(bin_dir, pth) + if os.path.exists(full_pth): + os.unlink(full_pth) + if symlink: + os.symlink(py_executable_base, full_pth) + else: + copyfile(py_executable, full_pth, symlink) + + cmd = [py_executable, '-c', 'import sys;out=sys.stdout;' + 'getattr(out, "buffer", out).write(sys.prefix.encode("utf-8"))'] + logger.info('Testing executable with %s %s "%s"' % tuple(cmd)) + try: + proc = subprocess.Popen(cmd, + stdout=subprocess.PIPE) + proc_stdout, proc_stderr = proc.communicate() + except OSError: + e = sys.exc_info()[1] + if e.errno == errno.EACCES: + logger.fatal('ERROR: The executable %s could not be run: %s' % (py_executable, e)) + sys.exit(100) + else: + raise e + + proc_stdout = proc_stdout.strip().decode("utf-8") + proc_stdout = os.path.normcase(os.path.abspath(proc_stdout)) + norm_home_dir = os.path.normcase(os.path.abspath(home_dir)) + if hasattr(norm_home_dir, 'decode'): + norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding()) + if proc_stdout != norm_home_dir: + logger.fatal( + 'ERROR: The executable %s is not functioning' % py_executable) + logger.fatal( + 'ERROR: It thinks sys.prefix is %r (should be %r)' + % (proc_stdout, norm_home_dir)) + logger.fatal( + 'ERROR: virtualenv is not compatible with this system or executable') + if is_win: + logger.fatal( + 'Note: some Windows users have reported this error when they ' + 'installed Python for "Only this user" or have multiple ' + 'versions of Python installed. Copying the appropriate ' + 'PythonXX.dll to the virtualenv Scripts/ directory may fix ' + 'this problem.') + sys.exit(100) + else: + logger.info('Got sys.prefix result: %r' % proc_stdout) + + pydistutils = os.path.expanduser('~/.pydistutils.cfg') + if os.path.exists(pydistutils): + logger.notify('Please make sure you remove any previous custom paths from ' + 'your %s file.' 
% pydistutils) + ## FIXME: really this should be calculated earlier + + fix_local_scheme(home_dir, symlink) + + if site_packages: + if os.path.exists(site_packages_filename): + logger.info('Deleting %s' % site_packages_filename) + os.unlink(site_packages_filename) + + return py_executable + + +def install_activate(home_dir, bin_dir, prompt=None): + if is_win or is_jython and os._name == 'nt': + files = { + 'activate.bat': ACTIVATE_BAT, + 'deactivate.bat': DEACTIVATE_BAT, + 'activate.ps1': ACTIVATE_PS, + } + + # MSYS needs paths of the form /c/path/to/file + drive, tail = os.path.splitdrive(home_dir.replace(os.sep, '/')) + home_dir_msys = (drive and "/%s%s" or "%s%s") % (drive[:1], tail) + + # Run-time conditional enables (basic) Cygwin compatibility + home_dir_sh = ("""$(if [ "$OSTYPE" "==" "cygwin" ]; then cygpath -u '%s'; else echo '%s'; fi;)""" % + (home_dir, home_dir_msys)) + files['activate'] = ACTIVATE_SH.replace('__VIRTUAL_ENV__', home_dir_sh) + + else: + files = {'activate': ACTIVATE_SH} + + # supplying activate.fish in addition to, not instead of, the + # bash script support. + files['activate.fish'] = ACTIVATE_FISH + + # same for csh/tcsh support... + files['activate.csh'] = ACTIVATE_CSH + + files['activate_this.py'] = ACTIVATE_THIS + + install_files(home_dir, bin_dir, prompt, files) + +def install_files(home_dir, bin_dir, prompt, files): + if hasattr(home_dir, 'decode'): + home_dir = home_dir.decode(sys.getfilesystemencoding()) + vname = os.path.basename(home_dir) + for name, content in files.items(): + content = content.replace('__VIRTUAL_PROMPT__', prompt or '') + content = content.replace('__VIRTUAL_WINPROMPT__', prompt or '(%s)' % vname) + content = content.replace('__VIRTUAL_ENV__', home_dir) + content = content.replace('__VIRTUAL_NAME__', vname) + content = content.replace('__BIN_NAME__', os.path.basename(bin_dir)) + writefile(os.path.join(bin_dir, name), content) + +def install_python_config(home_dir, bin_dir, prompt=None): + if sys.platform == 'win32' or is_jython and os._name == 'nt': + files = {} + else: + files = {'python-config': PYTHON_CONFIG} + install_files(home_dir, bin_dir, prompt, files) + for name, content in files.items(): + make_exe(os.path.join(bin_dir, name)) + +def install_distutils(home_dir): + distutils_path = change_prefix(distutils.__path__[0], home_dir) + mkdir(distutils_path) + ## FIXME: maybe this prefix setting should only be put in place if + ## there's a local distutils.cfg with a prefix setting? + home_dir = os.path.abspath(home_dir) + ## FIXME: this is breaking things, removing for now: + #distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir + writefile(os.path.join(distutils_path, '__init__.py'), DISTUTILS_INIT) + writefile(os.path.join(distutils_path, 'distutils.cfg'), DISTUTILS_CFG, overwrite=False) + +def fix_local_scheme(home_dir, symlink=True): + """ + Platforms that use the "posix_local" install scheme (like Ubuntu with + Python 2.7) need to be given an additional "local" location, sigh. 
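+
+    For example (editor's illustration; 'ENV' is shorthand for home_dir),
+    ENV/local/bin and ENV/local/lib end up as symlinks back to (or, with
+    --always-copy, copies of) the matching top-level ENV directories.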
+ """ + try: + import sysconfig + except ImportError: + pass + else: + if sysconfig._get_default_scheme() == 'posix_local': + local_path = os.path.join(home_dir, 'local') + if not os.path.exists(local_path): + os.mkdir(local_path) + for subdir_name in os.listdir(home_dir): + if subdir_name == 'local': + continue + copyfile(os.path.abspath(os.path.join(home_dir, subdir_name)), \ + os.path.join(local_path, subdir_name), symlink) + +def fix_lib64(lib_dir, symlink=True): + """ + Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y + instead of lib/pythonX.Y. If this is such a platform we'll just create a + symlink so lib64 points to lib + """ + # PyPy's library path scheme is not affected by this. + # Return early or we will die on the following assert. + if is_pypy: + logger.debug('PyPy detected, skipping lib64 symlinking') + return + # Check we have a lib64 library path + if not [p for p in distutils.sysconfig.get_config_vars().values() + if isinstance(p, basestring) and 'lib64' in p]: + return + + logger.debug('This system uses lib64; symlinking lib64 to lib') + + assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], ( + "Unexpected python lib dir: %r" % lib_dir) + lib_parent = os.path.dirname(lib_dir) + top_level = os.path.dirname(lib_parent) + lib_dir = os.path.join(top_level, 'lib') + lib64_link = os.path.join(top_level, 'lib64') + assert os.path.basename(lib_parent) == 'lib', ( + "Unexpected parent dir: %r" % lib_parent) + if os.path.lexists(lib64_link): + return + if symlink: + os.symlink('lib', lib64_link) + else: + copyfile('lib', lib64_link) + +def resolve_interpreter(exe): + """ + If the executable given isn't an absolute path, search $PATH for the interpreter + """ + # If the "executable" is a version number, get the installed executable for + # that version + orig_exe = exe + python_versions = get_installed_pythons() + if exe in python_versions: + exe = python_versions[exe] + + if os.path.abspath(exe) != exe: + paths = os.environ.get('PATH', '').split(os.pathsep) + for path in paths: + if os.path.exists(join(path, exe)): + exe = join(path, exe) + break + if not os.path.exists(exe): + logger.fatal('The path %s (from --python=%s) does not exist' % (exe, orig_exe)) + raise SystemExit(3) + if not is_executable(exe): + logger.fatal('The path %s (from --python=%s) is not an executable file' % (exe, orig_exe)) + raise SystemExit(3) + return exe + +def is_executable(exe): + """Checks a file is executable""" + return os.path.isfile(exe) and os.access(exe, os.X_OK) + +############################################################ +## Relocating the environment: + +def make_environment_relocatable(home_dir): + """ + Makes the already-existing environment use relative paths, and takes out + the #!-based environment selection in scripts. 
+ """ + home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir) + activate_this = os.path.join(bin_dir, 'activate_this.py') + if not os.path.exists(activate_this): + logger.fatal( + 'The environment doesn\'t have a file %s -- please re-run virtualenv ' + 'on this environment to update it' % activate_this) + fixup_scripts(home_dir, bin_dir) + fixup_pth_and_egg_link(home_dir) + ## FIXME: need to fix up distutils.cfg + +OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3], + 'activate', 'activate.bat', 'activate_this.py', + 'activate.fish', 'activate.csh'] + +def fixup_scripts(home_dir, bin_dir): + if is_win: + new_shebang_args = ( + '%s /c' % os.path.normcase(os.environ.get('COMSPEC', 'cmd.exe')), + '', '.exe') + else: + new_shebang_args = ('/usr/bin/env', sys.version[:3], '') + + # This is what we expect at the top of scripts: + shebang = '#!%s' % os.path.normcase(os.path.join( + os.path.abspath(bin_dir), 'python%s' % new_shebang_args[2])) + # This is what we'll put: + new_shebang = '#!%s python%s%s' % new_shebang_args + + for filename in os.listdir(bin_dir): + filename = os.path.join(bin_dir, filename) + if not os.path.isfile(filename): + # ignore subdirs, e.g. .svn ones. + continue + lines = None + with open(filename, 'rb') as f: + try: + lines = f.read().decode('utf-8').splitlines() + except UnicodeDecodeError: + # This is probably a binary program instead + # of a script, so just ignore it. + continue + if not lines: + logger.warn('Script %s is an empty file' % filename) + continue + + old_shebang = lines[0].strip() + old_shebang = old_shebang[0:2] + os.path.normcase(old_shebang[2:]) + + if not old_shebang.startswith(shebang): + if os.path.basename(filename) in OK_ABS_SCRIPTS: + logger.debug('Cannot make script %s relative' % filename) + elif lines[0].strip() == new_shebang: + logger.info('Script %s has already been made relative' % filename) + else: + logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)' + % (filename, shebang)) + continue + logger.notify('Making script %s relative' % filename) + script = relative_script([new_shebang] + lines[1:]) + with open(filename, 'wb') as f: + f.write('\n'.join(script).encode('utf-8')) + + +def relative_script(lines): + "Return a script that'll work in a relocatable environment." + activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); exec(compile(open(activate_this).read(), activate_this, 'exec'), dict(__file__=activate_this)); del os, activate_this" + # Find the last future statement in the script. If we insert the activation + # line before a future statement, Python will raise a SyntaxError. + activate_at = None + for idx, line in reversed(list(enumerate(lines))): + if line.split()[:3] == ['from', '__future__', 'import']: + activate_at = idx + 1 + break + if activate_at is None: + # Activate after the shebang. + activate_at = 1 + return lines[:activate_at] + ['', activate, ''] + lines[activate_at:] + +def fixup_pth_and_egg_link(home_dir, sys_path=None): + """Makes .pth and .egg-link files use relative paths""" + home_dir = os.path.normcase(os.path.abspath(home_dir)) + if sys_path is None: + sys_path = sys.path + for path in sys_path: + if not path: + path = '.' 
+ if not os.path.isdir(path): + continue + path = os.path.normcase(os.path.abspath(path)) + if not path.startswith(home_dir): + logger.debug('Skipping system (non-environment) directory %s' % path) + continue + for filename in os.listdir(path): + filename = os.path.join(path, filename) + if filename.endswith('.pth'): + if not os.access(filename, os.W_OK): + logger.warn('Cannot write .pth file %s, skipping' % filename) + else: + fixup_pth_file(filename) + if filename.endswith('.egg-link'): + if not os.access(filename, os.W_OK): + logger.warn('Cannot write .egg-link file %s, skipping' % filename) + else: + fixup_egg_link(filename) + +def fixup_pth_file(filename): + lines = [] + prev_lines = [] + with open(filename) as f: + prev_lines = f.readlines() + for line in prev_lines: + line = line.strip() + if (not line or line.startswith('#') or line.startswith('import ') + or os.path.abspath(line) != line): + lines.append(line) + else: + new_value = make_relative_path(filename, line) + if line != new_value: + logger.debug('Rewriting path %s as %s (in %s)' % (line, new_value, filename)) + lines.append(new_value) + if lines == prev_lines: + logger.info('No changes to .pth file %s' % filename) + return + logger.notify('Making paths in .pth file %s relative' % filename) + with open(filename, 'w') as f: + f.write('\n'.join(lines) + '\n') + +def fixup_egg_link(filename): + with open(filename) as f: + link = f.readline().strip() + if os.path.abspath(link) != link: + logger.debug('Link in %s already relative' % filename) + return + new_link = make_relative_path(filename, link) + logger.notify('Rewriting link %s in %s as %s' % (link, filename, new_link)) + with open(filename, 'w') as f: + f.write(new_link) + +def make_relative_path(source, dest, dest_is_directory=True): + """ + Make a filename relative, where the filename is dest, and it is + being referred to from the filename source. + + >>> make_relative_path('/usr/share/something/a-file.pth', + ... '/usr/share/another-place/src/Directory') + '../another-place/src/Directory' + >>> make_relative_path('/usr/share/something/a-file.pth', + ... '/home/user/src/Directory') + '../../../home/user/src/Directory' + >>> make_relative_path('/usr/share/a-file.pth', '/usr/share/') + './' + """ + source = os.path.dirname(source) + if not dest_is_directory: + dest_filename = os.path.basename(dest) + dest = os.path.dirname(dest) + dest = os.path.normpath(os.path.abspath(dest)) + source = os.path.normpath(os.path.abspath(source)) + dest_parts = dest.strip(os.path.sep).split(os.path.sep) + source_parts = source.strip(os.path.sep).split(os.path.sep) + while dest_parts and source_parts and dest_parts[0] == source_parts[0]: + dest_parts.pop(0) + source_parts.pop(0) + full_parts = ['..']*len(source_parts) + dest_parts + if not dest_is_directory: + full_parts.append(dest_filename) + if not full_parts: + # Special case for the current directory (otherwise it'd be '') + return './' + return os.path.sep.join(full_parts) + + + +############################################################ +## Bootstrap script creation: + +def create_bootstrap_script(extra_text, python_version=''): + """ + Creates a bootstrap script, which is like this script but with + extend_parser, adjust_options, and after_install hooks. + + This returns a string that (written to disk of course) can be used + as a bootstrap script with your own customizations. The script + will be the standard virtualenv.py script, with your extra text + added (your extra text should be Python code). 
+ + If you include these functions, they will be called: + + ``extend_parser(optparse_parser)``: + You can add or remove options from the parser here. + + ``adjust_options(options, args)``: + You can change options here, or change the args (if you accept + different kinds of arguments, be sure you modify ``args`` so it is + only ``[DEST_DIR]``). + + ``after_install(options, home_dir)``: + + After everything is installed, this function is called. This + is probably the function you are most likely to use. An + example would be:: + + def after_install(options, home_dir): + subprocess.call([join(home_dir, 'bin', 'easy_install'), + 'MyPackage']) + subprocess.call([join(home_dir, 'bin', 'my-package-script'), + 'setup', home_dir]) + + This example immediately installs a package, and runs a setup + script from that package. + + If you provide something like ``python_version='2.5'`` then the + script will start with ``#!/usr/bin/env python2.5`` instead of + ``#!/usr/bin/env python``. You can use this when the script must + be run with a particular Python version. + """ + filename = __file__ + if filename.endswith('.pyc'): + filename = filename[:-1] + with codecs.open(filename, 'r', encoding='utf-8') as f: + content = f.read() + py_exe = 'python%s' % python_version + content = (('#!/usr/bin/env %s\n' % py_exe) + + '## WARNING: This file is generated\n' + + content) + return content.replace('##EXT' 'END##', extra_text) + +##EXTEND## + +def convert(s): + b = base64.b64decode(s.encode('ascii')) + return zlib.decompress(b).decode('utf-8') + +##file site.py +SITE_PY = convert(""" +eJzFPf1z2zaWv/OvwMqToZTKdOJ0e3tO3RsncVrfuYm3yc7m1vXoKAmyWFMkS5C2tTd3f/u9DwAE ++CHb2+6cphNLJPDw8PC+8PAeOhqNTopCZkuxyZd1KoWScblYiyKu1kqs8lJU66Rc7hdxWW3h6eIm +vpZKVLlQWxVhqygInv/GT/BcfF4nyqAA3+K6yjdxlSziNN2KZFPkZSWXYlmXSXYtkiypkjhN/g4t +8iwSz387BsFZJmDmaSJLcStLBXCVyFfiYlut80yM6wLn/DL6Y/xqMhVqUSZFBQ1KjTNQZB1XQSbl +EtCElrUCUiaV3FeFXCSrZGEb3uV1uhRFGi+k+K//4qlR0zAMVL6Rd2tZSpEBMgBTAqwC8YCvSSkW ++VJGQryRixgH4OcNsQKGNsU1U0jGLBdpnl3DnDK5kErF5VaM53VFgAhlscwBpwQwqJI0De7y8kZN +YElpPe7gkYiZPfzJMHvAPHH8LucAjh+z4C9Zcj9l2MA9CK5aM9uUcpXcixjBwk95Lxcz/WycrMQy +Wa2ABlk1wSYBI6BEmswPClqOb/UKfXdAWFmujGEMiShzY35JPaLgrBJxqoBt6wJppAjzd3KexBlQ +I7uF4QAikDToG2eZqMqOQ7MTOQAocR0rkJKNEuNNnGTArD/GC0L7r0m2zO/UhCgAq6XEL7Wq3PmP +ewgArR0CTANcLLOadZYmNzLdTgCBz4B9KVWdVigQy6SUiyovE6kIAKC2FfIekJ6KuJSahMyZRm6n +RH+iSZLhwqKAocDjSyTJKrmuS5IwsUqAc4Er3n/8Sbw7fXN28kHzmAHGMnu9AZwBCi20gxMMIA5q +VR6kOQh0FJzjHxEvlyhk1zg+4NU0OHhwpYMxzL2I2n2cBQey68XVw8AcK1AmNFZA/f4bukzVGujz +Pw+sdxCcDFGFJs7f7tY5yGQWb6RYx8xfyBnBtxrOd1FRrV8DNyiEUwGpFC4OIpggPCCJS7NxnklR +AIulSSYnAVBoTm39VQRW+JBn+7TWLU4ACGWQwUvn2YRGzCRMtAvrNeoL03hLM9NNArvOm7wkxQH8 +ny1IF6VxdkM4KmIo/jaX10mWIULIC0G4F9LA6iYBTlxG4pxakV4wjUTI2otbokjUwEvIdMCT8j7e +FKmcsviibt2tRmgwWQmz1ilzHLSsSL3SqjVT7eW9w+hLi+sIzWpdSgBezz2hW+X5VMxBZxM2Rbxh +8arucuKcoEeeqBPyBLWEvvgdKHqiVL2R9iXyCmgWYqhgladpfgckOwoCIfawkTHKPnPCW3gH/wJc +/DeV1WIdBM5IFrAGhcgPgUIgYBJkprlaI+Fxm2bltpJJMtYUebmUJQ31OGIfMOKPbIxzDT7klTZq +PF1c5XyTVKiS5tpkJmzxsrBi/fia5w3TAMutiGamaUOnDU4vLdbxXBqXZC5XKAl6kV7bZYcxg54x +yRZXYsNWBt4BWWTCFqRfsaDSWVWSnACAwcIXZ0lRp9RIIYOJGAbaFAR/E6NJz7WzBOzNZjlAhcTm +ewH2B3D7O4jR3ToB+iwAAmgY1FKwfPOkKtFBaPRR4Bt905/HB049W2nbxEOu4iTVVj7OgjN6eFqW +JL4LWWCvqSaGghlmFbp21xnQEcV8NBoFgXGHtsp8zVVQldsjYAVhxpnN5nWChm82Q1Ovf6iARxHO +wF43287CAw1hOn0AKjldVmW+wdd2bp9AmcBY2CPYExekZSQ7yB4nvkbyuSq9ME3RdjvsLFAPBRc/ +nb4/+3L6SRyLy0alTdv67ArGPM1iYGuyCMBUrWEbXQYtUfElqPvEezDvxBRgz6g3ia+Mqxp4F1D/ +XNb0Gqax8F4Gpx9O3pyfzv7y6fSn2aezz6eAINgZGezRlNE81uAwqgiEA7hyqSJtX4NOD3rw5uST 
+fRDMEjX75mtgN3gyvpYVMHE5hhlPRbiJ7xUwaDilphPEsdMALHg4mYjvxOHz568OCVqxLbYADMyu +0xQfzrRFnyXZKg8n1PgXdumPWUlp/+3y6OsrcXwswl/i2zgMwIdqmjJL/Eji9HlbSOhawZ9xriZB +sJQrEL0biQI6fk5+8YQ7wJJAy1zb6V/yJDPvmSvdIUh/jKkH4DCbLdJYKWw8m4VABOrQ84EOETvX +KHVj6Fhs3a4TjQp+SgkLm2GXKf7Tg2I8p36IBqPodjGNQFw3i1hJbkXTh36zGeqs2WysBwRhJokB +h4vVUChME9RZZQJ+LXEe6rC5ylP8ifBRC5AA4tYKtSQukt46RbdxWks1diYFRByPW2RERZso4kdw +UcZgiZulm0za1DQ8A82AfGkOWrRsUQ4/e+DvgLoymzjc6PHei2mGmP477zQIB3A5Q1T3SrWgsHYU +F6cX4tWLw310Z2DPubTU8ZqjhU6yWtqHK1gtIw+MMPcy8uLSZYV6Fp8e7Ya5iezKdFlhpZe4lJv8 +Vi4BW2RgZ5XFT/QGduYwj0UMqwh6nfwBVqHGb4xxH8qzB2lB3wGotyEoZv3N0u9xMEBmChQRb6yJ +1HrXz6awKPPbBJ2N+Va/BFsJyhItpnFsAmfhPCZDkwgaArzgDCl1J0NQh2XNDivhjSDRXiwbxRoR +uHPU1Ff09SbL77IZ74SPUemOJ5Z1UbA082KDZgn2xHuwQoBkDhu7hmgMBVx+gbK1D8jD9GG6QFna +WwAgMPSKtmsOLLPVoynyrhGHRRiT14KEt5ToL9yaIWirZYjhQKK3kX1gtARCgslZBWdVg2YylDXT +DAZ2SOJz3XnEW1AfQIuKEZjNsYbGjQz9Lo9AOYtzVyk5/dAif/nyhdlGrSm+gojNcdLoQqzIWEbF +FgxrAjrBeGQcrSE2uAPnFsDUSrOm2P8k8oK9MVjPCy3b4AfA7q6qiqODg7u7u0hHF/Ly+kCtDv74 +p2+++dML1onLJfEPTMeRFh1qiw7oHXq00bfGAn1nVq7Fj0nmcyPBGkvyysgVRfy+r5NlLo72J1Z/ +Ihc3Zhr/Na4MKJCZGZSpDLQdNRg9U/vPoldqJJ6RdbZtxxP2S7RJtVbMt7rQo8rBEwC/ZZHXaKob +TlDiK7BusENfynl9HdrBPRtpfsBUUU7Hlgf2X14hBj5nGL4ypniGWoLYAi2+Q/qfmG1i8o60hkDy +oonq7J63/VrMEHf5eHm3vqYjNGaGiULuQInwmzxaAG3jruTgR7u2aPcc19Z8PENgLH1gmFc7lmMU +HMIF12LqSp3D1ejxgjTdsWoGBeOqRlDQ4CTOmdoaHNnIEEGid2M2+7ywugXQqRU5NPEBswrQwh2n +Y+3arOB4QsgDx+IlPZHgIh913r3gpa3TlAI6LR71qMKAvYVGO50DX44NgKkYlX8ZcUuzTfnYWhRe +gx5gOceAkMFWHWbCN64PONob9bBTx+oP9WYa94HARRpzLOpR0AnlYx6hVCBNxdjvOcTilrjdwXZa +HGIqs0wk0mpAuNrKo1eodhqmVZKh7nUWKVqkOXjFVisSIzXvfWeB9kH4uM+YaQnUZGjI4TQ6Jm/P +E8BQt8Pw2XWNgQY3DoMYbRJF1g3JtIZ/wK2g+AYFo4CWBM2CeayU+RP7HWTOzld/GWAPS2hkCLfp +kBvSsRgajnm/J5CMOhoDUpABCbvCSK4jq4MUOMxZIE+44bUclG6CESmQM8eCkJoB3Omlt8HBJxGe +gJCEIuT7SslCfCVGsHxtUX2c7v5dudQEIcZOA3IVdPTi2I1sOFGN41aUw2doP75BZyVFDhw8B5fH +DfS7bG6Y1gZdwFn3FbdFCjQyxWFGExfVK0MYN5j8h2OnRUMsM4hhKG8g70jHjDQJ7HJr0LDgBoy3 +5u2x9GM3YoF9x2GuDuXmHvZ/YZmoRa5Cipm0YxfuR3NFlzYW2/NkPoI/3gKMJlceJJnq+AVGWf6B +QUIPetgH3ZsshkWWcXmXZCEpME2/Y39pOnhYUnpG7uATbacOYKIY8Tx4X4KA0NHnAYgTagLYlctQ +abe/C3bnFEcWLncfeW7z5dGrqy5xp0MRHvvpX6rT+6qMFa5WyovGQoGr1TXgqHRhcnG21YeX+nAb +twllrmAXKT5++iKQEBzXvYu3T5t6w/CIzYNz8j4GddBrD5KrNTtiF0AEtSIyykH4dI58PLJPndyO +iT0ByJMYZseiGEiaT/4ROLsWCsbYX24zjKO1VQZ+4PU3X896IqMukt98PXpglBYx+sR+3PIE7cic +VLBrtqWMU3I1nD4UVMwa1rFtignrc9r+aR676vE5NVo29t3fAj8GCobUJfgIL6YN2bpTxY/vTg3C +03ZqB7DObtV89mgRYG+fz3+BHbLSQbXbOEnpXAEmv7+PytVs7jle0a89PEg7FYxDgr79l7p8AdwQ +cjRh0p2OdsZOTMC5ZxdsPkWsuqjs6RyC5gjMywtwjz+HFU6ve+B7Bge/r7p8IiBvTqMeMmpbbIZ4 +wQclhz1K9gnzfvqMf9dZP27mw4L1/zHLF/+cST5hKgaaNh4+rH5iuXbXAHuEeRpwO3e4hd2h+axy +ZZw7VklKPEfd9VzcUboCxVbxpAigLNnv64GDUqoPvd/WZclH16QCC1nu43HsVGCmlvH8ek3Mnjj4 +ICvExDZbUKzayevJ+4Qv1NFnO5Ow2Tf0c+c6NzErmd0mJfQFhTsOf/j442nYb0IwjgudHm9FHu83 +INwnMG6oiRM+pQ9T6Cld/nH10d66+AQ1GQEmIqzJ1iVsJxBs4gj9a/BARMg7sOVjdtyhL9ZycTOT +lDqAbIpdnaD4W3yNmNiMAj//S8UrSmKDmSzSGmnFjjdmH67qbEHnI5UE/0qnCmPqECUEcPhvlcbX +Ykydlxh60txI0anbuNTeZ1HmmJwq6mR5cJ0shfy1jlPc1svVCnDBwyv9KuLhKQIl3nFOAyctKrmo +y6TaAglileuzP0p/cBrOtzzRsYckH/MwATEh4kh8wmnjeybc0pDLBAf8Ew+cJO67sYOTrBDRc3if +5TMcdUY5vlNGqnsuT4+D9gg5ABgBUJj/aKIjd/4bSa/cA0Zac5eoqCU9UrqRhpycMYQynmCkg3/T +T58RXd4awPJ6GMvr3Vhet7G87sXy2sfyejeWrkjgwtqglZGEvsBV+1ijN9/GjTnxMKfxYs3tMPcT +czwBoijMBtvIFKdAe5EtPt8jIKS2nQNnetjkzyScVFrmHALXIJH78RBLb+ZN8rrTmbJxdGeeinFn +h3KI/L4HUUSpYnPqzvK2jKs48uTiOs3nILYW3WkDYCra6UQcK81uZ3OO7rYs1ejiPz//8PEDNkdQ +I5PeQN1wEdGw4FTGz+PyWnWlqdn8FcCO1NJPxKFuGuDeIyNrPMoe//OOMjyQccQdZSjkogAPgLK6 
+bDM39ykMW891kpR+zkzOh03HYpRVo2ZSA0Q6ubh4d/L5ZEQhv9H/jlyBMbT1pcPFx7SwDbr+m9vc +Uhz7gFDr2FZj/Nw5ebRuOOJhG2vAdjzf1oPDxxjs3jCBP8t/KqVgSYBQkQ7+PoVQj945/Kb9UIc+ +hhE7yX/uyRo7K/adI3uOi+KIft+xQ3sA/7AT9xgzIIB2ocZmZ9DslVtK35rXHRR1gD7S1/vNe832 +1qu9k/EpaifR4wA6lLXNht0/75yGjZ6S1ZvT788+nJ+9uTj5/IPjAqIr9/HTwaE4/fGLoPwQNGDs +E8WYGlFhJhIYFrfQSSxz+K/GyM+yrjhIDL3enZ/rk5oNlrpg7jPanAiecxqThcZBM45C24c6/wgx +SvUGyakponQdqjnC/dKG61lUrvOjqVRpjs5qrbdeulbM1JTRuXYE0geNXVIwCE4xg1eUxV6ZXWHJ +J4C6zqoHKW2jbWJISkHBTrqAc/5lTle8QCl1hidNZ63oL0MX1/AqUkWawE7udWhlSXfD9JiGcfRD +e8DNePVpQKc7jKwb8qwHsUCr9Trkuen+k4bRfq0Bw4bB3sG8M0npIZSBjcltIsRGfJITynv4apde +r4GCBcODvgoX0TBdArOPYXMt1glsIIAn12B9cZ8AEFor4R8IHDnRAZljdkb4drPc/3OoCeK3/vnn +nuZVme7/TRSwCxKcShT2ENNt/A42PpGMxOnH95OQkaPUXPHnGssDwCGhAKgj7ZS/xCfos7GS6Urn +l/j6AF9oP4Fet7qXsih1937XOEQJeKbG5DU8U4Z+IaZ7WdhTnMqkBRorHyxmWEHopiGYz574tJZp +qvPdz96dn4LviMUYKEF87nYKw3G8BI/QdfIdVzi2QOEBO7wukY1LdGEpyWIZec16g9YoctTby8uw +60SB4W6vThS4jBPloj3GaTMsU04QISvDWphlZdZutUEKu22I4igzzBKzi5ISWH2eAF6mpzFviWCv +hKUeJgLPp8hJVpmMxTRZgB4FlQsKdQpCgsTFekbivDzjGHheKlMGBQ+LbZlcrys83YDOEZVgYPMf +T76cn32gsoTDV43X3cOcU9oJTDmJ5BhTBDHaAV/ctD/kqtmsj2f1K4SB2gf+tF9xdsoxD9Dpx4FF +/NN+xXVox85OkGcACqou2uKBGwCnW5/cNLLAuNp9MH7cFMAGMx8MxSKx7EUnerjz63KibdkyJRT3 +MS+fcICzKmxKmu7spqS1P3qOqwLPuZbj/kbwtk+2zGcOXW86b4aS39xPRwqxJBYw6rb2xzDZYZ2m +ejoOsw1xC21rtY39OXNipU67RYaiDEQcu50nLpP1K2HdnDnQS6PuABPfanSNJPaq8tHP2Uh7GB4m +ltidfYrpSGUsZAQwkiF17U8NPhRaBFAglP07diR3Onl+6M3RsQYPz1HrLrCNP4Ai1Lm4VOORl8CJ +8OVXdhz5FaGFevRIhI6nkskst3li+Llbo1f50p9jrwxQEBPFroyzazlmWFMD8yuf2AMhWNK2Hqkv +k6s+wyLOwDm9H+Dwrlz0H5wY1FqM0Gl3I7dtdeSTBxv0loLsJJgPvozvQPcXdTXmlRw4h+6tpRuG ++jBEzD6Epvr0fRxiOObXcGB9GsC91NCw0MP7deDsktfGOLLWPraqmkL7QnuwixK2ZpWiYxmnONH4 +otYLaAzucWPyR/apThSyv3vqxJyYkAXKg7sgvbmNdINWOGHE5UpcOZpQOnxTTaPfLeWtTMFogJEd +Y7XDL7baYRLZcEpvHthvxu5ie7Htx43eNJgdmXIMRIAKMXoDPbsQanDAFf5Z70Ti7Iac47d/PZuK +tx9+gn/fyI9gQbHmcSr+BqOLt3kJ20ou2qXbFLCAo+L9Yl4rLIwkaHRCwRdPoLd24ZEXT0N0ZYlf +UmIVpMBk2nLDt50AijxBKmRv3ANTLwG/TUFXywk1DmLfWoz0S6TBcI0L1oUc6JbRutqkaCac4Eiz +iJej87O3px8+nUbVPTK2+Tlygid+HhZORx8Nl3gMNhX2yaLGJ1eOv/yDTIsed1nvNU29DO41RQjb +kcLuL/kmjdjuKeISAwai2C7zRYQtgdO5RK+6A/954mwrH7TvnnFFWOOJPjxrnHh8DNQQP7f1zwga +Uh89J+pJCMVzrBXjx9Go3wJPBUW04c/zm7ulGxDXRT80wTamzazHfnerAtdMZw3PchLhdWyXwdSB +pkmsNvOFWx/4MRP6IhRQbnS8IVdxnVZCZrCVor093UgBCt4t6WMJYVZhK0Z1bhSdSe/irXJyj2Il +RjjqiIrq8RyGAoWw9f4xvmEzgLWGouYSaIBOiNK2KXe6qnqxZgnmnRBRryff4C7JXrnJL5rCPChv +jBeN/wrzRG+RMbqWlZ4/PxhPLl82CQ4UjF54Bb2LAoydyyZ7oDGL58+fj8S/Pez0MCpRmuc34I0B +7F5n5ZxeDxhsPTm7Wl2H3ryJgB8Xa3kJD64oaG6f1xlFJHd0pQWR9q+BEeLahJYZTfuWOeZYXcnn +y9yCz6m0wfhLltB1RxhRkqhs9a1RGG0y0kQsCYohjNUiSUKOTsB6bPMaa/Ewuqj5Rd4DxycIZopv +8WCMd9hrdCwpb9Zyj0XnWIwI8IhSyng0KmamajTAc3ax1WjOzrKkaspIXrhnpvoKgMreYqT5SsR3 +KBlmHi1iOGWdHqs2jnW+k0W9jUq+uHTjjK1Z8uuHcAfWBknLVyuDKTw0i7TIZbkw5hRXLFkklQPG +tEM43JkubyLrEwU9KI1AvZNVWFqJtm//YNfFxfQjHR/vm5F01lBlL8TimFCctfIKo6gZn6JPlpCW +b82XCYzygaLZ2hPwxhJ/0LFUrCHw7u1wyxnrTN/HwWkbzSUdAIfugLIK0rKjpyOci8csfGbagVs0 +8EM7c8LtNimrOk5n+tqHGfppM3uervG0ZXA7CzyttwK+fQ6O777O2AfHwSTXID0x49ZUZByLlY5M +RG5lmV+EVeTo5R2yrwQ+BVJmOTP10CZ2dGnZ1Raa6gRHR8UjqK9M8dKAQ26qZjoFJy7mU0pvMuUO +A86zn29JV1eI78T41VQctnY+i2KLNzkBss+Woe+KUTeYihMMMHNs34shvjsW45dT8ccd0KOBAY4O +3RHa+9gWhEEgr66eTMY0mRPZwr4U9of76hxG0PSM4+SqTf4umb4lKv1ri0pcIagTlV+2E5VbYw/u +WzsfH8lwA4pjlcjl/jOFJNRIN7p5mMEJPyyg37M5Wrp2vKmoocK5OWxG7ho96GhE4zbbQUxRulZf +XL+LuoYNp71zwKTJtFIV7S1zmMao0WsRFQDM+o7S8Bve7QLvNSlc/2zwiFUXAViwPREEXenJB2ZN +w0ZQH3QEn6QBHmAUEeJhaqMoXMl6goiEdA8OMdFXrUNsh+N/d+bhEoOho9AOlt98vQtPVzB7izp6 
+FnR3pYUnsra8ollu8+kPzHmM0tf1NwmMA6URHXBWzVWV5GYeYfYy30GT2yzmDV4GSSfTaBJT6bpN +vJXmW7/Qj6HYASWTwVqAJ1Wv8CD5lu62PFGU9IZX1Hx9+HJqKoMZkJ7Aq+jVV/oKSOpmLj/wfeyp +3rvBS93vMPoXB1hS+b3tq85uhqZ13LoLyh8spOjZJJpZOjSG6eE6kGbNYoF3JjbEZN/aXgDyHryd +Ofg55vLTHBw22JBGfei6GqOR3iHVNiDAD5uMIcl5VNdGkSLSu4RtSHnuUpxPFgXdq9+CYAgBOX8d +8xt0BeviyIbYjE3Bk8+xm82Jn+qmt+6M7Qka2+om3DV97r9r7rpFYGdukhk6c/frS10a6L7DVrSP +Bhze0IR4VIlEo/H7jYlrB6Y6h6Y/Qq8/SH63E850wKw8BMZk7GC8n9hTY2/M/iZeuN8xIWyfL2R2 +y4l7nY3WtDs2o83xj/EUOPkFn9sbBiijaak5kPdLdMPejHNkZ/L6Ws1ivN1xRptsyufq7J7Mtu09 +Xc4nY7U1uy28tAhAGG7Smbducj0wBuhKvmWa06Gc22kEDU1Jw04WskqWbBL01g7ARRwxpf4mEM9p +xKNUYqBb1WVRwm54pO8i5jydvtTmBqgJ4G1idWNQNz2m+mpaUqyUHGZKkDlO20ryASKwEe+YhtnM +vgNeedFcs5BMLTPIrN7IMq6aK4b8jIAENl3NCFR0jovrhOcaqWxxiYtYYnnDQQoDZPb7V7Cx9DbV +O+5VmFht93h2oh465PuUKxscY2S4OLm31wu611ot6Wpr1zu0zRqus1cqwTKYu/JIR+pYGb/V93fx +HbMcyUf/0uEfkHe38tLPQrfqjL1bi4bzzFUI3Qub8MYAMs599zB2OKB742JrA2zH9/WFZZSOhznQ +2FJR++S9CqcZbdJEkDBh9IEIkl8U8MQIkgf/kREkfWsmGBqNj9YDvWUCD4SaWD24V1A2jAB9ZkAk +PMBuXWBoTOXYTbovcpXcj+yF0qwrnUo+Yx6QI7t3kxEIvmpSuRnK3lVwuyJIvnTR4+/PP745OSda +zC5O3v7HyfeUlIXHJS1b9egQW5bvM7X3vfRvN9ymE2n6Bm+w7bkhlmuYNITO+04OQg+E/nq1vgVt +KzL39VCHTt1PtxMgvnvaLahDKrsXcscv0zUmbvpMK0870E85qdb8cjITzCNzUsfi0JzEmffN4YmW +0U5seWjhnPTWrjrR/qq+BXQg7j2xSda0Anhmgvxlj0xMxYwNzLOD0v7ffFBmOFYbmht0QAoX0rnJ +kS5xZFCV//8TKUHZxbi3Y0dxau/mpnZ8PKTspfN49ruQkSGIV+436s7PFfalTAeoEASs8PQ9hYyI +0X/6QNWmHzxT4nKfCov3Udlc2V+4Ztq5/WuCSQaVve9LcYISH7NC41WduokDtk+nAzl9dBqVr5xK +FtB8B0DnRjwVsDf6S6wQ51sRwsZRu2SYHEt01Jf1Ocij3XSwN7R6IfaHyk7dskshXg43XLYqO3WP +Q+6hHuihalPc51hgzNIcqicV3xFkPs4UdMGX53zgGbre9sPX28uXR/ZwAfkdXzuKhLLJRo5hv3Sy +MXdeKul0J2Ypp5Suh3s1JySsW1w5UNknGNrbdEpSBvY/Js+BIY289/0hM9PDu3p/1MbUst4RTEmM +n6kJTcsp4tG42yeT7nQbtdUFwgVJjwDSUYEAC8F0dKOTILrlLO/xC70bnNd0Ha97whQ6UkHJYj5H +cA/j+zX4tbtTIfGjujOKpj83aHOgXnIQbvYduNXEC4UMm4T21Bs+GHABuCa7v//LR/TvpjHa7oe7 +/Grb6lVvHSD7spj5iplBLRKZxxEYGdCbY9LWWC5hBB2voWno6DJUMzfkC3T8KJsWL9umDQY5szPt +AVijEPwfucjncQ== +""") + +##file activate.sh +ACTIVATE_SH = convert(""" +eJytVd9v2kAMfs9fYQLq2m4MscdNVKMqEkgtVIQxbeuUHolpTgsXdHehpT/+9/mSEBJS2MOaB0ji +z77P9menDpOAK5jzEGERKw0zhFihD/dcB2CrKJYewoyLFvM0XzGNNpzOZbSAGVPBqVWHdRSDx4SI +NMhYANfgc4meDteW5ePGC45P4MkCumKhUENzDsu1H3lw1vJx1RJxGMKns6O2lWDqINGgotAHFCsu +I7FAoWHFJGezEFWGqsEvaD5C42naHb93X+A3+elYCgVaxgh8DmQAys9HL2SS0mIaWBgm7mTN/O3G +kzu6vHCng/HkW/fSve5O+hTOpnhfQAcoEry5jKVjNypoO0fgwzKSOgHm79KUK06Jfc7/RebHpD8a +9kdXvT2UcnuFWG6p0stNB0mWUUQ1q3uiGRVEMfXHR03dTuQATPjwqIIPcB9wL4CArRAY/ZHJixYL +Y9YBtcAoLQtFevOoI9QaHcEdMSAB0d08kuZhyUiSmav6CPCdVBnFOjNrLu6yMCWgKRA0TInBC5i4 +QwX3JG/mm581GKnSsSSxJTFHf9MAKr8w5T/vOv1mUurn5/zlT6fvTntjZzAaNl9rQ5JkU5KIc0GX +inagwU57T2eddqWlTrvaS6d9sImZeUMkhWysveF0m37NcGub9Dpgi0j4qGiOzATjDr06OBjOYQOo +7RBoGtNm9Denv1i0LVI7lxJDXLHSSBeWRflsyyqw7diuW3h0XdvK6lBMyaoMG1UyHdTsoYBuue75 +YOgOu1c91/2cwYpznPPeDoQpGL2xSm09NKp7BsvQ2hnT3aMs07lUnskpxewvBk73/LLnXo9HV9eT +ijB3hWBO2ygoiWg/bKuZxqCCQq0DD3vkWIVvI2KosIw+vqW1gIItEG5KJb+xb09g65ktwYKgTc51 +uGJ/EFQs0ayEWLCQM5V9N4g+1+8UbXOJzF8bqhKtIqIwicWvzNFROZJlpfD8A7Vc044R0FxkcezG +VzsV75usvTdYef+57v5n1b225qhXfwEmxHEs +""") + +##file activate.fish +ACTIVATE_FISH = convert(""" +eJyFVVFv2zYQftevuMoOnBS1gr0WGIZ08RADSRw4boBhGGhGOsUcKFIjKbUu9uN7lC2JsrXWDzZM +fnf38e6+uwlsdsJCLiRCUVkHrwiVxYy+hHqDbQKvQl3z1ImaO0xyYXdbeP9FuJ1QwMFUSnmcP4dL +2DlXfry+9v/sDqVMUl3AFVi0Vmj1PokmcKtBaecNQTjIhMHUyX0SRXmlKIpWkGEbDuYZzBZfCVcL +4youUdVQ6AyBqwwMusoocBrcDsmpKbgEQgijVYHKJbMI6DMhoEUHWmbhLdTcCP4q0TYokYNDev5c +QTxlq/tb9rJcbz7f3LOnm81d3GD8x3uav30FfwrnwCEOYRyAKot+FvXPzd3q8W71sBiJ3d2dMugu 
+fsxjCPsBmz+Wz3fsab16eNqw1ctivV7eBnwm8EzeuQIsSrcHqVMqwHbqq8/aarKSO+oYKhKXUn9p +SmWw0DVBdQ7bBlwaTR62bc+1tpaYb5PhUyScu48CRgvDLQbtMrMnMQ6dY5022JDRRrwJxWUfJwwP +ge0YIAVGfcUC1M8s8MxitFZjmR9W64hui7p4fBlWMZ5y81b/9cvfMbz7FWZKq4yOTeW1hbNBEWU+ +b+/ejXMu95lOx696uXb8Go4T+Kw8R2EMSqx5KLkkCkQ+ZBZFbZsHL4OYseAvY3EPO5MYTBuhDZQa +TwPza8Y+LR/Z483Dgjwd4R3f7bTXx9Znkw6T6PAL83/hRD3jNAKFjuEx9NJkq5t+fabLvdvRwbw4 +nEFTzwO6U+q34cvY7fL55tP94tg58XEA/q7LfdPsaUXFoEIMJdHF5iSW0+48CnDQ82G7n3XzAD6q +Bmo5XuOA0NQ67ir7AXJtQhtLKO7XhC0l39PGOBsHPvzBuHUSjoOnA0ldozGC9gZ5rek3+y3ALHO/ +kT7AP379lQZLSnFDLtwWihfYxw4nZd+ZR7myfkI2ZTRCuRxmF/bCzkbhcElvYamW9PbDGrvqPKC0 ++D/uLi/sFcxGjOHylYagZzzsjjhw206RQwrWIwOxS2dnk+40xOjX8bTPegz/gdWVSXuaowNuOLda +wYyNuRPSTcd/B48Ppeg= +""") + +##file activate.csh +ACTIVATE_CSH = convert(""" +eJx1U2FP2zAQ/e5f8TAV3Soo+0zXbYUiDQkKQgVp2ibjJNfFUuIg22nVf885SVFLO3+I7Lt3fr6X +d8eY58ZjYQpCWfuAhFB7yrAyIYf0Ve1SQmLsuU6DWepAw9TnEoOFq0rwdjAUx/hV1Ui1tVWAqy1M +QGYcpaFYx+yVI67LkKwx1UuTEaYGl4X2Bl+zJpAlP/6V2hTDtCq/DYXQhdEeGW040Q/Eb+t9V/e3 +U/V88zh/mtyqh8n8J47G+IKTE3gKZJdoYrK3h5MRU1tGYS83gqNc+3yEgyyP93cP820evHLvr2H8 +kaYB/peoyY7aVHzpJnE9e+6I5Z+ji4GMTNJWNuOQq6MA1N25p8pW9HWdVWlfsNpPDbdxjgpaahuw +1M7opCA/FFu1uwxC7L8KUqmto1KyQe3rx0I0Eovdf7BVe67U5c1MzSZ310pddGheZoFPWyytRkzU +aCA/I+RkBXhFXr5aWV0SxjhUI6jwdAj8kmhPzX7nTfJFkM3MImp2VdVFFq1vLHSU5szYQK4Ri+Jd +xlW2JBtOGcyYVW7SnB3v6RS91g3gKapZ0oWxbHVteYIIq3iv7QeuSrUj6KSqQ+yqsxDj1ivNQxKF +YON10Q+NH/ARS95i5Tuqq2Vxfvc23f/FO6zrtXXmJr+ZtMY9/A15ZXFWtmch2rEQ4g1ryVHH +""") + +##file activate.bat +ACTIVATE_BAT = convert(""" +eJx9Ul9LhEAQfxf8DoOclI/dYyFkaCmcq4gZQTBUrincuZFbff12T133TM+nnd35/Zvxlr7XDFhV +mUZHOVhFlOWP3g4DUriIWoVomYZpNBWUtGpaWgImO191pFkSpzlcmgaI70jVX7n2Qp8tuByg+46O +CMHbMq64T+nmlJt082D1T44muCDk2prgEHF4mdI9RaS/QwSt3zSyIAaftRccvqVTBziD1x/WlPD5 +xd729NDBb8Nr4DU9QNMKsJeH9pkhPedhQsIkDuCDCa6A+NF9IevVFAohkqizdHetg/tkWvPoftWJ +MCqnOxv7/x7Np6yv9P2Ker5dmX8yNyCkkWnbZy3N5LarczlqL8htx2EM9rQ/2H5BvIsIEi8OEG8U ++g8CsNTr +""") + +##file deactivate.bat +DEACTIVATE_BAT = convert(""" +eJyFkN0KgkAUhO8F32EQpHqFQEjQUPAPMaErqVxzId3IrV6/XST/UDx3c86c4WMO5FYysKJQFVVp +CEfqxsnJ9DI7SA25i20fFqs3HO+GYLsDZ7h8GM3xfLHrg1QNvpSX4CWpQGvokZk4uqrQAjXjyElB +a5IjCz0r+2dHcehHCe5MZNmB5R7TdqMqECMptHZh6DN/utb7Zs6Cej8OXYE5J04YOKFvD4GkHuJ0 +pilSd1jG6n87tDZ+BUwUOepI6CGSkFMYWf0ihvT33Qj1A+tCkSI= +""") + +##file activate.ps1 +ACTIVATE_PS = convert(""" +eJylWdmO41hyfW+g/0FTU7C7IXeJIqmtB/3AnZRIStxF2kaBm7gv4ipyMF/mB3+Sf8GXVGVl1tLT +43ECSqR4b5wbETeWE8z/+a///vNCDaN6cYtSf5G1dbNw/IVXNIu6aCvX9xa3qsgWl0IJ/7IYinbh +2nkOVqs2X0TNjz/8eeFFle826fBhQRaLBkD9uviw+LCy3Sbq7Mb/UNbrH3+YNtLcVaB+Xbipb+eL +tly0eVsD/M6u6g8//vC+dquobH5VWU75eMFUdvHb4n02RHlXuHYTFfmHbHCLLLNz70NpN+GrBI4p +1EeSk4FAXaZR88u0vPip8usi7fznt3fvP+OuPnx49/Pil4td+XnzigIAPoqYQH2J8v4z+C+8b98m +Q25t7k76LIK0cOz0V89/MXXx0+Lf6z5q3PA/F+/FIif9uqnaadFf/PzXSXYBfqIb2NeApecJwPzI +dlL/149nnvyoc7KqYfzTAT8v/voUmX7e+3n364tffl/oVaDyswKY/7J18e6bve8Wv9RuUfqfLHmK +/u139Hwx+9ePRep97KKqae30YwmCo2y+0vTz1k+rv7159B3pb1SOGj97Pe8/flfkC1Vn/7xYR4n6 +lypNEGDDV5f7lcjil3S+4++p881Wv6qKyn5GQg1yJwcp4BZ5E+Wt/z1P/umbiHir4J8Xip/eFt6n +9T/9gU9eY+7zUX97Jlmb136ziKrKT/3OzpvP8VX/+MObSP0lL3LvVZlJ9v1b8357jXyw8rXxYPXN +11n4UzJ8G8S/vUbuJ6RPj999DbtS5kys//JusXwrNLnvT99cFlBNwXCe+niRz8JF/ezNr9Pze+H6 +18W7d5PPvozW7+387Zto/v4pL8BvbxTzvIW9KCv/Fj0WzVQb/YXbVlPZWTz3/9vCaRtQbPN/Bb+j +2rUrDxTVD68gfQXu/ZewAFX53U/vf/rD2P3558W7+W79Po1y/xXoX/6RFHyNIoVjgAG4H0RTcAe5 +3bSVv3DSwk2mZYHjFB8zj6fC4sLOFTHJJQrwzFYJgso0ApOoBzFiRzzQKjIQCCbQMIFJGCKqGUyS +8AkjiF2wTwmMEbcEUvq8Nj+X0f4YcCQmYRiOY7eRbAJDqzm1chOoNstbJ8oTBhZQ2NcfgaB6QjLp 
+U4+SWFjQGCZpyqby8V4JkPGs9eH1BscXIrTG24QxXLIgCLYNsIlxSYLA6SjAeg7HAg4/kpiIB8k9 +TCLm0EM4gKIxEj8IUj2dQeqSxEwYVH88qiRlCLjEYGuNIkJB1BA5dHOZdGAoUFk54WOqEojkuf4Q +Ig3WY+96TDlKLicMC04h0+gDCdYHj0kz2xBDj9ECDU5zJ0tba6RKgXBneewhBG/xJ5m5FX+WSzsn +wnHvKhcOciw9NunZ0BUF0n0IJAcJMdcLqgQb0zP19dl8t9PzmMBjkuIF7KkvHgqEovUPOsY0PBB1 +HCtUUhch83qEJPjQcNQDsgj0cRqx2ZbnnlrlUjE1EX2wFJyyDa/0GLrmKDEFepdWlsbmVU45Wiwt +eFM6mfs4kxg8yc4YmKDy67dniLV5FUeO5AKNPZaOQQ++gh+dXE7dbJ1aTDr7S4WPd8sQoQkDyODg +XnEu/voeKRAXZxB/e2xaJ4LTFLPYEJ15Ltb87I45l+P6OGFA5F5Ix8A4ORV6M1NH1uMuZMnmFtLi +VpYed+gSq9JDBoHc05J4OhKetrk1p0LYiKipxLMe3tYS7c5V7O1KcPU8BJGdLfcswhoFCSGQqJ8f +ThyQKy5EWFtHVuNhvTnkeTc8JMpN5li3buURh0+3ZGuzdwM55kon+8urbintjdQJf9U1D0ah+hNh +i1XNu4fSKbTC5AikGEaj0CYM1dpuli7EoqUt7929f1plxGGNZnixFSFP2qzhlZMonu2bB9OWSqYx +VuHKWNGJI8kqUhMTRtk0vJ5ycZ60JlodlmN3D9XiEj/cG2lSt+WV3OtMgt1Tf4/Z+1BaCus740kx +Nvj78+jMd9tq537Xz/mNFyiHb0HdwHytJ3uQUzKkYhK7wjGtx3oKX43YeYoJVtqDSrCnQFzMemCS +2bPSvP+M4yZFi/iZhAjL4UOeMfa7Ex8HKBqw4umOCPh+imOP6yVTwG2MplB+wtg97olEtykNZ6wg +FJBNXSTJ3g0CCTEEMdUjjcaBDjhJ9fyINXgQVHhA0bjk9lhhhhOGzcqQSxYdj3iIN2xGEOODx4qj +Q2xikJudC1ujCVOtiRwhga5nPdhe1gSa649bLJ0wCuLMcEYIeSy25YcDQHJb95nfowv3rQnin0fE +zIXFkM/EwSGxvCCMgEPNcDp/wph1gMEa8Xd1qAWOwWZ/KhjlqzgisBpDDDXz9Cmov46GYBKHC4zZ +84HJnXoTxyWNBbXV4LK/r+OEwSN45zBp7Cub3gIYIvYlxon5BzDgtPUYfXAMPbENGrI+YVGSeTQ5 +i8NMB5UCcC+YRGIBhgs0xhAGwSgYwywpbu4vpCSTdEKrsy8osXMUnHQYenQHbOBofLCNNTg3CRRj +A1nXY2MZcjnXI+oQ2Zk+561H4CqoW61tbPKv65Y7fqc3TDUF9CA3F3gM0e0JQ0TPADJFJXVzphpr +2FzwAY8apGCju1QGOiUVO5KV6/hKbtgVN6hRVwpRYtu+/OC6w2bCcGzZQ8NCc4WejNEjFxOIgR3o +QqR1ZK0IaUxZ9nbL7GWJIjxBARUhAMnYrq/S0tVOjzlOSYRqeIZxaSaOBX5HSR3MFekOXVdUPbjX +nru61fDwI8HRYPUS7a6Inzq9JLjokU6P6OzT4UCH+Nha+JrU4VqEo4rRHQJhVuulAnvFhYz5NWFT +aS/bKxW6J3e46y4PLagGrCDKcq5B9EmP+s1QMCaxHNeM7deGEV3WPn3CeKjndlygdPyoIcNaL3dd +bdqPs47frcZ3aNWQ2Tk+rjFR01Ul4XnQQB6CSKA+cZusD0CP3F2Ph0e78baybgioepG12luSpFXi +bHbI6rGLDsGEodMObDG7uyxfCeU+1OiyXYk8fnGu0SpbpRoEuWdSUlNi5bd9nBxYqZGrq7Qa7zV+ +VLazLcelzzP9+n6+xUtWx9OVJZW3gk92XGGkstTJ/LreFVFF2feLpXGGuQqq6/1QbWPyhJXIXIMs +7ySVlzMYqoPmnmrobbeauMIxrCr3sM+qs5HpwmmFt7SM3aRNQWpCrmeAXY28EJ9uc966urGKBL9H +18MtDE5OX97GDOHxam11y5LCAzcwtkUu8wqWI1dWgHyxGZdY8mC3lXzbzncLZ2bIUxTD2yW7l9eY +gBUo7uj02ZI3ydUViL7oAVFag37JsjYG8o4Csc5R7SeONGF8yZP+7xxi9scnHvHPcogJ44VH/LMc +Yu6Vn3jEzCFw9Eqq1ENQAW8aqbUwSiAqi+nZ+OkZJKpBL66Bj8z+ATqb/8qDIJUeNRTwrI0YrVmb +9FArKVEbCWUNSi8ipfVv+STgkpSsUhcBg541eeKLoBpLGaiHTNoK0r4nn3tZqrcIULtq20Df+FVQ +Sa0MnWxTugMuzD410sQygF4qdntbswiJMqjs014Irz/tm+pd5oygJ0fcdNbMg165Pqi7EkYGAXcB +dwxioCDA3+BY9+JjuOmJu/xyX2GJtaKSQcOZxyqFzTaa6/ot21sez0BtKjirROKRm2zuai02L0N+ +ULaX8H5P6VwsGPbYOY7sAy5FHBROMrMzFVPYhFHZ7M3ZCZa2hsT4jGow6TGtG8Nje9405uMUjdF4 +PtKQjw6yZOmPUmO8LjFWS4aPCfE011N+l3EdYq09O3iQJ9a01B3KXiMF1WmtZ+l1gmyJ/ibAHZil +vQzdOl6g9PoSJ4TM4ghTnTndEVMOmsSSu+SCVlGCOLQRaw9oLzamSWP62VuxPZ77mZYdfTRGuNBi +KyhZL32S2YckO/tU7y4Bf+QKKibQSKCTDWPUwWaE8yCBeL5FjpbQuAlb53mGX1jptLeRotREbx96 +gnicYz0496dYauCjpTCA4VA0cdLJewzRmZeTwuXWD0talJsSF9J1Pe72nkaHSpULgNeK1+o+9yi0 +YpYwXZyvaZatK2eL0U0ZY6ekZkFPdC8JTF4Yo1ytawNfepqUKEhwznp6HO6+2l7L2R9Q3N49JMIe +Z+ax1mVaWussz98QbNTRPo1xu4W33LJpd9H14dd66ype7UktfEDi3oUTccJ4nODjwBKFxS7lYWiq +XoHu/b7ZVcK5TbRD0F/2GShg2ywwUl07k4LLqhofKxFBNd1grWY+Zt/cPtacBpV9ys2z1moMLrT3 +W0Elrjtt5y/dvDQYtObYS97pqj0eqmwvD3jCPRqamGthLiF0XkgB6IdHLBBwDGPiIDh7oPaRmTrN +tYA/yQKFxRiok+jM6ciJq/ZgiOi5+W4DEmufPEubeSuYJaM3/JHEevM08yJAXUQwb9LS2+8FOfds +FfOe3Bel6EDSjIEIKs4o9tyt67L1ylQlzhe0Q+7ue/bJnWMcD3q6wDSIQi8ThnRM65aqLWesi/ZM +xhHmQvfKBbWcC194IPjbBLYR9JTPITbzwRcu+OSFHDHNSYCLt29sAHO6Gf0h/2UO9Xwvhrjhczyx 
+Ygz6CqP4IwxQj5694Q1Pe2IR+KF/yy+5PvCL/vgwv5mPp9n4kx7fnY/nmV++410qF/ZVCMyv5nAP +pkeOSce53yJ6ahF4aMJi52by1HcCj9mDT5i+7TF6RoPaLL+cN1hXem2DmX/mdIbeeqwQOLD5lKO/ +6FM4x77w6D5wMx3g0IAfa2D/pgY9a7bFQbinLDPz5dZi9ATIrd0cB5xfC0BfCCZO7TKP0jQ2Meih +nRXhkA3smTAnDN9IW2vA++lsgNuZ2QP0UhqyjUPrDmgfWP2bWWiKA+YiEK7xou8cY0+d3/bk0oHR +QLrq4KzDYF/ljQDmNhBHtkVNuoDey6TTeaD3SHO/Bf4d3IwGdqQp6FuhmwFbmbQBssDXVKDBYOpk +Jy7wxOaSRwr0rDmGbsFdCM+7XU/84JPu3D/gW7QXgzlvbjixn99/8CpWFUQWHFEz/RyXvzNXTTOd +OXLNNFc957Jn/YikNzEpUdRNxXcC6b76ccTwMGoKj5X7c7TvHFgc3Tf4892+5A+iR+D8OaaE6ACe +gdgHcyCoPm/xiDCWP+OZRjpzfj5/2u0i4qQfmIEOsTV9Hw6jZ3Agnh6hiwjDtGYxWvt5TiWEuabN +77YCyRXwO8P8wdzG/8489KwfFBZWI6Vvx76gmlOc03JI1HEfXYZEL4sNFQ3+bqf7e2hdSWQknwKF +ICJjGyDs3fdmnnxubKXebpQYLjPgEt9GTzKkUgTvOoQa1J7N3nv4sR6uvYFLhkXZ+pbCoU3K9bfq +gF7W82tNutRRZExad+k4GYYsCfmEbvizS4jsRr3fdzqjEthpEwm7pmN7OgVzRbrktjrFw1lc0vM8 +V7dyTJ71qlsd7v3KhmHzeJB35pqEOk2pEe5uPeCToNkmedmxcKbIj+MZzjFSsvCmimaMQB1uJJKa ++hoWUi7aEFLvIxKxJavqpggXBIk2hr0608dIgnfG5ZEprqmH0b0YSy6jVXTCuIB+WER4d5BPVy9Q +M4taX0RIlDYxQ2CjBuq78AAcHQf5qoKP8BXHnDnd/+ed5fS+csL4g3eWqECaL+8suy9r8hx7c+4L +EegEWdqAWN1w1NezP34xsxLkvRRI0DRzKOg0U+BKfQY128YlYsbwSczEg2LqKxRmcgiwHdhc9MQJ +IwKQHlgBejWeMGDYYxTOQUiJOmIjJbzIzHH6lAMP+y/fR0v1g4wx4St8fcqTt3gz5wc+xXFZZ3qI +JpXI5iJk7xmNL2tYsDpcqu0375Snd5EKsIvg8u5szTOyZ4v06Ny2TZXRpHUSinh4IFp8Eoi7GINJ +02lPJnS/9jSxolJwp2slPMIEbjleWw3eec4XaetyEnSSqTPRZ9fVA0cPXMqzrPYQQyrRux3LaAh1 +wujbgcObg1nt4iiJ5IMbc/WNPc280I2T4nTkdwG8H6iS5xO2WfsFsruBwf2QkgZlb6w7om2G65Lr +r2Gl4dk63F8rCEHoUJ3fW+pU2Srjlmcbp+JXY3DMifEI22HcHAvT7zzXiMTr7VbUR5a2lZtJkk4k +1heZZFdru8ucCWMTr3Z4eNnjLm7LW7rcN7QjMpxrsCzjxndeyFUX7deIs3PQkgyH8k6luI0uUyLr +va47TBjM4JmNHFzGPcP6BV6cYgQy8VQYZe5GmzZHMxyBYhGiUdekZQ/qwyxC3WGylQGdUpSf9ZCP +a7qPdJd31fPRC0TOgzupO7nLuBGr2A02yuUQwt2KQG31sW8Gd9tQiHq+hPDt4OzJuY4pS8XRsepY +tsd7dVEfJFmc15IYqwHverrpWyS1rFZibDPW1hUUb+85CGUzSBSTK8hpvee/ZxonW51TUXekMy3L +uy25tMTg4mqbSLQQJ+skiQu2toIfBFYrOWql+EQipgfT15P1aq6FDK3xgSjIGWde0BPftYchDTdM +i4QdudHFkN0u6fSKiT09QLv2mtSblt5nNzBR6UReePNs+khE4rHcXuoK21igUKHl1c3MXMgPu7y8 +rKQDxR6N/rffXv+lROXet/9Q+l9I4D1U +""") + +##file distutils-init.py +DISTUTILS_INIT = convert(""" +eJytV1uL4zYUfvevOE0ottuMW9q3gVDa3aUMXXbLMlDKMBiNrSTqOJKRlMxkf33PkXyRbGe7Dw2E +UXTu37lpxLFV2oIyifAncxmOL0xLIfcG+gv80x9VW6maw7o/CANSWWBwFtqeWMPlGY6qPjV8A0bB +C4eKSTgZ5LRgFeyErMEeOBhbN+Ipgeizhjtnhkn7DdyjuNLPoCS0l/ayQTG0djwZC08cLXozeMss +aG5EzQ0IScpnWtHSTXuxByV/QCmxE7y+eS0uxWeoheaVVfqSJHiU7Mhhi6gULbOHorshkrEnKxpT +0n3A8Y8SMpuwZx6aoix3ouFlmW8gHRSkeSJ2g7hU+kiHLDaQw3bmRDaTGfTnty7gPm0FHbIBg9U9 +oh1kZzAFLaue2R6htPCtAda2nGlDSUJ4PZBgCJBGVcwKTAMz/vJiLD+Oin5Z5QlvDPdulC6EsiyE +NFzb7McNTKJzbJqzphx92VKRFY1idenzmq3K0emRcbWBD0ryqc4NZGmKOOOX9Pz5x+/l27tP797c +f/z0d+4NruGNai8uAM0bfsYaw8itFk8ny41jsfpyO+BWlpqfhcG4yxLdi/0tQqoT4a8Vby382mt8 +p7XSo7aWGdPBc+b6utaBmCQ7rQKQoWtAuthQCiold2KfJIPTT8xwg9blPumc+YDZC/wYGdAyHpJk +vUbHbHWAp5No6pK/WhhLEWrFjUwtPEv1Agf8YmnsuXUQYkeZoHm8ogP16gt2uHoxcEMdf2C6pmbw +hUMsWGhanboh4IzzmsIpWs134jVPqD/c74bZHdY69UKKSn/+KfVhxLgUlToemayLMYQOqfEC61bh +cbhwaqoGUzIyZRFHPmau5juaWqwRn3mpWmoEA5nhzS5gog/5jbcFQqOZvmBasZtwYlG93k5GEiyw +buHhMWLjDarEGpMGB2LFs5nIJkhp/nUmZneFaRth++lieJtHepIvKgx6PJqIlD9X2j6pG1i9x3pZ +5bHuCPFiirGHeO7McvoXkz786GaKVzC9DSpnOxJdc4xm6NSVq7lNEnKdVlnpu9BNYoKX2Iq3wvgh +gGEUM66kK6j4NiyoneuPLSwaCWDxczgaolEWpiMyDVDb7dNuLAbriL8ig8mmeju31oNvQdpnvEPC +1vAXbWacGRVrGt/uXN/gU0CDDwgooKRrHfTBb1/s9lYZ8ZqOBU0yLvpuP6+K9hLFsvIjeNhBi0KL +MlOuWRn3FRwx5oHXjl0YImUx0+gLzjGchrgzca026ETmYJzPD+IpuKzNi8AFn048Thd63OdD86M6 +84zE8yQm0VqXdbbgvub2pKVnS76icBGdeTHHXTKspUmr4NYo/furFLKiMdQzFjHJNcdAnMhltBJK 
+0/IKX3DVFqvPJ2dLE7bDBkH0l/PJ29074+F0CsGYOxsb7U3myTUncYfXqnLLfa6sJybX4g+hmcjO +kMRBfA1JellfRRKJcyRpxdS4rIl6FdmQCWjo/o9Qz7yKffoP4JHjOvABcRn4CZIT2RH4jnxmfpVG +qgLaAvQBNfuO6X0/Ux02nb4FKx3vgP+XnkX0QW9pLy/NsXgdN24dD3LxO2Nwil7Zlc1dqtP3d7/h +kzp1/+7hGBuY4pk0XD/0Ao/oTe/XGrfyM773aB7iUhgkpy+dwAMalxMP0DrBcsVw/6p25+/hobP9 +GBknrWExDhLJ1bwt1NcCNblaFbMKCyvmX0PeRaQ= +""") + +##file distutils.cfg +DISTUTILS_CFG = convert(""" +eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH +xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg +9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q= +""") + +##file activate_this.py +ACTIVATE_THIS = convert(""" +eJyNU01v2zAMvetXEB4K21jnDOstQA4dMGCHbeihlyEIDMWmE62yJEiKE//7kXKdpEWLzYBt8evx +kRSzLPs6wiEoswM8YdMpjUXcq1Dz6RZa1cSiTkJdr86GsoTRHuCotBayiWqQEYGtMCgfD1KjGYBe +5a3p0cRKiEe2NtLAFikftnDco0ko/SFEVgEZ8aRCZDIPY9xbA8pE9M4jfW/B2CjiHq9zbJVZuOQq +siwTIvpxKYCembPAU4Muwi/Z4zfvrZ/MXipKeB8C+qisSZYiWfjJfs+0/MFMdWn1hJcO5U7G/SLa +xVx8zU6VG/PXLXvfsyyzUqjeWR8hjGE+2iCE1W1tQ82hsCJN9dzKaoexyB/uH79TnjwvxcW0ntSb +yZ8jq1Z5Q1UXsyy3gf9nbjTEj7NzQMfCJa/YSmrQ+2D/BqfiOi6sclrGzvoeVivIj8rcfcmnIQRF +7XCyeZI7DFe5/lhlCs5PRf5QW66VXT/NrlQ46oD/D6InkOmi3IQcbhKxAX2g4a+Xd5s3UtCtG2py +m8eg6WYWqR6SL5OjKMGfSrYt/6kxxQtOpeAgj1LXBNmpE2ElmCSIy5H0zFd8gJ924HWijWhb2hRC +6wNEm1QdDZtuSZcEprIUBo/XRNcbQe1OUbQ/r3hPTaPJJDNtFLu8KHV5XoNr3Eo6h6YtOKw8e8yw +VF5PnJ+ts3a9/Mz38RpG/AUSzYUW +""") + +##file python-config +PYTHON_CONFIG = convert(""" +eJyNVV1P2zAUfc+v8ODBiSABxlulTipbO6p1LWqBgVhlhcZpPYUkctzSivHfd6+dpGloGH2Ja/ue +e+65Hz78xNhtf3x90xmw7vCWsRPGLvpDNuz87MKfdKMWSWxZ4ilNpCLZJiuWc66SVFUOZkkcirll +rfxIBAzOMtImDzSVPBRrekwoX/OZu/0r4lm0DHiG60g86u8sjPw5rCyy86NRkB8QuuBRSqfAKESn +3orLTCQxE3GYkC9tYp8fk89OSwNsmXgizrhUtnumeSgeo5GbLUMk49Rv+2nK48Cm/qMwfp333J2/ +dVcAGE0CIQHBsgIeEr4Wij0LtWDLzJ9ze5YEvH2WI6CHTAVcSu9ZCsXtgxu81CIvp6/k4eXsdfo7 +PvDCRD75yi41QitfzlcPp1OI7i/1/iQitqnr0iMgQ+A6wa+IKwwdxyk9IiXNAzgquTFU8NIxAVjM +osm1Zz526e+shQ4hKRVci69nPC3Kw4NQEmkQ65E7OodxorSvxjvpBjQHDmWFIQ1mlmzlS5vedseT +/mgIEsMJ7Lxz2bLAF9M5xeLEhdbHxpWOw0GdkJApMVBRF1y+a0z3c9WZPAXGFcFrJgCIB+024uad +0CrzmEoRa3Ub4swNIHPGf7QDV+2uj2OiFWsChgCwjKqN6rp5izpbH6Wc1O1TclQTP/XVwi6anTr1 +1sbubjZLI1+VptPSdCfwnFBrB1jvebrTA9uUhU2/9gad7xPqeFkaQcnnLbCViZK8d7R1kxzFrIJV +8EaLYmKYpvGVkig+3C5HCXbM1jGCGekiM2pRCVPyRyXYdPf6kcbWEQ36F5V4Gq9N7icNNw+JHwRE +LTgxRXACpvnQv/PuT0xCCAywY/K4hE6Now2qDwaSE5FB+1agsoUveYDepS83qFcF1NufvULD3fTl +g6Hgf7WBt6lzMeiyyWVn3P1WVbwaczHmTzE9A5SyItTVgFYyvs/L/fXlaNgbw8v3azT+0eikVlWD +/vBHbzQumP23uBCjsYdrL9OWARwxs/nuLOzeXbPJTa/Xv6sUmQir5pC1YRLz3eA+CD8Z0XpcW8v9 +MZWF36ryyXXf3yBIz6nzqz8Muyz0m5Qj7OexfYo/Ph3LqvkHUg7AuA== +""") + +MH_MAGIC = 0xfeedface +MH_CIGAM = 0xcefaedfe +MH_MAGIC_64 = 0xfeedfacf +MH_CIGAM_64 = 0xcffaedfe +FAT_MAGIC = 0xcafebabe +BIG_ENDIAN = '>' +LITTLE_ENDIAN = '<' +LC_LOAD_DYLIB = 0xc +maxint = majver == 3 and getattr(sys, 'maxsize') or getattr(sys, 'maxint') + + +class fileview(object): + """ + A proxy for file-like objects that exposes a given view of a file. + Modified from macholib. 
+    """
+
+    def __init__(self, fileobj, start=0, size=maxint):
+        if isinstance(fileobj, fileview):
+            self._fileobj = fileobj._fileobj
+        else:
+            self._fileobj = fileobj
+        self._start = start
+        self._end = start + size
+        self._pos = 0
+
+    def __repr__(self):
+        return '<fileview [%d, %d] %r>' % (
+            self._start, self._end, self._fileobj)
+
+    def tell(self):
+        return self._pos
+
+    def _checkwindow(self, seekto, op):
+        if not (self._start <= seekto <= self._end):
+            raise IOError("%s to offset %d is outside window [%d, %d]" % (
+                op, seekto, self._start, self._end))
+
+    def seek(self, offset, whence=0):
+        seekto = offset
+        if whence == os.SEEK_SET:
+            seekto += self._start
+        elif whence == os.SEEK_CUR:
+            seekto += self._start + self._pos
+        elif whence == os.SEEK_END:
+            seekto += self._end
+        else:
+            raise IOError("Invalid whence argument to seek: %r" % (whence,))
+        self._checkwindow(seekto, 'seek')
+        self._fileobj.seek(seekto)
+        self._pos = seekto - self._start
+
+    def write(self, bytes):
+        here = self._start + self._pos
+        self._checkwindow(here, 'write')
+        self._checkwindow(here + len(bytes), 'write')
+        self._fileobj.seek(here, os.SEEK_SET)
+        self._fileobj.write(bytes)
+        self._pos += len(bytes)
+
+    def read(self, size=maxint):
+        assert size >= 0
+        here = self._start + self._pos
+        self._checkwindow(here, 'read')
+        size = min(size, self._end - here)
+        self._fileobj.seek(here, os.SEEK_SET)
+        bytes = self._fileobj.read(size)
+        self._pos += len(bytes)
+        return bytes
+
+
+def read_data(file, endian, num=1):
+    """
+    Read a given number of 32-bits unsigned integers from the given file
+    with the given endianness.
+    """
+    res = struct.unpack(endian + 'L' * num, file.read(num * 4))
+    if len(res) == 1:
+        return res[0]
+    return res
+
+
+def mach_o_change(path, what, value):
+    """
+    Replace a given name (what) in any LC_LOAD_DYLIB command found in
+    the given binary with a new name (value), provided it's shorter.
+    """
+
+    def do_macho(file, bits, endian):
+        # Read Mach-O header (the magic number is assumed read by the caller)
+        cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = read_data(file, endian, 6)
+        # 64-bits header has one more field.
+        if bits == 64:
+            read_data(file, endian)
+        # The header is followed by ncmds commands
+        for n in range(ncmds):
+            where = file.tell()
+            # Read command header
+            cmd, cmdsize = read_data(file, endian, 2)
+            if cmd == LC_LOAD_DYLIB:
+                # The first data field in LC_LOAD_DYLIB commands is the
+                # offset of the name, starting from the beginning of the
+                # command.
+                name_offset = read_data(file, endian)
+                file.seek(where + name_offset, os.SEEK_SET)
+                # Read the NUL terminated string
+                load = file.read(cmdsize - name_offset).decode()
+                load = load[:load.index('\0')]
+                # If the string is what is being replaced, overwrite it.
+ if load == what: + file.seek(where + name_offset, os.SEEK_SET) + file.write(value.encode() + '\0'.encode()) + # Seek to the next command + file.seek(where + cmdsize, os.SEEK_SET) + + def do_file(file, offset=0, size=maxint): + file = fileview(file, offset, size) + # Read magic number + magic = read_data(file, BIG_ENDIAN) + if magic == FAT_MAGIC: + # Fat binaries contain nfat_arch Mach-O binaries + nfat_arch = read_data(file, BIG_ENDIAN) + for n in range(nfat_arch): + # Read arch header + cputype, cpusubtype, offset, size, align = read_data(file, BIG_ENDIAN, 5) + do_file(file, offset, size) + elif magic == MH_MAGIC: + do_macho(file, 32, BIG_ENDIAN) + elif magic == MH_CIGAM: + do_macho(file, 32, LITTLE_ENDIAN) + elif magic == MH_MAGIC_64: + do_macho(file, 64, BIG_ENDIAN) + elif magic == MH_CIGAM_64: + do_macho(file, 64, LITTLE_ENDIAN) + + assert(len(what) >= len(value)) + + with open(path, 'r+b') as f: + do_file(f) + + +if __name__ == '__main__': + main() + +# TODO: +# Copy python.exe.manifest +# Monkeypatch distutils.sysconfig diff --git a/src/application/views/admin/__init__.py b/app/lib/virtualenv_support/__init__.py similarity index 100% rename from src/application/views/admin/__init__.py rename to app/lib/virtualenv_support/__init__.py diff --git a/app/lib/virtualenv_support/argparse-1.4.0-py2.py3-none-any.whl b/app/lib/virtualenv_support/argparse-1.4.0-py2.py3-none-any.whl new file mode 100644 index 0000000..dfef51d Binary files /dev/null and b/app/lib/virtualenv_support/argparse-1.4.0-py2.py3-none-any.whl differ diff --git a/app/lib/virtualenv_support/pip-9.0.1-py2.py3-none-any.whl b/app/lib/virtualenv_support/pip-9.0.1-py2.py3-none-any.whl new file mode 100644 index 0000000..4b8ecc6 Binary files /dev/null and b/app/lib/virtualenv_support/pip-9.0.1-py2.py3-none-any.whl differ diff --git a/app/lib/virtualenv_support/setuptools-28.8.0-py2.py3-none-any.whl b/app/lib/virtualenv_support/setuptools-28.8.0-py2.py3-none-any.whl new file mode 100644 index 0000000..502e3cb Binary files /dev/null and b/app/lib/virtualenv_support/setuptools-28.8.0-py2.py3-none-any.whl differ diff --git a/app/lib/virtualenv_support/wheel-0.29.0-py2.py3-none-any.whl b/app/lib/virtualenv_support/wheel-0.29.0-py2.py3-none-any.whl new file mode 100644 index 0000000..506d5e5 Binary files /dev/null and b/app/lib/virtualenv_support/wheel-0.29.0-py2.py3-none-any.whl differ diff --git a/app/lib/werkzeug/__init__.py b/app/lib/werkzeug/__init__.py new file mode 100644 index 0000000..ac216f0 --- /dev/null +++ b/app/lib/werkzeug/__init__.py @@ -0,0 +1,151 @@ +# -*- coding: utf-8 -*- +""" + werkzeug + ~~~~~~~~ + + Werkzeug is the Swiss Army knife of Python web development. + + It provides useful classes and functions for any WSGI application to make + the life of a python web developer much easier. All of the provided + classes are independent from each other so you can mix it with any other + library. + + + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" +from types import ModuleType +import sys + +from werkzeug._compat import iteritems + +__version__ = '0.12.1' + + +# This import magic raises concerns quite often which is why the implementation +# and motivation is explained here in detail now. +# +# The majority of the functions and classes provided by Werkzeug work on the +# HTTP and WSGI layer. There is no useful grouping for those which is why +# they are all importable from "werkzeug" instead of the modules where they are +# implemented. 
The downside of that is, that now everything would be loaded at +# once, even if unused. +# +# The implementation of a lazy-loading module in this file replaces the +# werkzeug package when imported from within. Attribute access to the werkzeug +# module will then lazily import from the modules that implement the objects. + + +# import mapping to objects in other modules +all_by_module = { + 'werkzeug.debug': ['DebuggedApplication'], + 'werkzeug.local': ['Local', 'LocalManager', 'LocalProxy', 'LocalStack', + 'release_local'], + 'werkzeug.serving': ['run_simple'], + 'werkzeug.test': ['Client', 'EnvironBuilder', 'create_environ', + 'run_wsgi_app'], + 'werkzeug.testapp': ['test_app'], + 'werkzeug.exceptions': ['abort', 'Aborter'], + 'werkzeug.urls': ['url_decode', 'url_encode', 'url_quote', + 'url_quote_plus', 'url_unquote', 'url_unquote_plus', + 'url_fix', 'Href', 'iri_to_uri', 'uri_to_iri'], + 'werkzeug.formparser': ['parse_form_data'], + 'werkzeug.utils': ['escape', 'environ_property', 'append_slash_redirect', + 'redirect', 'cached_property', 'import_string', + 'dump_cookie', 'parse_cookie', 'unescape', + 'format_string', 'find_modules', 'header_property', + 'html', 'xhtml', 'HTMLBuilder', 'validate_arguments', + 'ArgumentValidationError', 'bind_arguments', + 'secure_filename'], + 'werkzeug.wsgi': ['get_current_url', 'get_host', 'pop_path_info', + 'peek_path_info', 'SharedDataMiddleware', + 'DispatcherMiddleware', 'ClosingIterator', 'FileWrapper', + 'make_line_iter', 'LimitedStream', 'responder', + 'wrap_file', 'extract_path_info'], + 'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers', + 'EnvironHeaders', 'ImmutableList', + 'ImmutableDict', 'ImmutableMultiDict', + 'TypeConversionDict', + 'ImmutableTypeConversionDict', 'Accept', + 'MIMEAccept', 'CharsetAccept', + 'LanguageAccept', 'RequestCacheControl', + 'ResponseCacheControl', 'ETags', 'HeaderSet', + 'WWWAuthenticate', 'Authorization', + 'FileMultiDict', 'CallbackDict', 'FileStorage', + 'OrderedMultiDict', 'ImmutableOrderedMultiDict' + ], + 'werkzeug.useragents': ['UserAgent'], + 'werkzeug.http': ['parse_etags', 'parse_date', 'http_date', 'cookie_date', + 'parse_cache_control_header', 'is_resource_modified', + 'parse_accept_header', 'parse_set_header', 'quote_etag', + 'unquote_etag', 'generate_etag', 'dump_header', + 'parse_list_header', 'parse_dict_header', + 'parse_authorization_header', + 'parse_www_authenticate_header', 'remove_entity_headers', + 'is_entity_header', 'remove_hop_by_hop_headers', + 'parse_options_header', 'dump_options_header', + 'is_hop_by_hop_header', 'unquote_header_value', + 'quote_header_value', 'HTTP_STATUS_CODES'], + 'werkzeug.wrappers': ['BaseResponse', 'BaseRequest', 'Request', 'Response', + 'AcceptMixin', 'ETagRequestMixin', + 'ETagResponseMixin', 'ResponseStreamMixin', + 'CommonResponseDescriptorsMixin', 'UserAgentMixin', + 'AuthorizationMixin', 'WWWAuthenticateMixin', + 'CommonRequestDescriptorsMixin'], + 'werkzeug.security': ['generate_password_hash', 'check_password_hash'], + # the undocumented easteregg ;-) + 'werkzeug._internal': ['_easteregg'] +} + +# modules that should be imported when accessed as attributes of werkzeug +attribute_modules = frozenset(['exceptions', 'routing', 'script']) + + +object_origins = {} +for module, items in iteritems(all_by_module): + for item in items: + object_origins[item] = module + + +class module(ModuleType): + + """Automatically import objects from the modules.""" + + def __getattr__(self, name): + if name in object_origins: + module = 
__import__(object_origins[name], None, None, [name]) + for extra_name in all_by_module[module.__name__]: + setattr(self, extra_name, getattr(module, extra_name)) + return getattr(module, name) + elif name in attribute_modules: + __import__('werkzeug.' + name) + return ModuleType.__getattribute__(self, name) + + def __dir__(self): + """Just show what we want to show.""" + result = list(new_module.__all__) + result.extend(('__file__', '__path__', '__doc__', '__all__', + '__docformat__', '__name__', '__path__', + '__package__', '__version__')) + return result + +# keep a reference to this module so that it's not garbage collected +old_module = sys.modules['werkzeug'] + + +# setup the new module and patch it into the dict of loaded modules +new_module = sys.modules['werkzeug'] = module('werkzeug') +new_module.__dict__.update({ + '__file__': __file__, + '__package__': 'werkzeug', + '__path__': __path__, + '__doc__': __doc__, + '__version__': __version__, + '__all__': tuple(object_origins) + tuple(attribute_modules), + '__docformat__': 'restructuredtext en' +}) + + +# Due to bootstrapping issues we need to import exceptions here. +# Don't ask :-( +__import__('werkzeug.exceptions') diff --git a/app/lib/werkzeug/_compat.py b/app/lib/werkzeug/_compat.py new file mode 100644 index 0000000..fda038b --- /dev/null +++ b/app/lib/werkzeug/_compat.py @@ -0,0 +1,206 @@ +# flake8: noqa +# This whole file is full of lint errors +import codecs +import sys +import operator +import functools +import warnings + +try: + import builtins +except ImportError: + import __builtin__ as builtins + + +PY2 = sys.version_info[0] == 2 +WIN = sys.platform.startswith('win') + +_identity = lambda x: x + +if PY2: + unichr = unichr + text_type = unicode + string_types = (str, unicode) + integer_types = (int, long) + + iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs) + itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs) + iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs) + + iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs) + iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs) + + int_to_byte = chr + iter_bytes = iter + + exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') + + def fix_tuple_repr(obj): + def __repr__(self): + cls = self.__class__ + return '%s(%s)' % (cls.__name__, ', '.join( + '%s=%r' % (field, self[index]) + for index, field in enumerate(cls._fields) + )) + obj.__repr__ = __repr__ + return obj + + def implements_iterator(cls): + cls.next = cls.__next__ + del cls.__next__ + return cls + + def implements_to_string(cls): + cls.__unicode__ = cls.__str__ + cls.__str__ = lambda x: x.__unicode__().encode('utf-8') + return cls + + def native_string_result(func): + def wrapper(*args, **kwargs): + return func(*args, **kwargs).encode('utf-8') + return functools.update_wrapper(wrapper, func) + + def implements_bool(cls): + cls.__nonzero__ = cls.__bool__ + del cls.__bool__ + return cls + + from itertools import imap, izip, ifilter + range_type = xrange + + from StringIO import StringIO + from cStringIO import StringIO as BytesIO + NativeStringIO = BytesIO + + def make_literal_wrapper(reference): + return _identity + + def normalize_string_tuple(tup): + """Normalizes a string tuple to a common type. Following Python 2 + rules, upgrades to unicode are implicit. 
+ """ + if any(isinstance(x, text_type) for x in tup): + return tuple(to_unicode(x) for x in tup) + return tup + + def try_coerce_native(s): + """Try to coerce a unicode string to native if possible. Otherwise, + leave it as unicode. + """ + try: + return to_native(s) + except UnicodeError: + return s + + wsgi_get_bytes = _identity + + def wsgi_decoding_dance(s, charset='utf-8', errors='replace'): + return s.decode(charset, errors) + + def wsgi_encoding_dance(s, charset='utf-8', errors='replace'): + if isinstance(s, bytes): + return s + return s.encode(charset, errors) + + def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'): + if x is None: + return None + if isinstance(x, (bytes, bytearray, buffer)): + return bytes(x) + if isinstance(x, unicode): + return x.encode(charset, errors) + raise TypeError('Expected bytes') + + def to_native(x, charset=sys.getdefaultencoding(), errors='strict'): + if x is None or isinstance(x, str): + return x + return x.encode(charset, errors) + +else: + unichr = chr + text_type = str + string_types = (str, ) + integer_types = (int, ) + + iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs)) + itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs)) + iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs)) + + iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs)) + iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs)) + + int_to_byte = operator.methodcaller('to_bytes', 1, 'big') + iter_bytes = functools.partial(map, int_to_byte) + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + fix_tuple_repr = _identity + implements_iterator = _identity + implements_to_string = _identity + implements_bool = _identity + native_string_result = _identity + imap = map + izip = zip + ifilter = filter + range_type = range + + from io import StringIO, BytesIO + NativeStringIO = StringIO + + _latin1_encode = operator.methodcaller('encode', 'latin1') + + def make_literal_wrapper(reference): + if isinstance(reference, text_type): + return _identity + return _latin1_encode + + def normalize_string_tuple(tup): + """Ensures that all types in the tuple are either strings + or bytes. 
+ """ + tupiter = iter(tup) + is_text = isinstance(next(tupiter, None), text_type) + for arg in tupiter: + if isinstance(arg, text_type) != is_text: + raise TypeError('Cannot mix str and bytes arguments (got %s)' + % repr(tup)) + return tup + + try_coerce_native = _identity + wsgi_get_bytes = _latin1_encode + + def wsgi_decoding_dance(s, charset='utf-8', errors='replace'): + return s.encode('latin1').decode(charset, errors) + + def wsgi_encoding_dance(s, charset='utf-8', errors='replace'): + if isinstance(s, text_type): + s = s.encode(charset) + return s.decode('latin1', errors) + + def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'): + if x is None: + return None + if isinstance(x, (bytes, bytearray, memoryview)): # noqa + return bytes(x) + if isinstance(x, str): + return x.encode(charset, errors) + raise TypeError('Expected bytes') + + def to_native(x, charset=sys.getdefaultencoding(), errors='strict'): + if x is None or isinstance(x, str): + return x + return x.decode(charset, errors) + + +def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict', + allow_none_charset=False): + if x is None: + return None + if not isinstance(x, bytes): + return text_type(x) + if charset is None and allow_none_charset: + return x + return x.decode(charset, errors) diff --git a/app/lib/werkzeug/_internal.py b/app/lib/werkzeug/_internal.py new file mode 100644 index 0000000..3d1ee09 --- /dev/null +++ b/app/lib/werkzeug/_internal.py @@ -0,0 +1,418 @@ +# -*- coding: utf-8 -*- +""" + werkzeug._internal + ~~~~~~~~~~~~~~~~~~ + + This module provides internally used helpers and constants. + + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" +import re +import string +import inspect +from weakref import WeakKeyDictionary +from datetime import datetime, date +from itertools import chain + +from werkzeug._compat import iter_bytes, text_type, BytesIO, int_to_byte, \ + range_type, integer_types + + +_logger = None +_empty_stream = BytesIO() +_signature_cache = WeakKeyDictionary() +_epoch_ord = date(1970, 1, 1).toordinal() +_cookie_params = set((b'expires', b'path', b'comment', + b'max-age', b'secure', b'httponly', + b'version')) +_legal_cookie_chars = (string.ascii_letters + + string.digits + + u"!#$%&'*+-.^_`|~:").encode('ascii') + +_cookie_quoting_map = { + b',': b'\\054', + b';': b'\\073', + b'"': b'\\"', + b'\\': b'\\\\', +} +for _i in chain(range_type(32), range_type(127, 256)): + _cookie_quoting_map[int_to_byte(_i)] = ('\\%03o' % _i).encode('latin1') + + +_octal_re = re.compile(b'\\\\[0-3][0-7][0-7]') +_quote_re = re.compile(b'[\\\\].') +_legal_cookie_chars_re = b'[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]' +_cookie_re = re.compile(b""" + (?P[^=]+) + \s*=\s* + (?P + "(?:[^\\\\"]|\\\\.)*" | + (?:.*?) + ) + \s*; +""", flags=re.VERBOSE) + + +class _Missing(object): + + def __repr__(self): + return 'no value' + + def __reduce__(self): + return '_missing' + +_missing = _Missing() + + +def _get_environ(obj): + env = getattr(obj, 'environ', obj) + assert isinstance(env, dict), \ + '%r is not a WSGI environment (has to be a dict)' % type(obj).__name__ + return env + + +def _log(type, message, *args, **kwargs): + """Log into the internal werkzeug logger.""" + global _logger + if _logger is None: + import logging + _logger = logging.getLogger('werkzeug') + # Only set up a default log handler if the + # end-user application didn't set anything up. 
+ if not logging.root.handlers and _logger.level == logging.NOTSET: + _logger.setLevel(logging.INFO) + handler = logging.StreamHandler() + _logger.addHandler(handler) + getattr(_logger, type)(message.rstrip(), *args, **kwargs) + + +def _parse_signature(func): + """Return a signature object for the function.""" + if hasattr(func, 'im_func'): + func = func.im_func + + # if we have a cached validator for this function, return it + parse = _signature_cache.get(func) + if parse is not None: + return parse + + # inspect the function signature and collect all the information + if hasattr(inspect, 'getfullargspec'): + tup = inspect.getfullargspec(func) + else: + tup = inspect.getargspec(func) + positional, vararg_var, kwarg_var, defaults = tup[:4] + defaults = defaults or () + arg_count = len(positional) + arguments = [] + for idx, name in enumerate(positional): + if isinstance(name, list): + raise TypeError('cannot parse functions that unpack tuples ' + 'in the function signature') + try: + default = defaults[idx - arg_count] + except IndexError: + param = (name, False, None) + else: + param = (name, True, default) + arguments.append(param) + arguments = tuple(arguments) + + def parse(args, kwargs): + new_args = [] + missing = [] + extra = {} + + # consume as many arguments as positional as possible + for idx, (name, has_default, default) in enumerate(arguments): + try: + new_args.append(args[idx]) + except IndexError: + try: + new_args.append(kwargs.pop(name)) + except KeyError: + if has_default: + new_args.append(default) + else: + missing.append(name) + else: + if name in kwargs: + extra[name] = kwargs.pop(name) + + # handle extra arguments + extra_positional = args[arg_count:] + if vararg_var is not None: + new_args.extend(extra_positional) + extra_positional = () + if kwargs and kwarg_var is None: + extra.update(kwargs) + kwargs = {} + + return new_args, kwargs, missing, extra, extra_positional, \ + arguments, vararg_var, kwarg_var + _signature_cache[func] = parse + return parse + + +def _date_to_unix(arg): + """Converts a timetuple, integer or datetime object into the seconds from + epoch in utc. 
+ """ + if isinstance(arg, datetime): + arg = arg.utctimetuple() + elif isinstance(arg, integer_types + (float,)): + return int(arg) + year, month, day, hour, minute, second = arg[:6] + days = date(year, month, 1).toordinal() - _epoch_ord + day - 1 + hours = days * 24 + hour + minutes = hours * 60 + minute + seconds = minutes * 60 + second + return seconds + + +class _DictAccessorProperty(object): + + """Baseclass for `environ_property` and `header_property`.""" + read_only = False + + def __init__(self, name, default=None, load_func=None, dump_func=None, + read_only=None, doc=None): + self.name = name + self.default = default + self.load_func = load_func + self.dump_func = dump_func + if read_only is not None: + self.read_only = read_only + self.__doc__ = doc + + def __get__(self, obj, type=None): + if obj is None: + return self + storage = self.lookup(obj) + if self.name not in storage: + return self.default + rv = storage[self.name] + if self.load_func is not None: + try: + rv = self.load_func(rv) + except (ValueError, TypeError): + rv = self.default + return rv + + def __set__(self, obj, value): + if self.read_only: + raise AttributeError('read only property') + if self.dump_func is not None: + value = self.dump_func(value) + self.lookup(obj)[self.name] = value + + def __delete__(self, obj): + if self.read_only: + raise AttributeError('read only property') + self.lookup(obj).pop(self.name, None) + + def __repr__(self): + return '<%s %s>' % ( + self.__class__.__name__, + self.name + ) + + +def _cookie_quote(b): + buf = bytearray() + all_legal = True + _lookup = _cookie_quoting_map.get + _push = buf.extend + + for char in iter_bytes(b): + if char not in _legal_cookie_chars: + all_legal = False + char = _lookup(char, char) + _push(char) + + if all_legal: + return bytes(buf) + return bytes(b'"' + buf + b'"') + + +def _cookie_unquote(b): + if len(b) < 2: + return b + if b[:1] != b'"' or b[-1:] != b'"': + return b + + b = b[1:-1] + + i = 0 + n = len(b) + rv = bytearray() + _push = rv.extend + + while 0 <= i < n: + o_match = _octal_re.search(b, i) + q_match = _quote_re.search(b, i) + if not o_match and not q_match: + rv.extend(b[i:]) + break + j = k = -1 + if o_match: + j = o_match.start(0) + if q_match: + k = q_match.start(0) + if q_match and (not o_match or k < j): + _push(b[i:k]) + _push(b[k + 1:k + 2]) + i = k + 2 + else: + _push(b[i:j]) + rv.append(int(b[j + 1:j + 4], 8)) + i = j + 4 + + return bytes(rv) + + +def _cookie_parse_impl(b): + """Lowlevel cookie parsing facility that operates on bytes.""" + i = 0 + n = len(b) + + while i < n: + match = _cookie_re.search(b + b';', i) + if not match: + break + + key = match.group('key').strip() + value = match.group('val') + i = match.end(0) + + # Ignore parameters. We have no interest in them. + if key.lower() not in _cookie_params: + yield _cookie_unquote(key), _cookie_unquote(value) + + +def _encode_idna(domain): + # If we're given bytes, make sure they fit into ASCII + if not isinstance(domain, text_type): + domain.decode('ascii') + return domain + + # Otherwise check if it's already ascii, then return + try: + return domain.encode('ascii') + except UnicodeError: + pass + + # Otherwise encode each part separately + parts = domain.split('.') + for idx, part in enumerate(parts): + parts[idx] = part.encode('idna') + return b'.'.join(parts) + + +def _decode_idna(domain): + # If the input is a string try to encode it to ascii to + # do the idna decoding. 
if that fails because of an + # unicode error, then we already have a decoded idna domain + if isinstance(domain, text_type): + try: + domain = domain.encode('ascii') + except UnicodeError: + return domain + + # Decode each part separately. If a part fails, try to + # decode it with ascii and silently ignore errors. This makes + # most sense because the idna codec does not have error handling + parts = domain.split(b'.') + for idx, part in enumerate(parts): + try: + parts[idx] = part.decode('idna') + except UnicodeError: + parts[idx] = part.decode('ascii', 'ignore') + + return '.'.join(parts) + + +def _make_cookie_domain(domain): + if domain is None: + return None + domain = _encode_idna(domain) + if b':' in domain: + domain = domain.split(b':', 1)[0] + if b'.' in domain: + return domain + raise ValueError( + 'Setting \'domain\' for a cookie on a server running locally (ex: ' + 'localhost) is not supported by complying browsers. You should ' + 'have something like: \'127.0.0.1 localhost dev.localhost\' on ' + 'your hosts file and then point your server to run on ' + '\'dev.localhost\' and also set \'domain\' for \'dev.localhost\'' + ) + + +def _easteregg(app=None): + """Like the name says. But who knows how it works?""" + def bzzzzzzz(gyver): + import base64 + import zlib + return zlib.decompress(base64.b64decode(gyver)).decode('ascii') + gyver = u'\n'.join([x + (77 - len(x)) * u' ' for x in bzzzzzzz(b''' +eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m +9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz +4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY +jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc +q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5 +jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317 +8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ +v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r +XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu +LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk +iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI +tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE +1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI +GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN +Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A +QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG +8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu +jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz +DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8 +MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4 +GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i +RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi +Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c +NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS +pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ +sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm +p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ 
+krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/ +nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt +mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p +7f2zLkGNv8b191cD/3vs9Q833z8t''').splitlines()]) + + def easteregged(environ, start_response): + def injecting_start_response(status, headers, exc_info=None): + headers.append(('X-Powered-By', 'Werkzeug')) + return start_response(status, headers, exc_info) + if app is not None and environ.get('QUERY_STRING') != 'macgybarchakku': + return app(environ, injecting_start_response) + injecting_start_response('200 OK', [('Content-Type', 'text/html')]) + return [(u''' + + + +About Werkzeug + + + +

+</head>
+<body>
+<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
+<p>the Swiss Army knife of Python web development.</p>
+<pre>%s\n\n\n</pre>
+</body>
+</html>
+ +''' % gyver).encode('latin1')] + return easteregged diff --git a/app/lib/werkzeug/_reloader.py b/app/lib/werkzeug/_reloader.py new file mode 100644 index 0000000..c4ca271 --- /dev/null +++ b/app/lib/werkzeug/_reloader.py @@ -0,0 +1,267 @@ +import os +import sys +import time +import subprocess +import threading +from itertools import chain + +from werkzeug._internal import _log +from werkzeug._compat import PY2, iteritems, text_type + + +def _iter_module_files(): + """This iterates over all relevant Python files. It goes through all + loaded files from modules, all files in folders of already loaded modules + as well as all files reachable through a package. + """ + # The list call is necessary on Python 3 in case the module + # dictionary modifies during iteration. + for module in list(sys.modules.values()): + if module is None: + continue + filename = getattr(module, '__file__', None) + if filename: + old = None + while not os.path.isfile(filename): + old = filename + filename = os.path.dirname(filename) + if filename == old: + break + else: + if filename[-4:] in ('.pyc', '.pyo'): + filename = filename[:-1] + yield filename + + +def _find_observable_paths(extra_files=None): + """Finds all paths that should be observed.""" + rv = set(os.path.abspath(x) for x in sys.path) + + for filename in extra_files or (): + rv.add(os.path.dirname(os.path.abspath(filename))) + + for module in list(sys.modules.values()): + fn = getattr(module, '__file__', None) + if fn is None: + continue + fn = os.path.abspath(fn) + rv.add(os.path.dirname(fn)) + + return _find_common_roots(rv) + + +def _get_args_for_reloading(): + """Returns the executable. This contains a workaround for windows + if the executable is incorrectly reported to not have the .exe + extension which can cause bugs on reloading. + """ + rv = [sys.executable] + py_script = sys.argv[0] + if os.name == 'nt' and not os.path.exists(py_script) and \ + os.path.exists(py_script + '.exe'): + py_script += '.exe' + rv.append(py_script) + rv.extend(sys.argv[1:]) + return rv + + +def _find_common_roots(paths): + """Out of some paths it finds the common roots that need monitoring.""" + paths = [x.split(os.path.sep) for x in paths] + root = {} + for chunks in sorted(paths, key=len, reverse=True): + node = root + for chunk in chunks: + node = node.setdefault(chunk, {}) + node.clear() + + rv = set() + + def _walk(node, path): + for prefix, child in iteritems(node): + _walk(child, path + (prefix,)) + if not node: + rv.add('/'.join(path)) + _walk(root, ()) + return rv + + +class ReloaderLoop(object): + name = None + + # monkeypatched by testsuite. wrapping with `staticmethod` is required in + # case time.sleep has been replaced by a non-c function (e.g. by + # `eventlet.monkey_patch`) before we get here + _sleep = staticmethod(time.sleep) + + def __init__(self, extra_files=None, interval=1): + self.extra_files = set(os.path.abspath(x) + for x in extra_files or ()) + self.interval = interval + + def run(self): + pass + + def restart_with_reloader(self): + """Spawn a new Python interpreter with the same arguments as this one, + but running the reloader thread. + """ + while 1: + _log('info', ' * Restarting with %s' % self.name) + args = _get_args_for_reloading() + new_environ = os.environ.copy() + new_environ['WERKZEUG_RUN_MAIN'] = 'true' + + # a weird bug on windows. sometimes unicode strings end up in the + # environment and subprocess.call does not like this, encode them + # to latin1 and continue. 
+ if os.name == 'nt' and PY2: + for key, value in iteritems(new_environ): + if isinstance(value, text_type): + new_environ[key] = value.encode('iso-8859-1') + + exit_code = subprocess.call(args, env=new_environ, + close_fds=False) + if exit_code != 3: + return exit_code + + def trigger_reload(self, filename): + self.log_reload(filename) + sys.exit(3) + + def log_reload(self, filename): + filename = os.path.abspath(filename) + _log('info', ' * Detected change in %r, reloading' % filename) + + +class StatReloaderLoop(ReloaderLoop): + name = 'stat' + + def run(self): + mtimes = {} + while 1: + for filename in chain(_iter_module_files(), + self.extra_files): + try: + mtime = os.stat(filename).st_mtime + except OSError: + continue + + old_time = mtimes.get(filename) + if old_time is None: + mtimes[filename] = mtime + continue + elif mtime > old_time: + self.trigger_reload(filename) + self._sleep(self.interval) + + +class WatchdogReloaderLoop(ReloaderLoop): + + def __init__(self, *args, **kwargs): + ReloaderLoop.__init__(self, *args, **kwargs) + from watchdog.observers import Observer + from watchdog.events import FileSystemEventHandler + self.observable_paths = set() + + def _check_modification(filename): + if filename in self.extra_files: + self.trigger_reload(filename) + dirname = os.path.dirname(filename) + if dirname.startswith(tuple(self.observable_paths)): + if filename.endswith(('.pyc', '.pyo')): + self.trigger_reload(filename[:-1]) + elif filename.endswith('.py'): + self.trigger_reload(filename) + + class _CustomHandler(FileSystemEventHandler): + + def on_created(self, event): + _check_modification(event.src_path) + + def on_modified(self, event): + _check_modification(event.src_path) + + def on_moved(self, event): + _check_modification(event.src_path) + _check_modification(event.dest_path) + + def on_deleted(self, event): + _check_modification(event.src_path) + + reloader_name = Observer.__name__.lower() + if reloader_name.endswith('observer'): + reloader_name = reloader_name[:-8] + reloader_name += ' reloader' + + self.name = reloader_name + + self.observer_class = Observer + self.event_handler = _CustomHandler() + self.should_reload = False + + def trigger_reload(self, filename): + # This is called inside an event handler, which means throwing + # SystemExit has no effect. + # https://github.com/gorakhargosh/watchdog/issues/294 + self.should_reload = True + self.log_reload(filename) + + def run(self): + watches = {} + observer = self.observer_class() + observer.start() + + while not self.should_reload: + to_delete = set(watches) + paths = _find_observable_paths(self.extra_files) + for path in paths: + if path not in watches: + try: + watches[path] = observer.schedule( + self.event_handler, path, recursive=True) + except OSError: + # Clear this path from list of watches We don't want + # the same error message showing again in the next + # iteration. 
+ watches[path] = None + to_delete.discard(path) + for path in to_delete: + watch = watches.pop(path, None) + if watch is not None: + observer.unschedule(watch) + self.observable_paths = paths + self._sleep(self.interval) + + sys.exit(3) + + +reloader_loops = { + 'stat': StatReloaderLoop, + 'watchdog': WatchdogReloaderLoop, +} + +try: + __import__('watchdog.observers') +except ImportError: + reloader_loops['auto'] = reloader_loops['stat'] +else: + reloader_loops['auto'] = reloader_loops['watchdog'] + + +def run_with_reloader(main_func, extra_files=None, interval=1, + reloader_type='auto'): + """Run the given function in an independent python interpreter.""" + import signal + reloader = reloader_loops[reloader_type](extra_files, interval) + signal.signal(signal.SIGTERM, lambda *args: sys.exit(0)) + try: + if os.environ.get('WERKZEUG_RUN_MAIN') == 'true': + t = threading.Thread(target=main_func, args=()) + t.setDaemon(True) + t.start() + reloader.run() + else: + sys.exit(reloader.restart_with_reloader()) + except KeyboardInterrupt: + pass diff --git a/app/lib/werkzeug/contrib/__init__.py b/app/lib/werkzeug/contrib/__init__.py new file mode 100644 index 0000000..3e572eb --- /dev/null +++ b/app/lib/werkzeug/contrib/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +""" + werkzeug.contrib + ~~~~~~~~~~~~~~~~ + + Contains user-submitted code that other users may find useful, but which + is not part of the Werkzeug core. Anyone can write code for inclusion in + the `contrib` package. All modules in this package are distributed as an + add-on library and thus are not part of Werkzeug itself. + + This file itself is mostly for informational purposes and to tell the + Python interpreter that `contrib` is a package. + + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" diff --git a/src/lib/werkzeug/contrib/atom.py b/app/lib/werkzeug/contrib/atom.py similarity index 87% rename from src/lib/werkzeug/contrib/atom.py rename to app/lib/werkzeug/contrib/atom.py index 7aaa2fb..51d1a4e 100644 --- a/src/lib/werkzeug/contrib/atom.py +++ b/app/lib/werkzeug/contrib/atom.py @@ -18,12 +18,14 @@ def atom_feed(request): updated=post.last_update, published=post.pub_date) return feed.get_response() - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from datetime import datetime + from werkzeug.utils import escape from werkzeug.wrappers import BaseResponse +from werkzeug._compat import implements_to_string, string_types XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml' @@ -42,10 +44,15 @@ def _make_text_block(name, content, content_type=None): def format_iso8601(obj): """Format a datetime object for iso8601""" - return obj.strftime('%Y-%m-%dT%H:%M:%SZ') + iso8601 = obj.isoformat() + if obj.tzinfo: + return iso8601 + return iso8601 + 'Z' +@implements_to_string class AtomFeed(object): + """A helper class that creates Atom feeds. :param title: the title of the feed. Required. @@ -58,6 +65,7 @@ class AtomFeed(object): :param updated: the time the feed was modified the last time. Must be a :class:`datetime.datetime` object. If not present the latest entry's `updated` is used. + Treated as UTC if naive datetime. :param feed_url: the URL to the feed. Should be the URL that was requested. :param author: the author of the feed. 
Must be either a string (the @@ -115,7 +123,7 @@ def __init__(self, title=None, entries=None, **kwargs): self.entries = entries and list(entries) or [] if not hasattr(self.author, '__iter__') \ - or isinstance(self.author, (basestring, dict)): + or isinstance(self.author, string_types + (dict,)): self.author = [self.author] for i, author in enumerate(self.author): if not isinstance(author, dict): @@ -151,7 +159,7 @@ def generate(self): """Return a generator that yields pieces of XML.""" # atom demands either an author element in every entry or a global one if not self.author: - if False in map(lambda e: bool(e.author), self.entries): + if any(not e.author for e in self.entries): self.author = ({'name': 'Unknown author'},) if not self.updated: @@ -164,13 +172,13 @@ def generate(self): yield u' %s\n' % escape(self.id) yield u' %s\n' % format_iso8601(self.updated) if self.url: - yield u' \n' % escape(self.url, True) + yield u' \n' % escape(self.url) if self.feed_url: yield u' \n' % \ - escape(self.feed_url, True) + escape(self.feed_url) for link in self.links: - yield u' \n' % ''.join('%s="%s" ' % \ - (k, escape(link[k], True)) for k in link) + yield u' \n' % ''.join('%s="%s" ' % + (k, escape(link[k])) for k in link) for author in self.author: yield u' \n' yield u' %s\n' % escape(author['name']) @@ -193,9 +201,9 @@ def generate(self): if generator_name or generator_url or generator_version: tmp = [u' %s\n' % escape(generator_name)) yield u''.join(tmp) for entry in self.entries: @@ -215,14 +223,13 @@ def __call__(self, environ, start_response): """Use the class as WSGI response object.""" return self.get_response()(environ, start_response) - def __unicode__(self): - return self.to_string() - def __str__(self): - return self.to_string().encode('utf-8') + return self.to_string() +@implements_to_string class FeedEntry(object): + """Represents a single entry in a feed. :param title: the title of the entry. Required. @@ -238,15 +245,17 @@ class FeedEntry(object): :param id: a globally unique id for the entry. Must be an URI. If not present the URL is used, but one of both is required. :param updated: the time the entry was modified the last time. Must - be a :class:`datetime.datetime` object. Required. - :param author: the author of the feed. Must be either a string (the + be a :class:`datetime.datetime` object. Treated as + UTC if naive datetime. Required. + :param author: the author of the entry. Must be either a string (the name) or a dict with name (required) and uri or email (both optional). Can be a list of (may be mixed, too) strings and dicts, too, if there are - multiple authors. Required if not every entry has an + multiple authors. Required if the feed does not have an author element. :param published: the time the entry was initially published. Must - be a :class:`datetime.datetime` object. + be a :class:`datetime.datetime` object. Treated as + UTC if naive datetime. :param rights: copyright information for the entry. :param rights_type: the type attribute for the rights element. One of ``'html'``, ``'text'`` or ``'xhtml'``. Default is @@ -254,6 +263,8 @@ class FeedEntry(object): :param links: additional links. Must be a list of dictionaries with href (required) and rel, type, hreflang, title, length (all optional) + :param categories: categories for the entry. Must be a list of dictionaries + with term (required), scheme and label (all optional) :param xml_base: The xml base (url) for this feed item. If not provided it will default to the item url. 
@@ -273,14 +284,15 @@ def __init__(self, title=None, content=None, feed_url=None, **kwargs): self.updated = kwargs.get('updated') self.summary = kwargs.get('summary') self.summary_type = kwargs.get('summary_type', 'html') - self.author = kwargs.get('author') + self.author = kwargs.get('author', ()) self.published = kwargs.get('published') self.rights = kwargs.get('rights') self.links = kwargs.get('links', []) + self.categories = kwargs.get('categories', []) self.xml_base = kwargs.get('xml_base', feed_url) if not hasattr(self.author, '__iter__') \ - or isinstance(self.author, (basestring, dict)): + or isinstance(self.author, string_types + (dict,)): self.author = [self.author] for i, author in enumerate(self.author): if not isinstance(author, dict): @@ -303,7 +315,7 @@ def generate(self): """Yields pieces of ATOM XML.""" base = '' if self.xml_base: - base = ' xml:base="%s"' % escape(self.xml_base, True) + base = ' xml:base="%s"' % escape(self.xml_base) yield u'\n' % base yield u' ' + _make_text_block('title', self.title, self.title_type) yield u' %s\n' % escape(self.id) @@ -322,8 +334,11 @@ def generate(self): yield u' %s\n' % escape(author['email']) yield u' \n' for link in self.links: - yield u' \n' % ''.join('%s="%s" ' % \ - (k, escape(link[k], True)) for k in link) + yield u' \n' % ''.join('%s="%s" ' % + (k, escape(link[k])) for k in link) + for category in self.categories: + yield u' \n' % ''.join('%s="%s" ' % + (k, escape(category[k])) for k in category) if self.summary: yield u' ' + _make_text_block('summary', self.summary, self.summary_type) @@ -336,8 +351,5 @@ def to_string(self): """Convert the feed item into a unicode object.""" return u''.join(self.generate()) - def __unicode__(self): - return self.to_string() - def __str__(self): - return self.to_string().encode('utf-8') + return self.to_string() diff --git a/app/lib/werkzeug/contrib/cache.py b/app/lib/werkzeug/contrib/cache.py new file mode 100644 index 0000000..170b8cd --- /dev/null +++ b/app/lib/werkzeug/contrib/cache.py @@ -0,0 +1,858 @@ +# -*- coding: utf-8 -*- +""" + werkzeug.contrib.cache + ~~~~~~~~~~~~~~~~~~~~~~ + + The main problem with dynamic Web sites is, well, they're dynamic. Each + time a user requests a page, the webserver executes a lot of code, queries + the database, renders templates until the visitor gets the page he sees. + + This is a lot more expensive than just loading a file from the file system + and sending it to the visitor. + + For most Web applications, this overhead isn't a big deal but once it + becomes, you will be glad to have a cache system in place. + + How Caching Works + ================= + + Caching is pretty simple. Basically you have a cache object lurking around + somewhere that is connected to a remote cache or the file system or + something else. When the request comes in you check if the current page + is already in the cache and if so, you're returning it from the cache. + Otherwise you generate the page and put it into the cache. 
(Or a fragment + of the page, you don't have to cache the full thing) + + Here is a simple example of how to cache a sidebar for 5 minutes:: + + def get_sidebar(user): + identifier = 'sidebar_for/user%d' % user.id + value = cache.get(identifier) + if value is not None: + return value + value = generate_sidebar_for(user=user) + cache.set(identifier, value, timeout=60 * 5) + return value + + Creating a Cache Object + ======================= + + To create a cache object you just import the cache system of your choice + from the cache module and instantiate it. Then you can start working + with that object: + + >>> from werkzeug.contrib.cache import SimpleCache + >>> c = SimpleCache() + >>> c.set("foo", "value") + >>> c.get("foo") + 'value' + >>> c.get("missing") is None + True + + Please keep in mind that you have to create the cache and put it somewhere + you have access to it (either as a module global you can import or you just + put it into your WSGI application). + + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" +import os +import re +import errno +import tempfile +import platform +from hashlib import md5 +from time import time +try: + import cPickle as pickle +except ImportError: # pragma: no cover + import pickle + +from werkzeug._compat import iteritems, string_types, text_type, \ + integer_types, to_native +from werkzeug.posixemulation import rename + + +def _items(mappingorseq): + """Wrapper for efficient iteration over mappings represented by dicts + or sequences:: + + >>> for k, v in _items((i, i*i) for i in xrange(5)): + ... assert k*k == v + + >>> for k, v in _items(dict((i, i*i) for i in xrange(5))): + ... assert k*k == v + + """ + if hasattr(mappingorseq, 'items'): + return iteritems(mappingorseq) + return mappingorseq + + +class BaseCache(object): + + """Baseclass for the cache systems. All the cache systems implement this + API or a superset of it. + + :param default_timeout: the default timeout (in seconds) that is used if + no timeout is specified on :meth:`set`. A timeout + of 0 indicates that the cache never expires. + """ + + def __init__(self, default_timeout=300): + self.default_timeout = default_timeout + + def _normalize_timeout(self, timeout): + if timeout is None: + timeout = self.default_timeout + return timeout + + def get(self, key): + """Look up key in the cache and return the value for it. + + :param key: the key to be looked up. + :returns: The value if it exists and is readable, else ``None``. + """ + return None + + def delete(self, key): + """Delete `key` from the cache. + + :param key: the key to delete. + :returns: Whether the key existed and has been deleted. + :rtype: boolean + """ + return True + + def get_many(self, *keys): + """Returns a list of values for the given keys. + For each key a item in the list is created:: + + foo, bar = cache.get_many("foo", "bar") + + Has the same error handling as :meth:`get`. + + :param keys: The function accepts multiple keys as positional + arguments. + """ + return map(self.get, keys) + + def get_dict(self, *keys): + """Like :meth:`get_many` but return a dict:: + + d = cache.get_dict("foo", "bar") + foo = d["foo"] + bar = d["bar"] + + :param keys: The function accepts multiple keys as positional + arguments. + """ + return dict(zip(keys, self.get_many(*keys))) + + def set(self, key, value, timeout=None): + """Add a new key/value to the cache (overwrites value, if key already + exists in the cache). 
+ + :param key: the key to set + :param value: the value for the key + :param timeout: the cache timeout for the key in seconds (if not + specified, it uses the default timeout). A timeout of + 0 idicates that the cache never expires. + :returns: ``True`` if key has been updated, ``False`` for backend + errors. Pickling errors, however, will raise a subclass of + ``pickle.PickleError``. + :rtype: boolean + """ + return True + + def add(self, key, value, timeout=None): + """Works like :meth:`set` but does not overwrite the values of already + existing keys. + + :param key: the key to set + :param value: the value for the key + :param timeout: the cache timeout for the key in seconds (if not + specified, it uses the default timeout). A timeout of + 0 idicates that the cache never expires. + :returns: Same as :meth:`set`, but also ``False`` for already + existing keys. + :rtype: boolean + """ + return True + + def set_many(self, mapping, timeout=None): + """Sets multiple keys and values from a mapping. + + :param mapping: a mapping with the keys/values to set. + :param timeout: the cache timeout for the key in seconds (if not + specified, it uses the default timeout). A timeout of + 0 idicates that the cache never expires. + :returns: Whether all given keys have been set. + :rtype: boolean + """ + rv = True + for key, value in _items(mapping): + if not self.set(key, value, timeout): + rv = False + return rv + + def delete_many(self, *keys): + """Deletes multiple keys at once. + + :param keys: The function accepts multiple keys as positional + arguments. + :returns: Whether all given keys have been deleted. + :rtype: boolean + """ + return all(self.delete(key) for key in keys) + + def has(self, key): + """Checks if a key exists in the cache without returning it. This is a + cheap operation that bypasses loading the actual data on the backend. + + This method is optional and may not be implemented on all caches. + + :param key: the key to check + """ + raise NotImplementedError( + '%s doesn\'t have an efficient implementation of `has`. That ' + 'means it is impossible to check whether a key exists without ' + 'fully loading the key\'s data. Consider using `self.get` ' + 'explicitly if you don\'t care about performance.' + ) + + def clear(self): + """Clears the cache. Keep in mind that not all caches support + completely clearing the cache. + + :returns: Whether the cache has been cleared. + :rtype: boolean + """ + return True + + def inc(self, key, delta=1): + """Increments the value of a key by `delta`. If the key does + not yet exist it is initialized with `delta`. + + For supporting caches this is an atomic operation. + + :param key: the key to increment. + :param delta: the delta to add. + :returns: The new value or ``None`` for backend errors. + """ + value = (self.get(key) or 0) + delta + return value if self.set(key, value) else None + + def dec(self, key, delta=1): + """Decrements the value of a key by `delta`. If the key does + not yet exist it is initialized with `-delta`. + + For supporting caches this is an atomic operation. + + :param key: the key to increment. + :param delta: the delta to subtract. + :returns: The new value or `None` for backend errors. + """ + value = (self.get(key) or 0) - delta + return value if self.set(key, value) else None + + +class NullCache(BaseCache): + + """A cache that doesn't cache. This can be useful for unit testing. + + :param default_timeout: a dummy parameter that is ignored but exists + for API compatibility with other caches. 
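The `BaseCache` contract spelled out above (non-overwriting `add`, read-modify-write `inc`/`dec`, bulk `get_many`/`get_dict`/`set_many`) can be exercised against `SimpleCache`; a short sketch, not part of the patch:

```python
from werkzeug.contrib.cache import SimpleCache

cache = SimpleCache(threshold=500, default_timeout=300)

cache.set('views', 0)                    # unconditional write
assert cache.add('views', 99) is False   # `add` never overwrites
assert cache.get('views') == 0

cache.inc('views')                       # read-modify-write; atomic only on
cache.inc('views', delta=4)              # backends that support it natively
assert cache.get('views') == 5

cache.set_many({'a': 1, 'b': 2}, timeout=60)
assert cache.get_dict('a', 'b') == {'a': 1, 'b': 2}
assert list(cache.get_many('a', 'missing')) == [1, None]
```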
+ """ + + +class SimpleCache(BaseCache): + + """Simple memory cache for single process environments. This class exists + mainly for the development server and is not 100% thread safe. It tries + to use as many atomic operations as possible and no locks for simplicity + but it could happen under heavy load that keys are added multiple times. + + :param threshold: the maximum number of items the cache stores before + it starts deleting some. + :param default_timeout: the default timeout that is used if no timeout is + specified on :meth:`~BaseCache.set`. A timeout of + 0 indicates that the cache never expires. + """ + + def __init__(self, threshold=500, default_timeout=300): + BaseCache.__init__(self, default_timeout) + self._cache = {} + self.clear = self._cache.clear + self._threshold = threshold + + def _prune(self): + if len(self._cache) > self._threshold: + now = time() + toremove = [] + for idx, (key, (expires, _)) in enumerate(self._cache.items()): + if (expires != 0 and expires <= now) or idx % 3 == 0: + toremove.append(key) + for key in toremove: + self._cache.pop(key, None) + + def _normalize_timeout(self, timeout): + timeout = BaseCache._normalize_timeout(self, timeout) + if timeout > 0: + timeout = time() + timeout + return timeout + + def get(self, key): + try: + expires, value = self._cache[key] + if expires == 0 or expires > time(): + return pickle.loads(value) + except (KeyError, pickle.PickleError): + return None + + def set(self, key, value, timeout=None): + expires = self._normalize_timeout(timeout) + self._prune() + self._cache[key] = (expires, pickle.dumps(value, + pickle.HIGHEST_PROTOCOL)) + return True + + def add(self, key, value, timeout=None): + expires = self._normalize_timeout(timeout) + self._prune() + item = (expires, pickle.dumps(value, + pickle.HIGHEST_PROTOCOL)) + if key in self._cache: + return False + self._cache.setdefault(key, item) + return True + + def delete(self, key): + return self._cache.pop(key, None) is not None + + def has(self, key): + try: + expires, value = self._cache[key] + return expires == 0 or expires > time() + except KeyError: + return False + +_test_memcached_key = re.compile(r'[^\x00-\x21\xff]{1,250}$').match + + +class MemcachedCache(BaseCache): + + """A cache that uses memcached as backend. + + The first argument can either be an object that resembles the API of a + :class:`memcache.Client` or a tuple/list of server addresses. In the + event that a tuple/list is passed, Werkzeug tries to import the best + available memcache library. + + This cache looks into the following packages/modules to find bindings for + memcached: + + - ``pylibmc`` + - ``google.appengine.api.memcached`` + - ``memcached`` + + Implementation notes: This cache backend works around some limitations in + memcached to simplify the interface. For example unicode keys are encoded + to utf-8 on the fly. Methods such as :meth:`~BaseCache.get_dict` return + the keys in the same format as passed. Furthermore all get methods + silently ignore key errors to not cause problems when untrusted user data + is passed to the get methods which is often the case in web applications. + + :param servers: a list or tuple of server addresses or alternatively + a :class:`memcache.Client` or a compatible client. + :param default_timeout: the default timeout that is used if no timeout is + specified on :meth:`~BaseCache.set`. A timeout of + 0 indicates taht the cache never expires. + :param key_prefix: a prefix that is added before all keys. 
This makes it + possible to use the same memcached server for different + applications. Keep in mind that + :meth:`~BaseCache.clear` will also clear keys with a + different prefix. + """ + + def __init__(self, servers=None, default_timeout=300, key_prefix=None): + BaseCache.__init__(self, default_timeout) + if servers is None or isinstance(servers, (list, tuple)): + if servers is None: + servers = ['127.0.0.1:11211'] + self._client = self.import_preferred_memcache_lib(servers) + if self._client is None: + raise RuntimeError('no memcache module found') + else: + # NOTE: servers is actually an already initialized memcache + # client. + self._client = servers + + self.key_prefix = to_native(key_prefix) + + def _normalize_key(self, key): + key = to_native(key, 'utf-8') + if self.key_prefix: + key = self.key_prefix + key + return key + + def _normalize_timeout(self, timeout): + timeout = BaseCache._normalize_timeout(self, timeout) + if timeout > 0: + timeout = int(time()) + timeout + return timeout + + def get(self, key): + key = self._normalize_key(key) + # memcached doesn't support keys longer than that. Because often + # checks for so long keys can occur because it's tested from user + # submitted data etc we fail silently for getting. + if _test_memcached_key(key): + return self._client.get(key) + + def get_dict(self, *keys): + key_mapping = {} + have_encoded_keys = False + for key in keys: + encoded_key = self._normalize_key(key) + if not isinstance(key, str): + have_encoded_keys = True + if _test_memcached_key(key): + key_mapping[encoded_key] = key + d = rv = self._client.get_multi(key_mapping.keys()) + if have_encoded_keys or self.key_prefix: + rv = {} + for key, value in iteritems(d): + rv[key_mapping[key]] = value + if len(rv) < len(keys): + for key in keys: + if key not in rv: + rv[key] = None + return rv + + def add(self, key, value, timeout=None): + key = self._normalize_key(key) + timeout = self._normalize_timeout(timeout) + return self._client.add(key, value, timeout) + + def set(self, key, value, timeout=None): + key = self._normalize_key(key) + timeout = self._normalize_timeout(timeout) + return self._client.set(key, value, timeout) + + def get_many(self, *keys): + d = self.get_dict(*keys) + return [d[key] for key in keys] + + def set_many(self, mapping, timeout=None): + new_mapping = {} + for key, value in _items(mapping): + key = self._normalize_key(key) + new_mapping[key] = value + + timeout = self._normalize_timeout(timeout) + failed_keys = self._client.set_multi(new_mapping, timeout) + return not failed_keys + + def delete(self, key): + key = self._normalize_key(key) + if _test_memcached_key(key): + return self._client.delete(key) + + def delete_many(self, *keys): + new_keys = [] + for key in keys: + key = self._normalize_key(key) + if _test_memcached_key(key): + new_keys.append(key) + return self._client.delete_multi(new_keys) + + def has(self, key): + key = self._normalize_key(key) + if _test_memcached_key(key): + return self._client.append(key, '') + return False + + def clear(self): + return self._client.flush_all() + + def inc(self, key, delta=1): + key = self._normalize_key(key) + return self._client.incr(key, delta) + + def dec(self, key, delta=1): + key = self._normalize_key(key) + return self._client.decr(key, delta) + + def import_preferred_memcache_lib(self, servers): + """Returns an initialized memcache client. 
Used by the constructor.""" + try: + import pylibmc + except ImportError: + pass + else: + return pylibmc.Client(servers) + + try: + from google.appengine.api import memcache + except ImportError: + pass + else: + return memcache.Client() + + try: + import memcache + except ImportError: + pass + else: + return memcache.Client(servers) + + +# backwards compatibility +GAEMemcachedCache = MemcachedCache + + +class RedisCache(BaseCache): + + """Uses the Redis key-value store as a cache backend. + + The first argument can be either a string denoting address of the Redis + server or an object resembling an instance of a redis.Redis class. + + Note: Python Redis API already takes care of encoding unicode strings on + the fly. + + .. versionadded:: 0.7 + + .. versionadded:: 0.8 + `key_prefix` was added. + + .. versionchanged:: 0.8 + This cache backend now properly serializes objects. + + .. versionchanged:: 0.8.3 + This cache backend now supports password authentication. + + .. versionchanged:: 0.10 + ``**kwargs`` is now passed to the redis object. + + :param host: address of the Redis server or an object which API is + compatible with the official Python Redis client (redis-py). + :param port: port number on which Redis server listens for connections. + :param password: password authentication for the Redis server. + :param db: db (zero-based numeric index) on Redis Server to connect. + :param default_timeout: the default timeout that is used if no timeout is + specified on :meth:`~BaseCache.set`. A timeout of + 0 indicates that the cache never expires. + :param key_prefix: A prefix that should be added to all keys. + + Any additional keyword arguments will be passed to ``redis.Redis``. + """ + + def __init__(self, host='localhost', port=6379, password=None, + db=0, default_timeout=300, key_prefix=None, **kwargs): + BaseCache.__init__(self, default_timeout) + if isinstance(host, string_types): + try: + import redis + except ImportError: + raise RuntimeError('no redis module found') + if kwargs.get('decode_responses', None): + raise ValueError('decode_responses is not supported by ' + 'RedisCache.') + self._client = redis.Redis(host=host, port=port, password=password, + db=db, **kwargs) + else: + self._client = host + self.key_prefix = key_prefix or '' + + def _normalize_timeout(self, timeout): + timeout = BaseCache._normalize_timeout(self, timeout) + if timeout == 0: + timeout = -1 + return timeout + + def dump_object(self, value): + """Dumps an object into a string for redis. By default it serializes + integers as regular string and pickle dumps everything else. + """ + t = type(value) + if t in integer_types: + return str(value).encode('ascii') + return b'!' + pickle.dumps(value) + + def load_object(self, value): + """The reversal of :meth:`dump_object`. This might be called with + None. + """ + if value is None: + return None + if value.startswith(b'!'): + try: + return pickle.loads(value[1:]) + except pickle.PickleError: + return None + try: + return int(value) + except ValueError: + # before 0.8 we did not have serialization. Still support that. 
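The `dump_object`/`load_object` pair above encodes integers as plain ASCII digits and tags everything else with a leading `b'!'` before pickling. A simplified restatement of that rule, runnable without a Redis server; the exact-type check and the error fallbacks of the original are deliberately omitted:

```python
import pickle

def dump_object(value):
    # integers travel as ASCII digits so Redis can INCR/DECR them in place
    if isinstance(value, int):
        return str(value).encode('ascii')
    # everything else is pickled and tagged with b'!'
    return b'!' + pickle.dumps(value)

def load_object(value):
    if value is None:
        return None
    if value.startswith(b'!'):
        return pickle.loads(value[1:])
    return int(value)  # pre-0.8 entries were stored without serialization

assert load_object(dump_object(42)) == 42
assert load_object(dump_object({'a': 1})) == {'a': 1}
assert load_object(None) is None
```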
+ return value + + def get(self, key): + return self.load_object(self._client.get(self.key_prefix + key)) + + def get_many(self, *keys): + if self.key_prefix: + keys = [self.key_prefix + key for key in keys] + return [self.load_object(x) for x in self._client.mget(keys)] + + def set(self, key, value, timeout=None): + timeout = self._normalize_timeout(timeout) + dump = self.dump_object(value) + if timeout == -1: + result = self._client.set(name=self.key_prefix + key, + value=dump) + else: + result = self._client.setex(name=self.key_prefix + key, + value=dump, time=timeout) + return result + + def add(self, key, value, timeout=None): + timeout = self._normalize_timeout(timeout) + dump = self.dump_object(value) + return ( + self._client.setnx(name=self.key_prefix + key, value=dump) and + self._client.expire(name=self.key_prefix + key, time=timeout) + ) + + def set_many(self, mapping, timeout=None): + timeout = self._normalize_timeout(timeout) + # Use transaction=False to batch without calling redis MULTI + # which is not supported by twemproxy + pipe = self._client.pipeline(transaction=False) + + for key, value in _items(mapping): + dump = self.dump_object(value) + if timeout == -1: + pipe.set(name=self.key_prefix + key, value=dump) + else: + pipe.setex(name=self.key_prefix + key, value=dump, + time=timeout) + return pipe.execute() + + def delete(self, key): + return self._client.delete(self.key_prefix + key) + + def delete_many(self, *keys): + if not keys: + return + if self.key_prefix: + keys = [self.key_prefix + key for key in keys] + return self._client.delete(*keys) + + def has(self, key): + return self._client.exists(self.key_prefix + key) + + def clear(self): + status = False + if self.key_prefix: + keys = self._client.keys(self.key_prefix + '*') + if keys: + status = self._client.delete(*keys) + else: + status = self._client.flushdb() + return status + + def inc(self, key, delta=1): + return self._client.incr(name=self.key_prefix + key, amount=delta) + + def dec(self, key, delta=1): + return self._client.decr(name=self.key_prefix + key, amount=delta) + + +class FileSystemCache(BaseCache): + + """A cache that stores the items on the file system. This cache depends + on being the only user of the `cache_dir`. Make absolutely sure that + nobody but this cache stores files there or otherwise the cache will + randomly delete files therein. + + :param cache_dir: the directory where cache files are stored. + :param threshold: the maximum number of items the cache stores before + it starts deleting some. + :param default_timeout: the default timeout that is used if no timeout is + specified on :meth:`~BaseCache.set`. A timeout of + 0 indicates that the cache never expires. 
+ :param mode: the file mode wanted for the cache files, default 0600 + """ + + #: used for temporary files by the FileSystemCache + _fs_transaction_suffix = '.__wz_cache' + + def __init__(self, cache_dir, threshold=500, default_timeout=300, + mode=0o600): + BaseCache.__init__(self, default_timeout) + self._path = cache_dir + self._threshold = threshold + self._mode = mode + + try: + os.makedirs(self._path) + except OSError as ex: + if ex.errno != errno.EEXIST: + raise + + def _normalize_timeout(self, timeout): + timeout = BaseCache._normalize_timeout(self, timeout) + if timeout != 0: + timeout = time() + timeout + return int(timeout) + + def _list_dir(self): + """return a list of (fully qualified) cache filenames + """ + return [os.path.join(self._path, fn) for fn in os.listdir(self._path) + if not fn.endswith(self._fs_transaction_suffix)] + + def _prune(self): + entries = self._list_dir() + if len(entries) > self._threshold: + now = time() + for idx, fname in enumerate(entries): + try: + remove = False + with open(fname, 'rb') as f: + expires = pickle.load(f) + remove = (expires != 0 and expires <= now) or idx % 3 == 0 + + if remove: + os.remove(fname) + except (IOError, OSError): + pass + + def clear(self): + for fname in self._list_dir(): + try: + os.remove(fname) + except (IOError, OSError): + return False + return True + + def _get_filename(self, key): + if isinstance(key, text_type): + key = key.encode('utf-8') # XXX unicode review + hash = md5(key).hexdigest() + return os.path.join(self._path, hash) + + def get(self, key): + filename = self._get_filename(key) + try: + with open(filename, 'rb') as f: + pickle_time = pickle.load(f) + if pickle_time == 0 or pickle_time >= time(): + return pickle.load(f) + else: + os.remove(filename) + return None + except (IOError, OSError, pickle.PickleError): + return None + + def add(self, key, value, timeout=None): + filename = self._get_filename(key) + if not os.path.exists(filename): + return self.set(key, value, timeout) + return False + + def set(self, key, value, timeout=None): + timeout = self._normalize_timeout(timeout) + filename = self._get_filename(key) + self._prune() + try: + fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix, + dir=self._path) + with os.fdopen(fd, 'wb') as f: + pickle.dump(timeout, f, 1) + pickle.dump(value, f, pickle.HIGHEST_PROTOCOL) + rename(tmp, filename) + os.chmod(filename, self._mode) + except (IOError, OSError): + return False + else: + return True + + def delete(self, key): + try: + os.remove(self._get_filename(key)) + except (IOError, OSError): + return False + else: + return True + + def has(self, key): + filename = self._get_filename(key) + try: + with open(filename, 'rb') as f: + pickle_time = pickle.load(f) + if pickle_time == 0 or pickle_time >= time(): + return True + else: + os.remove(filename) + return False + except (IOError, OSError, pickle.PickleError): + return False + + +class UWSGICache(BaseCache): + """ Implements the cache using uWSGI's caching framework. + + .. note:: + This class cannot be used when running under PyPy, because the uWSGI + API implementation for PyPy is lacking the needed functionality. + + :param default_timeout: The default timeout in seconds. + :param cache: The name of the caching instance to connect to, for + example: mycache@localhost:3031, defaults to an empty string, which + means uWSGI will cache in the local instance. If the cache is in the + same instance as the werkzeug app, you only have to provide the name of + the cache. 
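`FileSystemCache`, defined in full above, hashes each key with MD5 to pick a filename and pickles the expiry time as the first record of the file. A usage sketch with a throwaway directory; remember the warning above that the cache must be the directory's only user:

```python
import tempfile

from werkzeug.contrib.cache import FileSystemCache

cache_dir = tempfile.mkdtemp()   # stand-in for a dedicated cache directory
cache = FileSystemCache(cache_dir, threshold=500, default_timeout=300)

cache.set('greeting', {'text': 'hello'}, timeout=60)
assert cache.has('greeting')
assert cache.get('greeting') == {'text': 'hello'}

cache.delete('greeting')
assert cache.get('greeting') is None
```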
+ """ + def __init__(self, default_timeout=300, cache=''): + BaseCache.__init__(self, default_timeout) + + if platform.python_implementation() == 'PyPy': + raise RuntimeError("uWSGI caching does not work under PyPy, see " + "the docs for more details.") + + try: + import uwsgi + self._uwsgi = uwsgi + except ImportError: + raise RuntimeError("uWSGI could not be imported, are you " + "running under uWSGI?") + + self.cache = cache + + def get(self, key): + rv = self._uwsgi.cache_get(key, self.cache) + if rv is None: + return + return pickle.loads(rv) + + def delete(self, key): + return self._uwsgi.cache_del(key, self.cache) + + def set(self, key, value, timeout=None): + return self._uwsgi.cache_update(key, pickle.dumps(value), + self._normalize_timeout(timeout), + self.cache) + + def add(self, key, value, timeout=None): + return self._uwsgi.cache_set(key, pickle.dumps(value), + self._normalize_timeout(timeout), + self.cache) + + def clear(self): + return self._uwsgi.cache_clear(self.cache) + + def has(self, key): + return self._uwsgi.cache_exists(key, self.cache) is not None diff --git a/app/lib/werkzeug/contrib/fixers.py b/app/lib/werkzeug/contrib/fixers.py new file mode 100644 index 0000000..b88a861 --- /dev/null +++ b/app/lib/werkzeug/contrib/fixers.py @@ -0,0 +1,254 @@ +# -*- coding: utf-8 -*- +""" + werkzeug.contrib.fixers + ~~~~~~~~~~~~~~~~~~~~~~~ + + .. versionadded:: 0.5 + + This module includes various helpers that fix bugs in web servers. They may + be necessary for some versions of a buggy web server but not others. We try + to stay updated with the status of the bugs as good as possible but you have + to make sure whether they fix the problem you encounter. + + If you notice bugs in webservers not fixed in this module consider + contributing a patch. + + :copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" +try: + from urllib import unquote +except ImportError: + from urllib.parse import unquote + +from werkzeug.http import parse_options_header, parse_cache_control_header, \ + parse_set_header +from werkzeug.useragents import UserAgent +from werkzeug.datastructures import Headers, ResponseCacheControl + + +class CGIRootFix(object): + + """Wrap the application in this middleware if you are using FastCGI or CGI + and you have problems with your app root being set to the cgi script's path + instead of the path users are going to visit + + .. versionchanged:: 0.9 + Added `app_root` parameter and renamed from `LighttpdCGIRootFix`. + + :param app: the WSGI application + :param app_root: Defaulting to ``'/'``, you can set this to something else + if your app is mounted somewhere else. + """ + + def __init__(self, app, app_root='/'): + self.app = app + self.app_root = app_root + + def __call__(self, environ, start_response): + # only set PATH_INFO for older versions of Lighty or if no + # server software is provided. That's because the test was + # added in newer Werkzeug versions and we don't want to break + # people's code if they are using this fixer in a test that + # does not set the SERVER_SOFTWARE key. 
+ if 'SERVER_SOFTWARE' not in environ or \ + environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28': + environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \ + environ.get('PATH_INFO', '') + environ['SCRIPT_NAME'] = self.app_root.strip('/') + return self.app(environ, start_response) + +# backwards compatibility +LighttpdCGIRootFix = CGIRootFix + + +class PathInfoFromRequestUriFix(object): + + """On windows environment variables are limited to the system charset + which makes it impossible to store the `PATH_INFO` variable in the + environment without loss of information on some systems. + + This is for example a problem for CGI scripts on a Windows Apache. + + This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`, + `REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the + fix can only be applied if the webserver supports either of these + variables. + + :param app: the WSGI application + """ + + def __init__(self, app): + self.app = app + + def __call__(self, environ, start_response): + for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL': + if key not in environ: + continue + request_uri = unquote(environ[key]) + script_name = unquote(environ.get('SCRIPT_NAME', '')) + if request_uri.startswith(script_name): + environ['PATH_INFO'] = request_uri[len(script_name):] \ + .split('?', 1)[0] + break + return self.app(environ, start_response) + + +class ProxyFix(object): + + """This middleware can be applied to add HTTP proxy support to an + application that was not designed with HTTP proxies in mind. It + sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers. While + Werkzeug-based applications already can use + :py:func:`werkzeug.wsgi.get_host` to retrieve the current host even if + behind proxy setups, this middleware can be used for applications which + access the WSGI environment directly. + + If you have more than one proxy server in front of your app, set + `num_proxies` accordingly. + + Do not use this middleware in non-proxy setups for security reasons. + + The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in + the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and + `werkzeug.proxy_fix.orig_http_host`. + + :param app: the WSGI application + :param num_proxies: the number of proxy servers in front of the app. + """ + + def __init__(self, app, num_proxies=1): + self.app = app + self.num_proxies = num_proxies + + def get_remote_addr(self, forwarded_for): + """Selects the new remote addr from the given list of ips in + X-Forwarded-For. By default it picks the one that the `num_proxies` + proxy server provides. Before 0.9 it would always pick the first. + + .. 
versionadded:: 0.8 + """ + if len(forwarded_for) >= self.num_proxies: + return forwarded_for[-self.num_proxies] + + def __call__(self, environ, start_response): + getter = environ.get + forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '') + forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',') + forwarded_host = getter('HTTP_X_FORWARDED_HOST', '') + environ.update({ + 'werkzeug.proxy_fix.orig_wsgi_url_scheme': getter('wsgi.url_scheme'), + 'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'), + 'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST') + }) + forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x] + remote_addr = self.get_remote_addr(forwarded_for) + if remote_addr is not None: + environ['REMOTE_ADDR'] = remote_addr + if forwarded_host: + environ['HTTP_HOST'] = forwarded_host + if forwarded_proto: + environ['wsgi.url_scheme'] = forwarded_proto + return self.app(environ, start_response) + + +class HeaderRewriterFix(object): + + """This middleware can remove response headers and add others. This + is for example useful to remove the `Date` header from responses if you + are using a server that adds that header, no matter if it's present or + not or to add `X-Powered-By` headers:: + + app = HeaderRewriterFix(app, remove_headers=['Date'], + add_headers=[('X-Powered-By', 'WSGI')]) + + :param app: the WSGI application + :param remove_headers: a sequence of header keys that should be + removed. + :param add_headers: a sequence of ``(key, value)`` tuples that should + be added. + """ + + def __init__(self, app, remove_headers=None, add_headers=None): + self.app = app + self.remove_headers = set(x.lower() for x in (remove_headers or ())) + self.add_headers = list(add_headers or ()) + + def __call__(self, environ, start_response): + def rewriting_start_response(status, headers, exc_info=None): + new_headers = [] + for key, value in headers: + if key.lower() not in self.remove_headers: + new_headers.append((key, value)) + new_headers += self.add_headers + return start_response(status, new_headers, exc_info) + return self.app(environ, rewriting_start_response) + + +class InternetExplorerFix(object): + + """This middleware fixes a couple of bugs with Microsoft Internet + Explorer. Currently the following fixes are applied: + + - removing of `Vary` headers for unsupported mimetypes which + causes troubles with caching. Can be disabled by passing + ``fix_vary=False`` to the constructor. + see: http://support.microsoft.com/kb/824847/en-us + + - removes offending headers to work around caching bugs in + Internet Explorer if `Content-Disposition` is set. Can be + disabled by passing ``fix_attach=False`` to the constructor. + + If it does not detect affected Internet Explorer versions it won't touch + the request / response. + """ + + # This code was inspired by Django fixers for the same bugs. 
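`ProxyFix`, shown above, rewrites `REMOTE_ADDR` and `HTTP_HOST` from the `X-Forwarded-*` headers while stashing the originals in the environ. A hypothetical wiring with two upstream proxies; `app` stands for any WSGI application:

```python
from werkzeug.contrib.fixers import ProxyFix

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    # REMOTE_ADDR now comes from X-Forwarded-For; the original value
    # is preserved at environ['werkzeug.proxy_fix.orig_remote_addr']
    return [environ['REMOTE_ADDR'].encode('utf-8')]

# two proxies in front, so the second-to-last X-Forwarded-For entry wins
app = ProxyFix(app, num_proxies=2)
```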
The + # fix_vary and fix_attach fixers were originally implemented in Django + # by Michael Axiak and is available as part of the Django project: + # http://code.djangoproject.com/ticket/4148 + + def __init__(self, app, fix_vary=True, fix_attach=True): + self.app = app + self.fix_vary = fix_vary + self.fix_attach = fix_attach + + def fix_headers(self, environ, headers, status=None): + if self.fix_vary: + header = headers.get('content-type', '') + mimetype, options = parse_options_header(header) + if mimetype not in ('text/html', 'text/plain', 'text/sgml'): + headers.pop('vary', None) + + if self.fix_attach and 'content-disposition' in headers: + pragma = parse_set_header(headers.get('pragma', '')) + pragma.discard('no-cache') + header = pragma.to_header() + if not header: + headers.pop('pragma', '') + else: + headers['Pragma'] = header + header = headers.get('cache-control', '') + if header: + cc = parse_cache_control_header(header, + cls=ResponseCacheControl) + cc.no_cache = None + cc.no_store = False + header = cc.to_header() + if not header: + headers.pop('cache-control', '') + else: + headers['Cache-Control'] = header + + def run_fixed(self, environ, start_response): + def fixing_start_response(status, headers, exc_info=None): + headers = Headers(headers) + self.fix_headers(environ, headers, status) + return start_response(status, headers.to_wsgi_list(), exc_info) + return self.app(environ, fixing_start_response) + + def __call__(self, environ, start_response): + ua = UserAgent(environ) + if ua.browser != 'msie': + return self.app(environ, start_response) + return self.run_fixed(environ, start_response) diff --git a/app/lib/werkzeug/contrib/iterio.py b/app/lib/werkzeug/contrib/iterio.py new file mode 100644 index 0000000..c0ced37 --- /dev/null +++ b/app/lib/werkzeug/contrib/iterio.py @@ -0,0 +1,352 @@ +# -*- coding: utf-8 -*- +r""" + werkzeug.contrib.iterio + ~~~~~~~~~~~~~~~~~~~~~~~ + + This module implements a :class:`IterIO` that converts an iterator into + a stream object and the other way round. Converting streams into + iterators requires the `greenlet`_ module. + + To convert an iterator into a stream all you have to do is to pass it + directly to the :class:`IterIO` constructor. In this example we pass it + a newly created generator:: + + def foo(): + yield "something\n" + yield "otherthings" + stream = IterIO(foo()) + print stream.read() # read the whole iterator + + The other way round works a bit different because we have to ensure that + the code execution doesn't take place yet. An :class:`IterIO` call with a + callable as first argument does two things. The function itself is passed + an :class:`IterIO` stream it can feed. The object returned by the + :class:`IterIO` constructor on the other hand is not an stream object but + an iterator:: + + def foo(stream): + stream.write("some") + stream.write("thing") + stream.flush() + stream.write("otherthing") + iterator = IterIO(foo) + print iterator.next() # prints something + print iterator.next() # prints otherthing + iterator.next() # raises StopIteration + + .. _greenlet: https://github.com/python-greenlet/greenlet + + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. 
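The `iterio` docstring examples above use Python 2 `print` statements; the same round trips in Python 3 syntax look like this (the writer direction needs the `greenlet` package installed):

```python
from werkzeug.contrib.iterio import IterIO

# iterator -> stream
stream = IterIO(iter([u"something\n", u"otherthings"]))
assert stream.read() == u"something\notherthings"

# stream -> iterator: each flush() (and the final close) yields one item
def producer(stream):
    stream.write(u"some")
    stream.write(u"thing")
    stream.flush()
    stream.write(u"otherthing")

iterator = IterIO(producer)       # requires greenlet
assert next(iterator) == u"something"
assert next(iterator) == u"otherthing"
```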
+""" +try: + import greenlet +except ImportError: + greenlet = None + +from werkzeug._compat import implements_iterator + + +def _mixed_join(iterable, sentinel): + """concatenate any string type in an intelligent way.""" + iterator = iter(iterable) + first_item = next(iterator, sentinel) + if isinstance(first_item, bytes): + return first_item + b''.join(iterator) + return first_item + u''.join(iterator) + + +def _newline(reference_string): + if isinstance(reference_string, bytes): + return b'\n' + return u'\n' + + +@implements_iterator +class IterIO(object): + + """Instances of this object implement an interface compatible with the + standard Python :class:`file` object. Streams are either read-only or + write-only depending on how the object is created. + + If the first argument is an iterable a file like object is returned that + returns the contents of the iterable. In case the iterable is empty + read operations will return the sentinel value. + + If the first argument is a callable then the stream object will be + created and passed to that function. The caller itself however will + not receive a stream but an iterable. The function will be be executed + step by step as something iterates over the returned iterable. Each + call to :meth:`flush` will create an item for the iterable. If + :meth:`flush` is called without any writes in-between the sentinel + value will be yielded. + + Note for Python 3: due to the incompatible interface of bytes and + streams you should set the sentinel value explicitly to an empty + bytestring (``b''``) if you are expecting to deal with bytes as + otherwise the end of the stream is marked with the wrong sentinel + value. + + .. versionadded:: 0.9 + `sentinel` parameter was added. + """ + + def __new__(cls, obj, sentinel=''): + try: + iterator = iter(obj) + except TypeError: + return IterI(obj, sentinel) + return IterO(iterator, sentinel) + + def __iter__(self): + return self + + def tell(self): + if self.closed: + raise ValueError('I/O operation on closed file') + return self.pos + + def isatty(self): + if self.closed: + raise ValueError('I/O operation on closed file') + return False + + def seek(self, pos, mode=0): + if self.closed: + raise ValueError('I/O operation on closed file') + raise IOError(9, 'Bad file descriptor') + + def truncate(self, size=None): + if self.closed: + raise ValueError('I/O operation on closed file') + raise IOError(9, 'Bad file descriptor') + + def write(self, s): + if self.closed: + raise ValueError('I/O operation on closed file') + raise IOError(9, 'Bad file descriptor') + + def writelines(self, list): + if self.closed: + raise ValueError('I/O operation on closed file') + raise IOError(9, 'Bad file descriptor') + + def read(self, n=-1): + if self.closed: + raise ValueError('I/O operation on closed file') + raise IOError(9, 'Bad file descriptor') + + def readlines(self, sizehint=0): + if self.closed: + raise ValueError('I/O operation on closed file') + raise IOError(9, 'Bad file descriptor') + + def readline(self, length=None): + if self.closed: + raise ValueError('I/O operation on closed file') + raise IOError(9, 'Bad file descriptor') + + def flush(self): + if self.closed: + raise ValueError('I/O operation on closed file') + raise IOError(9, 'Bad file descriptor') + + def __next__(self): + if self.closed: + raise StopIteration() + line = self.readline() + if not line: + raise StopIteration() + return line + + +class IterI(IterIO): + + """Convert an stream into an iterator.""" + + def __new__(cls, func, sentinel=''): + if 
greenlet is None: + raise RuntimeError('IterI requires greenlet support') + stream = object.__new__(cls) + stream._parent = greenlet.getcurrent() + stream._buffer = [] + stream.closed = False + stream.sentinel = sentinel + stream.pos = 0 + + def run(): + func(stream) + stream.close() + + g = greenlet.greenlet(run, stream._parent) + while 1: + rv = g.switch() + if not rv: + return + yield rv[0] + + def close(self): + if not self.closed: + self.closed = True + self._flush_impl() + + def write(self, s): + if self.closed: + raise ValueError('I/O operation on closed file') + if s: + self.pos += len(s) + self._buffer.append(s) + + def writelines(self, list): + for item in list: + self.write(item) + + def flush(self): + if self.closed: + raise ValueError('I/O operation on closed file') + self._flush_impl() + + def _flush_impl(self): + data = _mixed_join(self._buffer, self.sentinel) + self._buffer = [] + if not data and self.closed: + self._parent.switch() + else: + self._parent.switch((data,)) + + +class IterO(IterIO): + + """Iter output. Wrap an iterator and give it a stream like interface.""" + + def __new__(cls, gen, sentinel=''): + self = object.__new__(cls) + self._gen = gen + self._buf = None + self.sentinel = sentinel + self.closed = False + self.pos = 0 + return self + + def __iter__(self): + return self + + def _buf_append(self, string): + '''Replace string directly without appending to an empty string, + avoiding type issues.''' + if not self._buf: + self._buf = string + else: + self._buf += string + + def close(self): + if not self.closed: + self.closed = True + if hasattr(self._gen, 'close'): + self._gen.close() + + def seek(self, pos, mode=0): + if self.closed: + raise ValueError('I/O operation on closed file') + if mode == 1: + pos += self.pos + elif mode == 2: + self.read() + self.pos = min(self.pos, self.pos + pos) + return + elif mode != 0: + raise IOError('Invalid argument') + buf = [] + try: + tmp_end_pos = len(self._buf) + while pos > tmp_end_pos: + item = next(self._gen) + tmp_end_pos += len(item) + buf.append(item) + except StopIteration: + pass + if buf: + self._buf_append(_mixed_join(buf, self.sentinel)) + self.pos = max(0, pos) + + def read(self, n=-1): + if self.closed: + raise ValueError('I/O operation on closed file') + if n < 0: + self._buf_append(_mixed_join(self._gen, self.sentinel)) + result = self._buf[self.pos:] + self.pos += len(result) + return result + new_pos = self.pos + n + buf = [] + try: + tmp_end_pos = 0 if self._buf is None else len(self._buf) + while new_pos > tmp_end_pos or (self._buf is None and not buf): + item = next(self._gen) + tmp_end_pos += len(item) + buf.append(item) + except StopIteration: + pass + if buf: + self._buf_append(_mixed_join(buf, self.sentinel)) + + if self._buf is None: + return self.sentinel + + new_pos = max(0, new_pos) + try: + return self._buf[self.pos:new_pos] + finally: + self.pos = min(new_pos, len(self._buf)) + + def readline(self, length=None): + if self.closed: + raise ValueError('I/O operation on closed file') + + nl_pos = -1 + if self._buf: + nl_pos = self._buf.find(_newline(self._buf), self.pos) + buf = [] + try: + if self._buf is None: + pos = self.pos + else: + pos = len(self._buf) + while nl_pos < 0: + item = next(self._gen) + local_pos = item.find(_newline(item)) + buf.append(item) + if local_pos >= 0: + nl_pos = pos + local_pos + break + pos += len(item) + except StopIteration: + pass + if buf: + self._buf_append(_mixed_join(buf, self.sentinel)) + + if self._buf is None: + return self.sentinel + + if nl_pos < 
0: + new_pos = len(self._buf) + else: + new_pos = nl_pos + 1 + if length is not None and self.pos + length < new_pos: + new_pos = self.pos + length + try: + return self._buf[self.pos:new_pos] + finally: + self.pos = min(new_pos, len(self._buf)) + + def readlines(self, sizehint=0): + total = 0 + lines = [] + line = self.readline() + while line: + lines.append(line) + total += len(line) + if 0 < sizehint <= total: + break + line = self.readline() + return lines diff --git a/src/lib/werkzeug/contrib/jsrouting.py b/app/lib/werkzeug/contrib/jsrouting.py similarity index 94% rename from src/lib/werkzeug/contrib/jsrouting.py rename to app/lib/werkzeug/contrib/jsrouting.py index 9b7d0c0..d815892 100644 --- a/src/lib/werkzeug/contrib/jsrouting.py +++ b/app/lib/werkzeug/contrib/jsrouting.py @@ -6,7 +6,7 @@ Addon module that allows to create a JavaScript function from a map that generates rules. - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ try: @@ -20,18 +20,19 @@ def dumps(*args): from inspect import getmro from werkzeug.routing import NumberConverter +from werkzeug._compat import iteritems def render_template(name_parts, rules, converters): result = u'' if name_parts: - for idx in xrange(0, len(name_parts) - 1): + for idx in range(0, len(name_parts) - 1): name = u'.'.join(name_parts[:idx + 1]) result += u"if (typeof %s === 'undefined') %s = {}\n" % (name, name) result += '%s = ' % '.'.join(name_parts) result += """(function (server_name, script_name, subdomain, url_scheme) { - var converters = %(converters)s; - var rules = $rules; + var converters = [%(converters)s]; + var rules = %(rules)s; function in_array(array, value) { if (array.indexOf != undefined) { return array.indexOf(value) != -1; @@ -162,7 +163,9 @@ def render_template(name_parts, rules, converters): + '/' + lstrip(rv.path, '/'); } }; -})""" % {'converters': u', '.join(converters)} +})""" % {'converters': u', '.join(converters), + 'rules': rules} + return result @@ -181,6 +184,8 @@ def generate_map(map, name='url_map'): defined in your map to users. If your rules contain sensitive information, don't use JavaScript generation! """ + from warnings import warn + warn(DeprecationWarning('This module is deprecated')) map.update() rules = [] converters = [] @@ -190,7 +195,7 @@ def generate_map(map, name='url_map'): 'data': data } for is_dynamic, data in rule._trace] rule_converters = {} - for key, converter in rule._converters.iteritems(): + for key, converter in iteritems(rule._converters): js_func = js_to_url_function(converter) try: index = converters.index(js_func) diff --git a/src/lib/werkzeug/contrib/limiter.py b/app/lib/werkzeug/contrib/limiter.py similarity index 85% rename from src/lib/werkzeug/contrib/limiter.py rename to app/lib/werkzeug/contrib/limiter.py index d0dbcc5..6bc9d39 100644 --- a/src/lib/werkzeug/contrib/limiter.py +++ b/app/lib/werkzeug/contrib/limiter.py @@ -9,7 +9,7 @@ .. _Trac: http://trac.edgewall.org/ .. _Django: http://www.djangoproject.com/ - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from warnings import warn @@ -18,15 +18,20 @@ class StreamLimitMiddleware(object): + """Limits the input stream to a given number of bytes. 
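`IterO` above pulls from the wrapped iterator lazily, buffering only as much as a `read(n)` or `readline()` needs; a small sketch of that behaviour:

```python
from werkzeug.contrib.iterio import IterIO

stream = IterIO(iter([u"alpha\n", u"beta\n", u"gamma"]))
assert stream.read(3) == u"alp"        # consumes just enough of the iterator
assert stream.readline() == u"ha\n"    # picks up at the current position
assert stream.readlines() == [u"beta\n", u"gamma"]
stream.close()
```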
This is useful if you have a WSGI application that reads form data into memory (django for example) and you don't want users to harm the server by uploading tons of data. Default is 10MB + + .. versionchanged:: 0.9 + Deprecated middleware. """ def __init__(self, app, maximum_size=1024 * 1024 * 10): + warn(DeprecationWarning('This middleware is deprecated')) self.app = app self.maximum_size = maximum_size diff --git a/src/lib/werkzeug/contrib/lint.py b/app/lib/werkzeug/contrib/lint.py similarity index 94% rename from src/lib/werkzeug/contrib/lint.py rename to app/lib/werkzeug/contrib/lint.py index c7adff9..7ec80ce 100644 --- a/src/lib/werkzeug/contrib/lint.py +++ b/app/lib/werkzeug/contrib/lint.py @@ -16,29 +16,36 @@ It's strongly recommended to use it during development. - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ -from urlparse import urlparse +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse + from warnings import warn from werkzeug.datastructures import Headers from werkzeug.http import is_entity_header from werkzeug.wsgi import FileWrapper +from werkzeug._compat import string_types class WSGIWarning(Warning): + """Warning class for WSGI warnings.""" class HTTPWarning(Warning): + """Warning class for HTTP warnings.""" def check_string(context, obj, stacklevel=3): if type(obj) is not str: warn(WSGIWarning('%s requires bytestrings, got %s' % - (context, obj.__class__.__name__))) + (context, obj.__class__.__name__))) class InputStream(object): @@ -159,7 +166,7 @@ def close(self): if key not in ('expires', 'content-location') and \ is_entity_header(key): warn(HTTPWarning('entity header %r found in 304 ' - 'response' % key)) + 'response' % key)) if bytes_sent: warn(HTTPWarning('304 responses must not have a body')) elif 100 <= status_code < 200 or status_code == 204: @@ -183,6 +190,7 @@ def __del__(self): class LintMiddleware(object): + """This middleware wraps an application and warns on common errors. 
Among other thing it currently checks for the following problems: @@ -219,7 +227,7 @@ def check_environ(self, environ): 'wsgi.run_once'): if key not in environ: warn(WSGIWarning('required environment key %r not found' - % key), stacklevel=3) + % key), stacklevel=3) if environ['wsgi.version'] != (1, 0): warn(WSGIWarning('environ is not a WSGI 1.0 environ'), stacklevel=3) @@ -233,7 +241,6 @@ def check_environ(self, environ): warn(WSGIWarning('PATH_INFO does not start with a slash: %r' % path_info), stacklevel=3) - def check_start_response(self, status, headers, exc_info): check_string('status', status) status_code = status.split(None, 1)[0] @@ -260,7 +267,7 @@ def check_start_response(self, status, headers, exc_info): if name.lower() == 'status': warn(WSGIWarning('The status header is not supported due to ' 'conflicts with the CGI spec.'), - stacklevel=3) + stacklevel=3) if exc_info is not None and not isinstance(exc_info, tuple): warn(WSGIWarning('invalid value for exc_info'), stacklevel=3) @@ -273,7 +280,10 @@ def check_start_response(self, status, headers, exc_info): def check_headers(self, headers): etag = headers.get('etag') if etag is not None: - if etag.startswith('w/'): + if etag.startswith(('W/', 'w/')): + if etag.startswith('w/'): + warn(HTTPWarning('weak etag indicator should be upcase.'), + stacklevel=4) etag = etag[2:] if not (etag[:1] == etag[-1:] == '"'): warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4) @@ -285,7 +295,7 @@ def check_headers(self, headers): stacklevel=4) def check_iterator(self, app_iter): - if isinstance(app_iter, basestring): + if isinstance(app_iter, string_types): warn(WSGIWarning('application returned string. Response will ' 'send character for character to the client ' 'which will kill the performance. Return a ' @@ -313,7 +323,7 @@ def __call__(self, *args, **kwargs): def checking_start_response(*args, **kwargs): if len(args) not in (2, 3): warn(WSGIWarning('Invalid number of arguments: %s, expected ' - '2 or 3' % len(args), stacklevel=2)) + '2 or 3' % len(args), stacklevel=2)) if kwargs: warn(WSGIWarning('no keyword arguments allowed.')) diff --git a/app/lib/werkzeug/contrib/profiler.py b/app/lib/werkzeug/contrib/profiler.py new file mode 100644 index 0000000..be860af --- /dev/null +++ b/app/lib/werkzeug/contrib/profiler.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- +""" + werkzeug.contrib.profiler + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + This module provides a simple WSGI profiler middleware for finding + bottlenecks in web application. It uses the :mod:`profile` or + :mod:`cProfile` module to do the profiling and writes the stats to the + stream provided (defaults to stderr). + + Example usage:: + + from werkzeug.contrib.profiler import ProfilerMiddleware + app = ProfilerMiddleware(app) + + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" +import sys +import time +import os.path +try: + try: + from cProfile import Profile + except ImportError: + from profile import Profile + from pstats import Stats + available = True +except ImportError: + available = False + + +class MergeStream(object): + + """An object that redirects `write` calls to multiple streams. 
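`LintMiddleware` above reports violations through the standard `warnings` machinery as `WSGIWarning`/`HTTPWarning`; a development-time wiring sketch, with an optional filter that escalates the warnings into hard errors:

```python
import warnings

from werkzeug.contrib.lint import LintMiddleware

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

app = LintMiddleware(app)
warnings.simplefilter('error')   # optional: make lint warnings fail loudly
```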
+ Use this to log to both `sys.stdout` and a file:: + + f = open('profiler.log', 'w') + stream = MergeStream(sys.stdout, f) + profiler = ProfilerMiddleware(app, stream) + """ + + def __init__(self, *streams): + if not streams: + raise TypeError('at least one stream must be given') + self.streams = streams + + def write(self, data): + for stream in self.streams: + stream.write(data) + + +class ProfilerMiddleware(object): + + """Simple profiler middleware. Wraps a WSGI application and profiles + a request. This intentionally buffers the response so that timings are + more exact. + + By giving the `profile_dir` argument, pstat.Stats files are saved to that + directory, one file per request. Without it, a summary is printed to + `stream` instead. + + For the exact meaning of `sort_by` and `restrictions` consult the + :mod:`profile` documentation. + + .. versionadded:: 0.9 + Added support for `restrictions` and `profile_dir`. + + :param app: the WSGI application to profile. + :param stream: the stream for the profiled stats. defaults to stderr. + :param sort_by: a tuple of columns to sort the result by. + :param restrictions: a tuple of profiling strictions, not used if dumping + to `profile_dir`. + :param profile_dir: directory name to save pstat files + """ + + def __init__(self, app, stream=None, + sort_by=('time', 'calls'), restrictions=(), profile_dir=None): + if not available: + raise RuntimeError('the profiler is not available because ' + 'profile or pstat is not installed.') + self._app = app + self._stream = stream or sys.stdout + self._sort_by = sort_by + self._restrictions = restrictions + self._profile_dir = profile_dir + + def __call__(self, environ, start_response): + response_body = [] + + def catching_start_response(status, headers, exc_info=None): + start_response(status, headers, exc_info) + return response_body.append + + def runapp(): + appiter = self._app(environ, catching_start_response) + response_body.extend(appiter) + if hasattr(appiter, 'close'): + appiter.close() + + p = Profile() + start = time.time() + p.runcall(runapp) + body = b''.join(response_body) + elapsed = time.time() - start + + if self._profile_dir is not None: + prof_filename = os.path.join(self._profile_dir, + '%s.%s.%06dms.%d.prof' % ( + environ['REQUEST_METHOD'], + environ.get('PATH_INFO').strip( + '/').replace('/', '.') or 'root', + elapsed * 1000.0, + time.time() + )) + p.dump_stats(prof_filename) + + else: + stats = Stats(p, stream=self._stream) + stats.sort_stats(*self._sort_by) + + self._stream.write('-' * 80) + self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO')) + stats.print_stats(*self._restrictions) + self._stream.write('-' * 80 + '\n\n') + + return [body] + + +def make_action(app_factory, hostname='localhost', port=5000, + threaded=False, processes=1, stream=None, + sort_by=('time', 'calls'), restrictions=()): + """Return a new callback for :mod:`werkzeug.script` that starts a local + server with the profiler enabled. 
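`ProfilerMiddleware` above either prints a summary to a stream or, when `profile_dir` is given, dumps one `pstats` file per request. A sketch of the per-request-file mode; the directory name is an arbitrary example:

```python
import os

from werkzeug.contrib.profiler import ProfilerMiddleware

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'profiled']

os.makedirs('./profiles', exist_ok=True)
# one pstats file per request instead of a printed summary
app = ProfilerMiddleware(app, profile_dir='./profiles')
```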
+ + :: + + from werkzeug.contrib import profiler + action_profile = profiler.make_action(make_app) + """ + def action(hostname=('h', hostname), port=('p', port), + threaded=threaded, processes=processes): + """Start a new development server.""" + from werkzeug.serving import run_simple + app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions) + run_simple(hostname, port, app, False, None, threaded, processes) + return action diff --git a/app/lib/werkzeug/contrib/securecookie.py b/app/lib/werkzeug/contrib/securecookie.py new file mode 100644 index 0000000..bc75726 --- /dev/null +++ b/app/lib/werkzeug/contrib/securecookie.py @@ -0,0 +1,323 @@ +# -*- coding: utf-8 -*- +r""" + werkzeug.contrib.securecookie + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + This module implements a cookie that is not alterable from the client + because it adds a checksum the server checks for. You can use it as + session replacement if all you have is a user id or something to mark + a logged in user. + + Keep in mind that the data is still readable from the client as a + normal cookie is. However you don't have to store and flush the + sessions you have at the server. + + Example usage: + + >>> from werkzeug.contrib.securecookie import SecureCookie + >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef") + + Dumping into a string so that one can store it in a cookie: + + >>> value = x.serialize() + + Loading from that string again: + + >>> x = SecureCookie.unserialize(value, "deadbeef") + >>> x["baz"] + (1, 2, 3) + + If someone modifies the cookie and the checksum is wrong the unserialize + method will fail silently and return a new empty `SecureCookie` object. + + Keep in mind that the values will be visible in the cookie so do not + store data in a cookie you don't want the user to see. + + Application Integration + ======================= + + If you are using the werkzeug request objects you could integrate the + secure cookie into your application like this:: + + from werkzeug.utils import cached_property + from werkzeug.wrappers import BaseRequest + from werkzeug.contrib.securecookie import SecureCookie + + # don't use this key but a different one; you could just use + # os.urandom(20) to get something random + SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea' + + class Request(BaseRequest): + + @cached_property + def client_session(self): + data = self.cookies.get('session_data') + if not data: + return SecureCookie(secret_key=SECRET_KEY) + return SecureCookie.unserialize(data, SECRET_KEY) + + def application(environ, start_response): + request = Request(environ) + + # get a response object here + response = ... + + if request.client_session.should_save: + session_data = request.client_session.serialize() + response.set_cookie('session_data', session_data, + httponly=True) + return response(environ, start_response) + + A less verbose integration can be achieved by using shorthand methods:: + + class Request(BaseRequest): + + @cached_property + def client_session(self): + return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET) + + def application(environ, start_response): + request = Request(environ) + + # get a response object here + response = ... + + request.client_session.save_cookie(response) + return response(environ, start_response) + + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. 
+""" +import pickle +import base64 +from hmac import new as hmac +from time import time +from hashlib import sha1 as _default_hash + +from werkzeug._compat import iteritems, text_type +from werkzeug.urls import url_quote_plus, url_unquote_plus +from werkzeug._internal import _date_to_unix +from werkzeug.contrib.sessions import ModificationTrackingDict +from werkzeug.security import safe_str_cmp +from werkzeug._compat import to_native + + +class UnquoteError(Exception): + + """Internal exception used to signal failures on quoting.""" + + +class SecureCookie(ModificationTrackingDict): + + """Represents a secure cookie. You can subclass this class and provide + an alternative mac method. The import thing is that the mac method + is a function with a similar interface to the hashlib. Required + methods are update() and digest(). + + Example usage: + + >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef") + >>> x["foo"] + 42 + >>> x["baz"] + (1, 2, 3) + >>> x["blafasel"] = 23 + >>> x.should_save + True + + :param data: the initial data. Either a dict, list of tuples or `None`. + :param secret_key: the secret key. If not set `None` or not specified + it has to be set before :meth:`serialize` is called. + :param new: The initial value of the `new` flag. + """ + + #: The hash method to use. This has to be a module with a new function + #: or a function that creates a hashlib object. Such as `hashlib.md5` + #: Subclasses can override this attribute. The default hash is sha1. + #: Make sure to wrap this in staticmethod() if you store an arbitrary + #: function there such as hashlib.sha1 which might be implemented + #: as a function. + hash_method = staticmethod(_default_hash) + + #: the module used for serialization. Unless overriden by subclasses + #: the standard pickle module is used. + serialization_method = pickle + + #: if the contents should be base64 quoted. This can be disabled if the + #: serialization process returns cookie safe strings only. + quote_base64 = True + + def __init__(self, data=None, secret_key=None, new=True): + ModificationTrackingDict.__init__(self, data or ()) + # explicitly convert it into a bytestring because python 2.6 + # no longer performs an implicit string conversion on hmac + if secret_key is not None: + secret_key = bytes(secret_key) + self.secret_key = secret_key + self.new = new + + def __repr__(self): + return '<%s %s%s>' % ( + self.__class__.__name__, + dict.__repr__(self), + self.should_save and '*' or '' + ) + + @property + def should_save(self): + """True if the session should be saved. By default this is only true + for :attr:`modified` cookies, not :attr:`new`. + """ + return self.modified + + @classmethod + def quote(cls, value): + """Quote the value for the cookie. This can be any object supported + by :attr:`serialization_method`. + + :param value: the value to quote. + """ + if cls.serialization_method is not None: + value = cls.serialization_method.dumps(value) + if cls.quote_base64: + value = b''.join(base64.b64encode(value).splitlines()).strip() + return value + + @classmethod + def unquote(cls, value): + """Unquote the value for the cookie. If unquoting does not work a + :exc:`UnquoteError` is raised. + + :param value: the value to unquote. + """ + try: + if cls.quote_base64: + value = base64.b64decode(value) + if cls.serialization_method is not None: + value = cls.serialization_method.loads(value) + return value + except Exception: + # unfortunately pickle and other serialization modules can + # cause pretty every error here. 
if we get one we catch it + # and convert it into an UnquoteError + raise UnquoteError() + + def serialize(self, expires=None): + """Serialize the secure cookie into a string. + + If expires is provided, the session will be automatically invalidated + after expiration when you unseralize it. This provides better + protection against session cookie theft. + + :param expires: an optional expiration date for the cookie (a + :class:`datetime.datetime` object) + """ + if self.secret_key is None: + raise RuntimeError('no secret key defined') + if expires: + self['_expires'] = _date_to_unix(expires) + result = [] + mac = hmac(self.secret_key, None, self.hash_method) + for key, value in sorted(self.items()): + result.append(('%s=%s' % ( + url_quote_plus(key), + self.quote(value).decode('ascii') + )).encode('ascii')) + mac.update(b'|' + result[-1]) + return b'?'.join([ + base64.b64encode(mac.digest()).strip(), + b'&'.join(result) + ]) + + @classmethod + def unserialize(cls, string, secret_key): + """Load the secure cookie from a serialized string. + + :param string: the cookie value to unserialize. + :param secret_key: the secret key used to serialize the cookie. + :return: a new :class:`SecureCookie`. + """ + if isinstance(string, text_type): + string = string.encode('utf-8', 'replace') + if isinstance(secret_key, text_type): + secret_key = secret_key.encode('utf-8', 'replace') + try: + base64_hash, data = string.split(b'?', 1) + except (ValueError, IndexError): + items = () + else: + items = {} + mac = hmac(secret_key, None, cls.hash_method) + for item in data.split(b'&'): + mac.update(b'|' + item) + if b'=' not in item: + items = None + break + key, value = item.split(b'=', 1) + # try to make the key a string + key = url_unquote_plus(key.decode('ascii')) + try: + key = to_native(key) + except UnicodeError: + pass + items[key] = value + + # no parsing error and the mac looks okay, we can now + # sercurely unpickle our cookie. + try: + client_hash = base64.b64decode(base64_hash) + except TypeError: + items = client_hash = None + if items is not None and safe_str_cmp(client_hash, mac.digest()): + try: + for key, value in iteritems(items): + items[key] = cls.unquote(value) + except UnquoteError: + items = () + else: + if '_expires' in items: + if time() > items['_expires']: + items = () + else: + del items['_expires'] + else: + items = () + return cls(items, secret_key, False) + + @classmethod + def load_cookie(cls, request, key='session', secret_key=None): + """Loads a :class:`SecureCookie` from a cookie in request. If the + cookie is not set, a new :class:`SecureCookie` instanced is + returned. + + :param request: a request object that has a `cookies` attribute + which is a dict of all cookie values. + :param key: the name of the cookie. + :param secret_key: the secret key used to unquote the cookie. + Always provide the value even though it has + no default! + """ + data = request.cookies.get(key) + if not data: + return cls(secret_key=secret_key) + return cls.unserialize(data, secret_key) + + def save_cookie(self, response, key='session', expires=None, + session_expires=None, max_age=None, path='/', domain=None, + secure=None, httponly=False, force=False): + """Saves the SecureCookie in a cookie on response object. All + parameters that are not described here are forwarded directly + to :meth:`~BaseResponse.set_cookie`. + + :param response: a response object that has a + :meth:`~BaseResponse.set_cookie` method. + :param key: the name of the cookie. 
+    :param session_expires: the expiration date of the secure cookie
+                            stored information. If this is not provided
+                            the cookie `expires` date is used instead.
+        """
+        if force or self.should_save:
+            data = self.serialize(session_expires or expires)
+            response.set_cookie(key, data, expires=expires, max_age=max_age,
+                                path=path, domain=domain, secure=secure,
+                                httponly=httponly)
diff --git a/app/lib/werkzeug/contrib/sessions.py b/app/lib/werkzeug/contrib/sessions.py
new file mode 100644
index 0000000..a9c3aa7
--- /dev/null
+++ b/app/lib/werkzeug/contrib/sessions.py
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+r"""
+    werkzeug.contrib.sessions
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module contains some helper classes that help add session
+    support to a Python WSGI application. For full client-side session
+    storage see :mod:`~werkzeug.contrib.securecookie` which implements a
+    secure, client-side session storage.
+
+
+    Application Integration
+    =======================
+
+    ::
+
+        from werkzeug.contrib.sessions import SessionMiddleware, \
+            FilesystemSessionStore
+
+        app = SessionMiddleware(app, FilesystemSessionStore())
+
+    The current session will then appear in the WSGI environment as
+    `werkzeug.session`. It's recommended not to use the middleware but
+    the stores directly in the application; only for very simple scripts
+    is a session middleware sufficient.
+
+    This module does not implement methods or ways to check if a session
+    is expired. That should be done by a cron job and is storage
+    specific. For example, to prune unused filesystem sessions one could
+    check the modified time of the files. If sessions are stored in the
+    database the new() method should add an expiration timestamp for the
+    session.
+
+    For better flexibility it's recommended not to use the middleware
+    but the store and session object directly in the application
+    dispatching::
+
+        session_store = FilesystemSessionStore()
+
+        def application(environ, start_response):
+            request = Request(environ)
+            sid = request.cookies.get('cookie_name')
+            if sid is None:
+                request.session = session_store.new()
+            else:
+                request.session = session_store.get(sid)
+            response = get_the_response_object(request)
+            if request.session.should_save:
+                session_store.save(request.session)
+                response.set_cookie('cookie_name', request.session.sid)
+            return response(environ, start_response)
+
+    :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
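+
+    For example, a cron job could prune stale filesystem sessions along
+    these lines (a sketch only; the one-hour lifetime and the helper
+    name ``prune_sessions`` are illustrative, not part of this module)::
+
+        import os
+        import time
+
+        def prune_sessions(store, max_age=3600):
+            # store is a FilesystemSessionStore; files untouched for
+            # longer than max_age seconds are considered expired
+            for sid in store.list():
+                fn = store.get_session_filename(sid)
+                try:
+                    if os.path.getmtime(fn) < time.time() - max_age:
+                        os.unlink(fn)
+                except OSError:
+                    pass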
+""" +import re +import os +import tempfile +from os import path +from time import time +from random import random +from hashlib import sha1 +from pickle import dump, load, HIGHEST_PROTOCOL + +from werkzeug.datastructures import CallbackDict +from werkzeug.utils import dump_cookie, parse_cookie +from werkzeug.wsgi import ClosingIterator +from werkzeug.posixemulation import rename +from werkzeug._compat import PY2, text_type +from werkzeug.filesystem import get_filesystem_encoding + + +_sha1_re = re.compile(r'^[a-f0-9]{40}$') + + +def _urandom(): + if hasattr(os, 'urandom'): + return os.urandom(30) + return text_type(random()).encode('ascii') + + +def generate_key(salt=None): + if salt is None: + salt = repr(salt).encode('ascii') + return sha1(b''.join([ + salt, + str(time()).encode('ascii'), + _urandom() + ])).hexdigest() + + +class ModificationTrackingDict(CallbackDict): + __slots__ = ('modified',) + + def __init__(self, *args, **kwargs): + def on_update(self): + self.modified = True + self.modified = False + CallbackDict.__init__(self, on_update=on_update) + dict.update(self, *args, **kwargs) + + def copy(self): + """Create a flat copy of the dict.""" + missing = object() + result = object.__new__(self.__class__) + for name in self.__slots__: + val = getattr(self, name, missing) + if val is not missing: + setattr(result, name, val) + return result + + def __copy__(self): + return self.copy() + + +class Session(ModificationTrackingDict): + + """Subclass of a dict that keeps track of direct object changes. Changes + in mutable structures are not tracked, for those you have to set + `modified` to `True` by hand. + """ + __slots__ = ModificationTrackingDict.__slots__ + ('sid', 'new') + + def __init__(self, data, sid, new=False): + ModificationTrackingDict.__init__(self, data) + self.sid = sid + self.new = new + + def __repr__(self): + return '<%s %s%s>' % ( + self.__class__.__name__, + dict.__repr__(self), + self.should_save and '*' or '' + ) + + @property + def should_save(self): + """True if the session should be saved. + + .. versionchanged:: 0.6 + By default the session is now only saved if the session is + modified, not if it is new like it was before. + """ + return self.modified + + +class SessionStore(object): + + """Baseclass for all session stores. The Werkzeug contrib module does not + implement any useful stores besides the filesystem store, application + developers are encouraged to create their own stores. + + :param session_class: The session class to use. Defaults to + :class:`Session`. + """ + + def __init__(self, session_class=None): + if session_class is None: + session_class = Session + self.session_class = session_class + + def is_valid_key(self, key): + """Check if a key has the correct format.""" + return _sha1_re.match(key) is not None + + def generate_key(self, salt=None): + """Simple function that generates a new session key.""" + return generate_key(salt) + + def new(self): + """Generate a new session.""" + return self.session_class({}, self.generate_key(), True) + + def save(self, session): + """Save a session.""" + + def save_if_modified(self, session): + """Save if a session class wants an update.""" + if session.should_save: + self.save(session) + + def delete(self, session): + """Delete a session.""" + + def get(self, sid): + """Get a session for this sid or a new session object. This method + has to check if the session key is valid and create a new session if + that wasn't the case. 
+ """ + return self.session_class({}, sid, True) + + +#: used for temporary files by the filesystem session store +_fs_transaction_suffix = '.__wz_sess' + + +class FilesystemSessionStore(SessionStore): + + """Simple example session store that saves sessions on the filesystem. + This store works best on POSIX systems and Windows Vista / Windows + Server 2008 and newer. + + .. versionchanged:: 0.6 + `renew_missing` was added. Previously this was considered `True`, + now the default changed to `False` and it can be explicitly + deactivated. + + :param path: the path to the folder used for storing the sessions. + If not provided the default temporary directory is used. + :param filename_template: a string template used to give the session + a filename. ``%s`` is replaced with the + session id. + :param session_class: The session class to use. Defaults to + :class:`Session`. + :param renew_missing: set to `True` if you want the store to + give the user a new sid if the session was + not yet saved. + """ + + def __init__(self, path=None, filename_template='werkzeug_%s.sess', + session_class=None, renew_missing=False, mode=0o644): + SessionStore.__init__(self, session_class) + if path is None: + path = tempfile.gettempdir() + self.path = path + if isinstance(filename_template, text_type) and PY2: + filename_template = filename_template.encode( + get_filesystem_encoding()) + assert not filename_template.endswith(_fs_transaction_suffix), \ + 'filename templates may not end with %s' % _fs_transaction_suffix + self.filename_template = filename_template + self.renew_missing = renew_missing + self.mode = mode + + def get_session_filename(self, sid): + # out of the box, this should be a strict ASCII subset but + # you might reconfigure the session object to have a more + # arbitrary string. + if isinstance(sid, text_type) and PY2: + sid = sid.encode(get_filesystem_encoding()) + return path.join(self.path, self.filename_template % sid) + + def save(self, session): + fn = self.get_session_filename(session.sid) + fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix, + dir=self.path) + f = os.fdopen(fd, 'wb') + try: + dump(dict(session), f, HIGHEST_PROTOCOL) + finally: + f.close() + try: + rename(tmp, fn) + os.chmod(fn, self.mode) + except (IOError, OSError): + pass + + def delete(self, session): + fn = self.get_session_filename(session.sid) + try: + os.unlink(fn) + except OSError: + pass + + def get(self, sid): + if not self.is_valid_key(sid): + return self.new() + try: + f = open(self.get_session_filename(sid), 'rb') + except IOError: + if self.renew_missing: + return self.new() + data = {} + else: + try: + try: + data = load(f) + except Exception: + data = {} + finally: + f.close() + return self.session_class(data, sid, False) + + def list(self): + """Lists all sessions in the store. + + .. versionadded:: 0.6 + """ + before, after = self.filename_template.split('%s', 1) + filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before), + re.escape(after))) + result = [] + for filename in os.listdir(self.path): + #: this is a session that is still being saved. + if filename.endswith(_fs_transaction_suffix): + continue + match = filename_re.match(filename) + if match is not None: + result.append(match.group(1)) + return result + + +class SessionMiddleware(object): + + """A simple middleware that puts the session object of a store provided + into the WSGI environ. It automatically sets cookies and restores + sessions. 
+ + However a middleware is not the preferred solution because it won't be as + fast as sessions managed by the application itself and will put a key into + the WSGI environment only relevant for the application which is against + the concept of WSGI. + + The cookie parameters are the same as for the :func:`~dump_cookie` + function just prefixed with ``cookie_``. Additionally `max_age` is + called `cookie_age` and not `cookie_max_age` because of backwards + compatibility. + """ + + def __init__(self, app, store, cookie_name='session_id', + cookie_age=None, cookie_expires=None, cookie_path='/', + cookie_domain=None, cookie_secure=None, + cookie_httponly=False, environ_key='werkzeug.session'): + self.app = app + self.store = store + self.cookie_name = cookie_name + self.cookie_age = cookie_age + self.cookie_expires = cookie_expires + self.cookie_path = cookie_path + self.cookie_domain = cookie_domain + self.cookie_secure = cookie_secure + self.cookie_httponly = cookie_httponly + self.environ_key = environ_key + + def __call__(self, environ, start_response): + cookie = parse_cookie(environ.get('HTTP_COOKIE', '')) + sid = cookie.get(self.cookie_name, None) + if sid is None: + session = self.store.new() + else: + session = self.store.get(sid) + environ[self.environ_key] = session + + def injecting_start_response(status, headers, exc_info=None): + if session.should_save: + self.store.save(session) + headers.append(('Set-Cookie', dump_cookie(self.cookie_name, + session.sid, self.cookie_age, + self.cookie_expires, self.cookie_path, + self.cookie_domain, self.cookie_secure, + self.cookie_httponly))) + return start_response(status, headers, exc_info) + return ClosingIterator(self.app(environ, injecting_start_response), + lambda: self.store.save_if_modified(session)) diff --git a/src/lib/werkzeug/contrib/testtools.py b/app/lib/werkzeug/contrib/testtools.py similarity index 95% rename from src/lib/werkzeug/contrib/testtools.py rename to app/lib/werkzeug/contrib/testtools.py index 9bbf76a..3dab6db 100644 --- a/src/lib/werkzeug/contrib/testtools.py +++ b/app/lib/werkzeug/contrib/testtools.py @@ -9,7 +9,7 @@ A response wrapper which adds various cached attributes for simplified assertions on various content types. - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from werkzeug.utils import cached_property, import_string @@ -21,6 +21,7 @@ class ContentAccessors(object): + """ A mixin class for response objects that provides a couple of useful accessors for unittesting. @@ -50,7 +51,7 @@ def lxml(self): from lxml.html import fromstring except ImportError: fromstring = etree.HTML - if self.mimetype=='text/html': + if self.mimetype == 'text/html': return fromstring(self.data) return etree.XML(self.data) lxml = cached_property(lxml) @@ -68,4 +69,5 @@ def json(self): class TestResponse(Response, ContentAccessors): + """Pass this to `werkzeug.test.Client` for easier unittesting.""" diff --git a/app/lib/werkzeug/contrib/wrappers.py b/app/lib/werkzeug/contrib/wrappers.py new file mode 100644 index 0000000..6c86452 --- /dev/null +++ b/app/lib/werkzeug/contrib/wrappers.py @@ -0,0 +1,284 @@ +# -*- coding: utf-8 -*- +""" + werkzeug.contrib.wrappers + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Extra wrappers or mixins contributed by the community. These wrappers can + be mixed in into request objects to add extra functionality. 
+ + Example:: + + from werkzeug.wrappers import Request as RequestBase + from werkzeug.contrib.wrappers import JSONRequestMixin + + class Request(RequestBase, JSONRequestMixin): + pass + + Afterwards this request object provides the extra functionality of the + :class:`JSONRequestMixin`. + + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" +import codecs +try: + from simplejson import loads +except ImportError: + from json import loads + +from werkzeug.exceptions import BadRequest +from werkzeug.utils import cached_property +from werkzeug.http import dump_options_header, parse_options_header +from werkzeug._compat import wsgi_decoding_dance + + +def is_known_charset(charset): + """Checks if the given charset is known to Python.""" + try: + codecs.lookup(charset) + except LookupError: + return False + return True + + +class JSONRequestMixin(object): + + """Add json method to a request object. This will parse the input data + through simplejson if possible. + + :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type + is not json or if the data itself cannot be parsed as json. + """ + + @cached_property + def json(self): + """Get the result of simplejson.loads if possible.""" + if 'json' not in self.environ.get('CONTENT_TYPE', ''): + raise BadRequest('Not a JSON request') + try: + return loads(self.data.decode(self.charset, self.encoding_errors)) + except Exception: + raise BadRequest('Unable to read JSON request') + + +class ProtobufRequestMixin(object): + + """Add protobuf parsing method to a request object. This will parse the + input data through `protobuf`_ if possible. + + :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type + is not protobuf or if the data itself cannot be parsed property. + + .. _protobuf: http://code.google.com/p/protobuf/ + """ + + #: by default the :class:`ProtobufRequestMixin` will raise a + #: :exc:`~werkzeug.exceptions.BadRequest` if the object is not + #: initialized. You can bypass that check by setting this + #: attribute to `False`. + protobuf_check_initialization = True + + def parse_protobuf(self, proto_type): + """Parse the data into an instance of proto_type.""" + if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''): + raise BadRequest('Not a Protobuf request') + + obj = proto_type() + try: + obj.ParseFromString(self.data) + except Exception: + raise BadRequest("Unable to parse Protobuf request") + + # Fail if not all required fields are set + if self.protobuf_check_initialization and not obj.IsInitialized(): + raise BadRequest("Partial Protobuf request") + + return obj + + +class RoutingArgsRequestMixin(object): + + """This request mixin adds support for the wsgiorg routing args + `specification`_. + + .. _specification: https://wsgi.readthedocs.io/en/latest/specifications/routing_args.html + """ + + def _get_routing_args(self): + return self.environ.get('wsgiorg.routing_args', (()))[0] + + def _set_routing_args(self, value): + if self.shallow: + raise RuntimeError('A shallow request tried to modify the WSGI ' + 'environment. 
If you really want to do that, ' + 'set `shallow` to False.') + self.environ['wsgiorg.routing_args'] = (value, self.routing_vars) + + routing_args = property(_get_routing_args, _set_routing_args, doc=''' + The positional URL arguments as `tuple`.''') + del _get_routing_args, _set_routing_args + + def _get_routing_vars(self): + rv = self.environ.get('wsgiorg.routing_args') + if rv is not None: + return rv[1] + rv = {} + if not self.shallow: + self.routing_vars = rv + return rv + + def _set_routing_vars(self, value): + if self.shallow: + raise RuntimeError('A shallow request tried to modify the WSGI ' + 'environment. If you really want to do that, ' + 'set `shallow` to False.') + self.environ['wsgiorg.routing_args'] = (self.routing_args, value) + + routing_vars = property(_get_routing_vars, _set_routing_vars, doc=''' + The keyword URL arguments as `dict`.''') + del _get_routing_vars, _set_routing_vars + + +class ReverseSlashBehaviorRequestMixin(object): + + """This mixin reverses the trailing slash behavior of :attr:`script_root` + and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin` + directly on the paths. + + Because it changes the behavior or :class:`Request` this class has to be + mixed in *before* the actual request class:: + + class MyRequest(ReverseSlashBehaviorRequestMixin, Request): + pass + + This example shows the differences (for an application mounted on + `/application` and the request going to `/application/foo/bar`): + + +---------------+-------------------+---------------------+ + | | normal behavior | reverse behavior | + +===============+===================+=====================+ + | `script_root` | ``/application`` | ``/application/`` | + +---------------+-------------------+---------------------+ + | `path` | ``/foo/bar`` | ``foo/bar`` | + +---------------+-------------------+---------------------+ + """ + + @cached_property + def path(self): + """Requested path as unicode. This works a bit like the regular path + info in the WSGI environment but will not include a leading slash. + """ + path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '', + self.charset, self.encoding_errors) + return path.lstrip('/') + + @cached_property + def script_root(self): + """The root path of the script includling a trailing slash.""" + path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '', + self.charset, self.encoding_errors) + return path.rstrip('/') + '/' + + +class DynamicCharsetRequestMixin(object): + + """"If this mixin is mixed into a request class it will provide + a dynamic `charset` attribute. This means that if the charset is + transmitted in the content type headers it's used from there. + + Because it changes the behavior or :class:`Request` this class has + to be mixed in *before* the actual request class:: + + class MyRequest(DynamicCharsetRequestMixin, Request): + pass + + By default the request object assumes that the URL charset is the + same as the data charset. If the charset varies on each request + based on the transmitted data it's not a good idea to let the URLs + change based on that. Most browsers assume either utf-8 or latin1 + for the URLs if they have troubles figuring out. It's strongly + recommended to set the URL charset to utf-8:: + + class MyRequest(DynamicCharsetRequestMixin, Request): + url_charset = 'utf-8' + + .. versionadded:: 0.6 + """ + + #: the default charset that is assumed if the content type header + #: is missing or does not contain a charset parameter. 
The default + #: is latin1 which is what HTTP specifies as default charset. + #: You may however want to set this to utf-8 to better support + #: browsers that do not transmit a charset for incoming data. + default_charset = 'latin1' + + def unknown_charset(self, charset): + """Called if a charset was provided but is not supported by + the Python codecs module. By default latin1 is assumed then + to not lose any information, you may override this method to + change the behavior. + + :param charset: the charset that was not found. + :return: the replacement charset. + """ + return 'latin1' + + @cached_property + def charset(self): + """The charset from the content type.""" + header = self.environ.get('CONTENT_TYPE') + if header: + ct, options = parse_options_header(header) + charset = options.get('charset') + if charset: + if is_known_charset(charset): + return charset + return self.unknown_charset(charset) + return self.default_charset + + +class DynamicCharsetResponseMixin(object): + + """If this mixin is mixed into a response class it will provide + a dynamic `charset` attribute. This means that if the charset is + looked up and stored in the `Content-Type` header and updates + itself automatically. This also means a small performance hit but + can be useful if you're working with different charsets on + responses. + + Because the charset attribute is no a property at class-level, the + default value is stored in `default_charset`. + + Because it changes the behavior or :class:`Response` this class has + to be mixed in *before* the actual response class:: + + class MyResponse(DynamicCharsetResponseMixin, Response): + pass + + .. versionadded:: 0.6 + """ + + #: the default charset. + default_charset = 'utf-8' + + def _get_charset(self): + header = self.headers.get('content-type') + if header: + charset = parse_options_header(header)[1].get('charset') + if charset: + return charset + return self.default_charset + + def _set_charset(self, charset): + header = self.headers.get('content-type') + ct, options = parse_options_header(header) + if not ct: + raise TypeError('Cannot set charset if Content-Type ' + 'header is missing.') + options['charset'] = charset + self.headers['Content-Type'] = dump_options_header(ct, options) + + charset = property(_get_charset, _set_charset, doc=""" + The charset for the response. It's stored inside the + Content-Type header as a parameter.""") + del _get_charset, _set_charset diff --git a/app/lib/werkzeug/datastructures.py b/app/lib/werkzeug/datastructures.py new file mode 100644 index 0000000..1da50e2 --- /dev/null +++ b/app/lib/werkzeug/datastructures.py @@ -0,0 +1,2740 @@ +# -*- coding: utf-8 -*- +""" + werkzeug.datastructures + ~~~~~~~~~~~~~~~~~~~~~~~ + + This module provides mixins and classes with an immutable interface. + + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. 
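+
+    The immutable variants raise a :exc:`TypeError` on any modification,
+    for example:
+
+    >>> from werkzeug.datastructures import ImmutableList
+    >>> l = ImmutableList([1, 2, 3])
+    >>> l.append(4)
+    Traceback (most recent call last):
+        ...
+    TypeError: 'ImmutableList' objects are immutable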
+""" +import re +import codecs +import mimetypes +from copy import deepcopy +from itertools import repeat +from collections import Container, Iterable, Mapping, MutableSet + +from werkzeug._internal import _missing, _empty_stream +from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \ + PY2, text_type, integer_types, string_types, make_literal_wrapper, \ + to_native +from werkzeug.filesystem import get_filesystem_encoding + + +_locale_delim_re = re.compile(r'[_-]') + + +def is_immutable(self): + raise TypeError('%r objects are immutable' % self.__class__.__name__) + + +def iter_multi_items(mapping): + """Iterates over the items of a mapping yielding keys and values + without dropping any from more complex structures. + """ + if isinstance(mapping, MultiDict): + for item in iteritems(mapping, multi=True): + yield item + elif isinstance(mapping, dict): + for key, value in iteritems(mapping): + if isinstance(value, (tuple, list)): + for value in value: + yield key, value + else: + yield key, value + else: + for item in mapping: + yield item + + +def native_itermethods(names): + if not PY2: + return lambda x: x + + def setviewmethod(cls, name): + viewmethod_name = 'view%s' % name + viewmethod = lambda self, *a, **kw: ViewItems(self, name, 'view_%s' % name, *a, **kw) + viewmethod.__doc__ = \ + '"""`%s()` object providing a view on %s"""' % (viewmethod_name, name) + setattr(cls, viewmethod_name, viewmethod) + + def setitermethod(cls, name): + itermethod = getattr(cls, name) + setattr(cls, 'iter%s' % name, itermethod) + listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw)) + listmethod.__doc__ = \ + 'Like :py:meth:`iter%s`, but returns a list.' % name + setattr(cls, name, listmethod) + + def wrap(cls): + for name in names: + setitermethod(cls, name) + setviewmethod(cls, name) + return cls + return wrap + + +class ImmutableListMixin(object): + + """Makes a :class:`list` immutable. + + .. versionadded:: 0.5 + + :private: + """ + + _hash_cache = None + + def __hash__(self): + if self._hash_cache is not None: + return self._hash_cache + rv = self._hash_cache = hash(tuple(self)) + return rv + + def __reduce_ex__(self, protocol): + return type(self), (list(self),) + + def __delitem__(self, key): + is_immutable(self) + + def __iadd__(self, other): + is_immutable(self) + __imul__ = __iadd__ + + def __setitem__(self, key, value): + is_immutable(self) + + def append(self, item): + is_immutable(self) + remove = append + + def extend(self, iterable): + is_immutable(self) + + def insert(self, pos, value): + is_immutable(self) + + def pop(self, index=-1): + is_immutable(self) + + def reverse(self): + is_immutable(self) + + def sort(self, cmp=None, key=None, reverse=None): + is_immutable(self) + + +class ImmutableList(ImmutableListMixin, list): + + """An immutable :class:`list`. + + .. versionadded:: 0.5 + + :private: + """ + + def __repr__(self): + return '%s(%s)' % ( + self.__class__.__name__, + list.__repr__(self), + ) + + +class ImmutableDictMixin(object): + + """Makes a :class:`dict` immutable. + + .. 
versionadded:: 0.5 + + :private: + """ + _hash_cache = None + + @classmethod + def fromkeys(cls, keys, value=None): + instance = super(cls, cls).__new__(cls) + instance.__init__(zip(keys, repeat(value))) + return instance + + def __reduce_ex__(self, protocol): + return type(self), (dict(self),) + + def _iter_hashitems(self): + return iteritems(self) + + def __hash__(self): + if self._hash_cache is not None: + return self._hash_cache + rv = self._hash_cache = hash(frozenset(self._iter_hashitems())) + return rv + + def setdefault(self, key, default=None): + is_immutable(self) + + def update(self, *args, **kwargs): + is_immutable(self) + + def pop(self, key, default=None): + is_immutable(self) + + def popitem(self): + is_immutable(self) + + def __setitem__(self, key, value): + is_immutable(self) + + def __delitem__(self, key): + is_immutable(self) + + def clear(self): + is_immutable(self) + + +class ImmutableMultiDictMixin(ImmutableDictMixin): + + """Makes a :class:`MultiDict` immutable. + + .. versionadded:: 0.5 + + :private: + """ + + def __reduce_ex__(self, protocol): + return type(self), (list(iteritems(self, multi=True)),) + + def _iter_hashitems(self): + return iteritems(self, multi=True) + + def add(self, key, value): + is_immutable(self) + + def popitemlist(self): + is_immutable(self) + + def poplist(self, key): + is_immutable(self) + + def setlist(self, key, new_list): + is_immutable(self) + + def setlistdefault(self, key, default_list=None): + is_immutable(self) + + +class UpdateDictMixin(object): + + """Makes dicts call `self.on_update` on modifications. + + .. versionadded:: 0.5 + + :private: + """ + + on_update = None + + def calls_update(name): + def oncall(self, *args, **kw): + rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw) + if self.on_update is not None: + self.on_update(self) + return rv + oncall.__name__ = name + return oncall + + def setdefault(self, key, default=None): + modified = key not in self + rv = super(UpdateDictMixin, self).setdefault(key, default) + if modified and self.on_update is not None: + self.on_update(self) + return rv + + def pop(self, key, default=_missing): + modified = key in self + if default is _missing: + rv = super(UpdateDictMixin, self).pop(key) + else: + rv = super(UpdateDictMixin, self).pop(key, default) + if modified and self.on_update is not None: + self.on_update(self) + return rv + + __setitem__ = calls_update('__setitem__') + __delitem__ = calls_update('__delitem__') + clear = calls_update('clear') + popitem = calls_update('popitem') + update = calls_update('update') + del calls_update + + +class TypeConversionDict(dict): + + """Works like a regular dict but the :meth:`get` method can perform + type conversions. :class:`MultiDict` and :class:`CombinedMultiDict` + are subclasses of this class and provide the same feature. + + .. versionadded:: 0.5 + """ + + def get(self, key, default=None, type=None): + """Return the default value if the requested data doesn't exist. + If `type` is provided and is a callable it should convert the value, + return it or raise a :exc:`ValueError` if that is not possible. In + this case the function will return the default as if the value was not + found: + + >>> d = TypeConversionDict(foo='42', bar='blub') + >>> d.get('foo', type=int) + 42 + >>> d.get('bar', -1, type=int) + -1 + + :param key: The key to be looked up. + :param default: The default value to be returned if the key can't + be looked up. If not further specified `None` is + returned. 
+ :param type: A callable that is used to cast the value in the + :class:`MultiDict`. If a :exc:`ValueError` is raised + by this callable the default value is returned. + """ + try: + rv = self[key] + if type is not None: + rv = type(rv) + except (KeyError, ValueError): + rv = default + return rv + + +class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict): + + """Works like a :class:`TypeConversionDict` but does not support + modifications. + + .. versionadded:: 0.5 + """ + + def copy(self): + """Return a shallow mutable copy of this object. Keep in mind that + the standard library's :func:`copy` function is a no-op for this class + like for any other python immutable type (eg: :class:`tuple`). + """ + return TypeConversionDict(self) + + def __copy__(self): + return self + + +class ViewItems(object): + + def __init__(self, multi_dict, method, repr_name, *a, **kw): + self.__multi_dict = multi_dict + self.__method = method + self.__repr_name = repr_name + self.__a = a + self.__kw = kw + + def __get_items(self): + return getattr(self.__multi_dict, self.__method)(*self.__a, **self.__kw) + + def __repr__(self): + return '%s(%r)' % (self.__repr_name, list(self.__get_items())) + + def __iter__(self): + return iter(self.__get_items()) + + +@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) +class MultiDict(TypeConversionDict): + + """A :class:`MultiDict` is a dictionary subclass customized to deal with + multiple values for the same key which is for example used by the parsing + functions in the wrappers. This is necessary because some HTML form + elements pass multiple values for the same key. + + :class:`MultiDict` implements all standard dictionary methods. + Internally, it saves all values for a key as a list, but the standard dict + access methods will only return the first value for a key. If you want to + gain access to the other values, too, you have to use the `list` methods as + explained below. + + Basic Usage: + + >>> d = MultiDict([('a', 'b'), ('a', 'c')]) + >>> d + MultiDict([('a', 'b'), ('a', 'c')]) + >>> d['a'] + 'b' + >>> d.getlist('a') + ['b', 'c'] + >>> 'a' in d + True + + It behaves like a normal dict thus all dict functions will only return the + first value when multiple values for one key are found. + + From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a + subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will + render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP + exceptions. + + A :class:`MultiDict` can be constructed from an iterable of + ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2 + onwards some keyword parameters. + + :param mapping: the initial value for the :class:`MultiDict`. Either a + regular dict, an iterable of ``(key, value)`` tuples + or `None`. 
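+
+    Both the flat view and the full multi view can be exported with
+    :meth:`to_dict` (values here are illustrative):
+
+    >>> d = MultiDict([('tag', 'a'), ('tag', 'b')])
+    >>> d.to_dict()
+    {'tag': 'a'}
+    >>> d.to_dict(flat=False)
+    {'tag': ['a', 'b']}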
+ """ + + def __init__(self, mapping=None): + if isinstance(mapping, MultiDict): + dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping))) + elif isinstance(mapping, dict): + tmp = {} + for key, value in iteritems(mapping): + if isinstance(value, (tuple, list)): + if len(value) == 0: + continue + value = list(value) + else: + value = [value] + tmp[key] = value + dict.__init__(self, tmp) + else: + tmp = {} + for key, value in mapping or (): + tmp.setdefault(key, []).append(value) + dict.__init__(self, tmp) + + def __getstate__(self): + return dict(self.lists()) + + def __setstate__(self, value): + dict.clear(self) + dict.update(self, value) + + def __getitem__(self, key): + """Return the first data value for this key; + raises KeyError if not found. + + :param key: The key to be looked up. + :raise KeyError: if the key does not exist. + """ + if key in self: + lst = dict.__getitem__(self, key) + if len(lst) > 0: + return lst[0] + raise exceptions.BadRequestKeyError(key) + + def __setitem__(self, key, value): + """Like :meth:`add` but removes an existing key first. + + :param key: the key for the value. + :param value: the value to set. + """ + dict.__setitem__(self, key, [value]) + + def add(self, key, value): + """Adds a new value for the key. + + .. versionadded:: 0.6 + + :param key: the key for the value. + :param value: the value to add. + """ + dict.setdefault(self, key, []).append(value) + + def getlist(self, key, type=None): + """Return the list of items for a given key. If that key is not in the + `MultiDict`, the return value will be an empty list. Just as `get` + `getlist` accepts a `type` parameter. All items will be converted + with the callable defined there. + + :param key: The key to be looked up. + :param type: A callable that is used to cast the value in the + :class:`MultiDict`. If a :exc:`ValueError` is raised + by this callable the value will be removed from the list. + :return: a :class:`list` of all the values for the key. + """ + try: + rv = dict.__getitem__(self, key) + except KeyError: + return [] + if type is None: + return list(rv) + result = [] + for item in rv: + try: + result.append(type(item)) + except ValueError: + pass + return result + + def setlist(self, key, new_list): + """Remove the old values for a key and add new ones. Note that the list + you pass the values in will be shallow-copied before it is inserted in + the dictionary. + + >>> d = MultiDict() + >>> d.setlist('foo', ['1', '2']) + >>> d['foo'] + '1' + >>> d.getlist('foo') + ['1', '2'] + + :param key: The key for which the values are set. + :param new_list: An iterable with the new values for the key. Old values + are removed first. + """ + dict.__setitem__(self, key, list(new_list)) + + def setdefault(self, key, default=None): + """Returns the value for the key if it is in the dict, otherwise it + returns `default` and sets that value for `key`. + + :param key: The key to be looked up. + :param default: The default value to be returned if the key is not + in the dict. If not further specified it's `None`. + """ + if key not in self: + self[key] = default + else: + default = self[key] + return default + + def setlistdefault(self, key, default_list=None): + """Like `setdefault` but sets multiple values. The list returned + is not a copy, but the list that is actually used internally. 
This + means that you can put new values into the dict by appending items + to the list: + + >>> d = MultiDict({"foo": 1}) + >>> d.setlistdefault("foo").extend([2, 3]) + >>> d.getlist("foo") + [1, 2, 3] + + :param key: The key to be looked up. + :param default: An iterable of default values. It is either copied + (in case it was a list) or converted into a list + before returned. + :return: a :class:`list` + """ + if key not in self: + default_list = list(default_list or ()) + dict.__setitem__(self, key, default_list) + else: + default_list = dict.__getitem__(self, key) + return default_list + + def items(self, multi=False): + """Return an iterator of ``(key, value)`` pairs. + + :param multi: If set to `True` the iterator returned will have a pair + for each value of each key. Otherwise it will only + contain pairs for the first value of each key. + """ + + for key, values in iteritems(dict, self): + if multi: + for value in values: + yield key, value + else: + yield key, values[0] + + def lists(self): + """Return a list of ``(key, values)`` pairs, where values is the list + of all values associated with the key.""" + + for key, values in iteritems(dict, self): + yield key, list(values) + + def keys(self): + return iterkeys(dict, self) + + __iter__ = keys + + def values(self): + """Returns an iterator of the first value on every key's value list.""" + for values in itervalues(dict, self): + yield values[0] + + def listvalues(self): + """Return an iterator of all values associated with a key. Zipping + :meth:`keys` and this is the same as calling :meth:`lists`: + + >>> d = MultiDict({"foo": [1, 2, 3]}) + >>> zip(d.keys(), d.listvalues()) == d.lists() + True + """ + + return itervalues(dict, self) + + def copy(self): + """Return a shallow copy of this object.""" + return self.__class__(self) + + def deepcopy(self, memo=None): + """Return a deep copy of this object.""" + return self.__class__(deepcopy(self.to_dict(flat=False), memo)) + + def to_dict(self, flat=True): + """Return the contents as regular dict. If `flat` is `True` the + returned dict will only have the first item present, if `flat` is + `False` all values will be returned as lists. + + :param flat: If set to `False` the dict returned will have lists + with all the values in it. Otherwise it will only + contain the first value for each key. + :return: a :class:`dict` + """ + if flat: + return dict(iteritems(self)) + return dict(self.lists()) + + def update(self, other_dict): + """update() extends rather than replaces existing key lists: + + >>> a = MultiDict({'x': 1}) + >>> b = MultiDict({'x': 2, 'y': 3}) + >>> a.update(b) + >>> a + MultiDict([('y', 3), ('x', 1), ('x', 2)]) + + If the value list for a key in ``other_dict`` is empty, no new values + will be added to the dict and the key will not be created: + + >>> x = {'empty_list': []} + >>> y = MultiDict() + >>> y.update(x) + >>> y + MultiDict([]) + """ + for key, value in iter_multi_items(other_dict): + MultiDict.add(self, key, value) + + def pop(self, key, default=_missing): + """Pop the first item for a list on the dict. Afterwards the + key is removed from the dict, so additional values are discarded: + + >>> d = MultiDict({"foo": [1, 2, 3]}) + >>> d.pop("foo") + 1 + >>> "foo" in d + False + + :param key: the key to pop. + :param default: if provided the value to return if the key was + not in the dictionary. 
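+
+        With a `default` a missing key is not an error:
+
+        >>> d.pop("missing", None) is None
+        True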
+ """ + try: + lst = dict.pop(self, key) + + if len(lst) == 0: + raise exceptions.BadRequestKeyError() + + return lst[0] + except KeyError as e: + if default is not _missing: + return default + raise exceptions.BadRequestKeyError(str(e)) + + def popitem(self): + """Pop an item from the dict.""" + try: + item = dict.popitem(self) + + if len(item[1]) == 0: + raise exceptions.BadRequestKeyError() + + return (item[0], item[1][0]) + except KeyError as e: + raise exceptions.BadRequestKeyError(str(e)) + + def poplist(self, key): + """Pop the list for a key from the dict. If the key is not in the dict + an empty list is returned. + + .. versionchanged:: 0.5 + If the key does no longer exist a list is returned instead of + raising an error. + """ + return dict.pop(self, key, []) + + def popitemlist(self): + """Pop a ``(key, list)`` tuple from the dict.""" + try: + return dict.popitem(self) + except KeyError as e: + raise exceptions.BadRequestKeyError(str(e)) + + def __copy__(self): + return self.copy() + + def __deepcopy__(self, memo): + return self.deepcopy(memo=memo) + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True))) + + +class _omd_bucket(object): + + """Wraps values in the :class:`OrderedMultiDict`. This makes it + possible to keep an order over multiple different keys. It requires + a lot of extra memory and slows down access a lot, but makes it + possible to access elements in O(1) and iterate in O(n). + """ + __slots__ = ('prev', 'key', 'value', 'next') + + def __init__(self, omd, key, value): + self.prev = omd._last_bucket + self.key = key + self.value = value + self.next = None + + if omd._first_bucket is None: + omd._first_bucket = self + if omd._last_bucket is not None: + omd._last_bucket.next = self + omd._last_bucket = self + + def unlink(self, omd): + if self.prev: + self.prev.next = self.next + if self.next: + self.next.prev = self.prev + if omd._first_bucket is self: + omd._first_bucket = self.next + if omd._last_bucket is self: + omd._last_bucket = self.prev + + +@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) +class OrderedMultiDict(MultiDict): + + """Works like a regular :class:`MultiDict` but preserves the + order of the fields. To convert the ordered multi dict into a + list you can use the :meth:`items` method and pass it ``multi=True``. + + In general an :class:`OrderedMultiDict` is an order of magnitude + slower than a :class:`MultiDict`. + + .. admonition:: note + + Due to a limitation in Python you cannot convert an ordered + multi dict into a regular dict by using ``dict(multidict)``. + Instead you have to use the :meth:`to_dict` method, otherwise + the internal bucket objects are exposed. 
+ """ + + def __init__(self, mapping=None): + dict.__init__(self) + self._first_bucket = self._last_bucket = None + if mapping is not None: + OrderedMultiDict.update(self, mapping) + + def __eq__(self, other): + if not isinstance(other, MultiDict): + return NotImplemented + if isinstance(other, OrderedMultiDict): + iter1 = iteritems(self, multi=True) + iter2 = iteritems(other, multi=True) + try: + for k1, v1 in iter1: + k2, v2 = next(iter2) + if k1 != k2 or v1 != v2: + return False + except StopIteration: + return False + try: + next(iter2) + except StopIteration: + return True + return False + if len(self) != len(other): + return False + for key, values in iterlists(self): + if other.getlist(key) != values: + return False + return True + + __hash__ = None + + def __ne__(self, other): + return not self.__eq__(other) + + def __reduce_ex__(self, protocol): + return type(self), (list(iteritems(self, multi=True)),) + + def __getstate__(self): + return list(iteritems(self, multi=True)) + + def __setstate__(self, values): + dict.clear(self) + for key, value in values: + self.add(key, value) + + def __getitem__(self, key): + if key in self: + return dict.__getitem__(self, key)[0].value + raise exceptions.BadRequestKeyError(key) + + def __setitem__(self, key, value): + self.poplist(key) + self.add(key, value) + + def __delitem__(self, key): + self.pop(key) + + def keys(self): + return (key for key, value in iteritems(self)) + + __iter__ = keys + + def values(self): + return (value for key, value in iteritems(self)) + + def items(self, multi=False): + ptr = self._first_bucket + if multi: + while ptr is not None: + yield ptr.key, ptr.value + ptr = ptr.next + else: + returned_keys = set() + while ptr is not None: + if ptr.key not in returned_keys: + returned_keys.add(ptr.key) + yield ptr.key, ptr.value + ptr = ptr.next + + def lists(self): + returned_keys = set() + ptr = self._first_bucket + while ptr is not None: + if ptr.key not in returned_keys: + yield ptr.key, self.getlist(ptr.key) + returned_keys.add(ptr.key) + ptr = ptr.next + + def listvalues(self): + for key, values in iterlists(self): + yield values + + def add(self, key, value): + dict.setdefault(self, key, []).append(_omd_bucket(self, key, value)) + + def getlist(self, key, type=None): + try: + rv = dict.__getitem__(self, key) + except KeyError: + return [] + if type is None: + return [x.value for x in rv] + result = [] + for item in rv: + try: + result.append(type(item.value)) + except ValueError: + pass + return result + + def setlist(self, key, new_list): + self.poplist(key) + for value in new_list: + self.add(key, value) + + def setlistdefault(self, key, default_list=None): + raise TypeError('setlistdefault is unsupported for ' + 'ordered multi dicts') + + def update(self, mapping): + for key, value in iter_multi_items(mapping): + OrderedMultiDict.add(self, key, value) + + def poplist(self, key): + buckets = dict.pop(self, key, ()) + for bucket in buckets: + bucket.unlink(self) + return [x.value for x in buckets] + + def pop(self, key, default=_missing): + try: + buckets = dict.pop(self, key) + except KeyError as e: + if default is not _missing: + return default + raise exceptions.BadRequestKeyError(str(e)) + for bucket in buckets: + bucket.unlink(self) + return buckets[0].value + + def popitem(self): + try: + key, buckets = dict.popitem(self) + except KeyError as e: + raise exceptions.BadRequestKeyError(str(e)) + for bucket in buckets: + bucket.unlink(self) + return key, buckets[0].value + + def popitemlist(self): + try: + key, 
buckets = dict.popitem(self) + except KeyError as e: + raise exceptions.BadRequestKeyError(str(e)) + for bucket in buckets: + bucket.unlink(self) + return key, [x.value for x in buckets] + + +def _options_header_vkw(value, kw): + return dump_options_header(value, dict((k.replace('_', '-'), v) + for k, v in kw.items())) + + +def _unicodify_header_value(value): + if isinstance(value, bytes): + value = value.decode('latin-1') + if not isinstance(value, text_type): + value = text_type(value) + return value + + +@native_itermethods(['keys', 'values', 'items']) +class Headers(object): + + """An object that stores some headers. It has a dict-like interface + but is ordered and can store the same keys multiple times. + + This data structure is useful if you want a nicer way to handle WSGI + headers which are stored as tuples in a list. + + From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is + also a subclass of the :class:`~exceptions.BadRequest` HTTP exception + and will render a page for a ``400 BAD REQUEST`` if caught in a + catch-all for HTTP exceptions. + + Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers` + class, with the exception of `__getitem__`. :mod:`wsgiref` will return + `None` for ``headers['missing']``, whereas :class:`Headers` will raise + a :class:`KeyError`. + + To create a new :class:`Headers` object pass it a list or dict of headers + which are used as default values. This does not reuse the list passed + to the constructor for internal usage. + + :param defaults: The list of default values for the :class:`Headers`. + + .. versionchanged:: 0.9 + This data structure now stores unicode values similar to how the + multi dicts do it. The main difference is that bytes can be set as + well which will automatically be latin1 decoded. + + .. versionchanged:: 0.9 + The :meth:`linked` function was removed without replacement as it + was an API that does not support the changes to the encoding model. + """ + + def __init__(self, defaults=None): + self._list = [] + if defaults is not None: + if isinstance(defaults, (list, Headers)): + self._list.extend(defaults) + else: + self.extend(defaults) + + def __getitem__(self, key, _get_mode=False): + if not _get_mode: + if isinstance(key, integer_types): + return self._list[key] + elif isinstance(key, slice): + return self.__class__(self._list[key]) + if not isinstance(key, string_types): + raise exceptions.BadRequestKeyError(key) + ikey = key.lower() + for k, v in self._list: + if k.lower() == ikey: + return v + # micro optimization: if we are in get mode we will catch that + # exception one stack level down so we can raise a standard + # key error instead of our special one. + if _get_mode: + raise KeyError() + raise exceptions.BadRequestKeyError(key) + + def __eq__(self, other): + return other.__class__ is self.__class__ and \ + set(other._list) == set(self._list) + + __hash__ = None + + def __ne__(self, other): + return not self.__eq__(other) + + def get(self, key, default=None, type=None, as_bytes=False): + """Return the default value if the requested data doesn't exist. + If `type` is provided and is a callable it should convert the value, + return it or raise a :exc:`ValueError` if that is not possible. In + this case the function will return the default as if the value was not + found: + + >>> d = Headers([('Content-Length', '42')]) + >>> d.get('Content-Length', type=int) + 42 + + If a headers object is bound you must not add unicode strings + because no encoding takes place. + + .. 
versionadded:: 0.9 + Added support for `as_bytes`. + + :param key: The key to be looked up. + :param default: The default value to be returned if the key can't + be looked up. If not further specified `None` is + returned. + :param type: A callable that is used to cast the value in the + :class:`Headers`. If a :exc:`ValueError` is raised + by this callable the default value is returned. + :param as_bytes: return bytes instead of unicode strings. + """ + try: + rv = self.__getitem__(key, _get_mode=True) + except KeyError: + return default + if as_bytes: + rv = rv.encode('latin1') + if type is None: + return rv + try: + return type(rv) + except ValueError: + return default + + def getlist(self, key, type=None, as_bytes=False): + """Return the list of items for a given key. If that key is not in the + :class:`Headers`, the return value will be an empty list. Just as + :meth:`get` :meth:`getlist` accepts a `type` parameter. All items will + be converted with the callable defined there. + + .. versionadded:: 0.9 + Added support for `as_bytes`. + + :param key: The key to be looked up. + :param type: A callable that is used to cast the value in the + :class:`Headers`. If a :exc:`ValueError` is raised + by this callable the value will be removed from the list. + :return: a :class:`list` of all the values for the key. + :param as_bytes: return bytes instead of unicode strings. + """ + ikey = key.lower() + result = [] + for k, v in self: + if k.lower() == ikey: + if as_bytes: + v = v.encode('latin1') + if type is not None: + try: + v = type(v) + except ValueError: + continue + result.append(v) + return result + + def get_all(self, name): + """Return a list of all the values for the named field. + + This method is compatible with the :mod:`wsgiref` + :meth:`~wsgiref.headers.Headers.get_all` method. + """ + return self.getlist(name) + + def items(self, lower=False): + for key, value in self: + if lower: + key = key.lower() + yield key, value + + def keys(self, lower=False): + for key, _ in iteritems(self, lower): + yield key + + def values(self): + for _, value in iteritems(self): + yield value + + def extend(self, iterable): + """Extend the headers with a dict or an iterable yielding keys and + values. + """ + if isinstance(iterable, dict): + for key, value in iteritems(iterable): + if isinstance(value, (tuple, list)): + for v in value: + self.add(key, v) + else: + self.add(key, value) + else: + for key, value in iterable: + self.add(key, value) + + def __delitem__(self, key, _index_operation=True): + if _index_operation and isinstance(key, (integer_types, slice)): + del self._list[key] + return + key = key.lower() + new = [] + for k, v in self._list: + if k.lower() != key: + new.append((k, v)) + self._list[:] = new + + def remove(self, key): + """Remove a key. + + :param key: The key to be removed. + """ + return self.__delitem__(key, _index_operation=False) + + def pop(self, key=None, default=_missing): + """Removes and returns a key or index. + + :param key: The key to be popped. If this is an integer the item at + that position is removed, if it's a string the value for + that key is. If the key is omitted or `None` the last + item is removed. + :return: an item. 
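+
+        For example (comparisons are used so the output is the same on
+        Python 2 and 3):
+
+        >>> h = Headers([('X-One', '1'), ('X-Two', '2')])
+        >>> h.pop('X-One') == '1'
+        True
+        >>> h.pop('missing', default=None) is None
+        True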
+ """ + if key is None: + return self._list.pop() + if isinstance(key, integer_types): + return self._list.pop(key) + try: + rv = self[key] + self.remove(key) + except KeyError: + if default is not _missing: + return default + raise + return rv + + def popitem(self): + """Removes a key or index and returns a (key, value) item.""" + return self.pop() + + def __contains__(self, key): + """Check if a key is present.""" + try: + self.__getitem__(key, _get_mode=True) + except KeyError: + return False + return True + + has_key = __contains__ + + def __iter__(self): + """Yield ``(key, value)`` tuples.""" + return iter(self._list) + + def __len__(self): + return len(self._list) + + def add(self, _key, _value, **kw): + """Add a new header tuple to the list. + + Keyword arguments can specify additional parameters for the header + value, with underscores converted to dashes:: + + >>> d = Headers() + >>> d.add('Content-Type', 'text/plain') + >>> d.add('Content-Disposition', 'attachment', filename='foo.png') + + The keyword argument dumping uses :func:`dump_options_header` + behind the scenes. + + .. versionadded:: 0.4.1 + keyword arguments were added for :mod:`wsgiref` compatibility. + """ + if kw: + _value = _options_header_vkw(_value, kw) + _value = _unicodify_header_value(_value) + self._validate_value(_value) + self._list.append((_key, _value)) + + def _validate_value(self, value): + if not isinstance(value, text_type): + raise TypeError('Value should be unicode.') + if u'\n' in value or u'\r' in value: + raise ValueError('Detected newline in header value. This is ' + 'a potential security problem') + + def add_header(self, _key, _value, **_kw): + """Add a new header tuple to the list. + + An alias for :meth:`add` for compatibility with the :mod:`wsgiref` + :meth:`~wsgiref.headers.Headers.add_header` method. + """ + self.add(_key, _value, **_kw) + + def clear(self): + """Clears all headers.""" + del self._list[:] + + def set(self, _key, _value, **kw): + """Remove all header tuples for `key` and add a new one. The newly + added key either appears at the end of the list if there was no + entry or replaces the first one. + + Keyword arguments can specify additional parameters for the header + value, with underscores converted to dashes. See :meth:`add` for + more information. + + .. versionchanged:: 0.6.1 + :meth:`set` now accepts the same arguments as :meth:`add`. + + :param key: The key to be inserted. + :param value: The value to be inserted. + """ + if kw: + _value = _options_header_vkw(_value, kw) + _value = _unicodify_header_value(_value) + self._validate_value(_value) + if not self._list: + self._list.append((_key, _value)) + return + listiter = iter(self._list) + ikey = _key.lower() + for idx, (old_key, old_value) in enumerate(listiter): + if old_key.lower() == ikey: + # replace first ocurrence + self._list[idx] = (_key, _value) + break + else: + self._list.append((_key, _value)) + return + self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey] + + def setdefault(self, key, value): + """Returns the value for the key if it is in the dict, otherwise it + returns `default` and sets that value for `key`. + + :param key: The key to be looked up. + :param default: The default value to be returned if the key is not + in the dict. If not further specified it's `None`. 
+ """ + if key in self: + return self[key] + self.set(key, value) + return value + + def __setitem__(self, key, value): + """Like :meth:`set` but also supports index/slice based setting.""" + if isinstance(key, (slice, integer_types)): + if isinstance(key, integer_types): + value = [value] + value = [(k, _unicodify_header_value(v)) for (k, v) in value] + [self._validate_value(v) for (k, v) in value] + if isinstance(key, integer_types): + self._list[key] = value[0] + else: + self._list[key] = value + else: + self.set(key, value) + + def to_list(self, charset='iso-8859-1'): + """Convert the headers into a list suitable for WSGI.""" + from warnings import warn + warn(DeprecationWarning('Method removed, use to_wsgi_list instead'), + stacklevel=2) + return self.to_wsgi_list() + + def to_wsgi_list(self): + """Convert the headers into a list suitable for WSGI. + + The values are byte strings in Python 2 converted to latin1 and unicode + strings in Python 3 for the WSGI server to encode. + + :return: list + """ + if PY2: + return [(to_native(k), v.encode('latin1')) for k, v in self] + return list(self) + + def copy(self): + return self.__class__(self._list) + + def __copy__(self): + return self.copy() + + def __str__(self): + """Returns formatted headers suitable for HTTP transmission.""" + strs = [] + for key, value in self.to_wsgi_list(): + strs.append('%s: %s' % (key, value)) + strs.append('\r\n') + return '\r\n'.join(strs) + + def __repr__(self): + return '%s(%r)' % ( + self.__class__.__name__, + list(self) + ) + + +class ImmutableHeadersMixin(object): + + """Makes a :class:`Headers` immutable. We do not mark them as + hashable though since the only usecase for this datastructure + in Werkzeug is a view on a mutable structure. + + .. versionadded:: 0.5 + + :private: + """ + + def __delitem__(self, key): + is_immutable(self) + + def __setitem__(self, key, value): + is_immutable(self) + set = __setitem__ + + def add(self, item): + is_immutable(self) + remove = add_header = add + + def extend(self, iterable): + is_immutable(self) + + def insert(self, pos, value): + is_immutable(self) + + def pop(self, index=-1): + is_immutable(self) + + def popitem(self): + is_immutable(self) + + def setdefault(self, key, default): + is_immutable(self) + + +class EnvironHeaders(ImmutableHeadersMixin, Headers): + + """Read only version of the headers from a WSGI environment. This + provides the same interface as `Headers` and is constructed from + a WSGI environment. + + From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a + subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will + render a page for a ``400 BAD REQUEST`` if caught in a catch-all for + HTTP exceptions. + """ + + def __init__(self, environ): + self.environ = environ + + def __eq__(self, other): + return self.environ is other.environ + + __hash__ = None + + def __getitem__(self, key, _get_mode=False): + # _get_mode is a no-op for this class as there is no index but + # used because get() calls it. + key = key.upper().replace('-', '_') + if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): + return _unicodify_header_value(self.environ[key]) + return _unicodify_header_value(self.environ['HTTP_' + key]) + + def __len__(self): + # the iter is necessary because otherwise list calls our + # len which would call list again and so forth. 
+ return len(list(iter(self))) + + def __iter__(self): + for key, value in iteritems(self.environ): + if key.startswith('HTTP_') and key not in \ + ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): + yield (key[5:].replace('_', '-').title(), + _unicodify_header_value(value)) + elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): + yield (key.replace('_', '-').title(), + _unicodify_header_value(value)) + + def copy(self): + raise TypeError('cannot create %r copies' % self.__class__.__name__) + + +@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) +class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict): + + """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict` + instances as sequence and it will combine the return values of all wrapped + dicts: + + >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict + >>> post = MultiDict([('foo', 'bar')]) + >>> get = MultiDict([('blub', 'blah')]) + >>> combined = CombinedMultiDict([get, post]) + >>> combined['foo'] + 'bar' + >>> combined['blub'] + 'blah' + + This works for all read operations and will raise a `TypeError` for + methods that usually change data which isn't possible. + + From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a + subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will + render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP + exceptions. + """ + + def __reduce_ex__(self, protocol): + return type(self), (self.dicts,) + + def __init__(self, dicts=None): + self.dicts = dicts or [] + + @classmethod + def fromkeys(cls): + raise TypeError('cannot create %r instances by fromkeys' % + cls.__name__) + + def __getitem__(self, key): + for d in self.dicts: + if key in d: + return d[key] + raise exceptions.BadRequestKeyError(key) + + def get(self, key, default=None, type=None): + for d in self.dicts: + if key in d: + if type is not None: + try: + return type(d[key]) + except ValueError: + continue + return d[key] + return default + + def getlist(self, key, type=None): + rv = [] + for d in self.dicts: + rv.extend(d.getlist(key, type)) + return rv + + def _keys_impl(self): + """This function exists so __len__ can be implemented more efficiently, + saving one list creation from an iterator. + + Using this for Python 2's ``dict.keys`` behavior would be useless since + `dict.keys` in Python 2 returns a list, while we have a set here. + """ + rv = set() + for d in self.dicts: + rv.update(iterkeys(d)) + return rv + + def keys(self): + return iter(self._keys_impl()) + + __iter__ = keys + + def items(self, multi=False): + found = set() + for d in self.dicts: + for key, value in iteritems(d, multi): + if multi: + yield key, value + elif key not in found: + found.add(key) + yield key, value + + def values(self): + for key, value in iteritems(self): + yield value + + def lists(self): + rv = {} + for d in self.dicts: + for key, values in iterlists(d): + rv.setdefault(key, []).extend(values) + return iteritems(rv) + + def listvalues(self): + return (x[1] for x in self.lists()) + + def copy(self): + """Return a shallow copy of this object.""" + return self.__class__(self.dicts[:]) + + def to_dict(self, flat=True): + """Return the contents as regular dict. If `flat` is `True` the + returned dict will only have the first item present, if `flat` is + `False` all values will be returned as lists. + + :param flat: If set to `False` the dict returned will have lists + with all the values in it. 
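The WSGI key mangling that :class:`EnvironHeaders` performs can be sketched with a hypothetical environ:

.. code:: python

    from werkzeug.datastructures import EnvironHeaders

    environ = {'CONTENT_TYPE': 'text/plain',
               'HTTP_X_FORWARDED_FOR': '10.0.0.1'}
    headers = EnvironHeaders(environ)
    assert headers['X-Forwarded-For'] == '10.0.0.1'  # HTTP_ prefix stripped, title-cased
    assert sorted(headers.keys()) == ['Content-Type', 'X-Forwarded-For']
    # headers['X-Foo'] = 'bar' would raise TypeError (ImmutableHeadersMixin)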
Otherwise it will only + contain the first item for each key. + :return: a :class:`dict` + """ + rv = {} + for d in reversed(self.dicts): + rv.update(d.to_dict(flat)) + return rv + + def __len__(self): + return len(self._keys_impl()) + + def __contains__(self, key): + for d in self.dicts: + if key in d: + return True + return False + + has_key = __contains__ + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, self.dicts) + + +class FileMultiDict(MultiDict): + + """A special :class:`MultiDict` that has convenience methods to add + files to it. This is used for :class:`EnvironBuilder` and generally + useful for unittesting. + + .. versionadded:: 0.5 + """ + + def add_file(self, name, file, filename=None, content_type=None): + """Adds a new file to the dict. `file` can be a file name or + a :class:`file`-like or a :class:`FileStorage` object. + + :param name: the name of the field. + :param file: a filename or :class:`file`-like object + :param filename: an optional filename + :param content_type: an optional content type + """ + if isinstance(file, FileStorage): + value = file + else: + if isinstance(file, string_types): + if filename is None: + filename = file + file = open(file, 'rb') + if filename and content_type is None: + content_type = mimetypes.guess_type(filename)[0] or \ + 'application/octet-stream' + value = FileStorage(file, filename, name, content_type) + + self.add(name, value) + + +class ImmutableDict(ImmutableDictMixin, dict): + + """An immutable :class:`dict`. + + .. versionadded:: 0.5 + """ + + def __repr__(self): + return '%s(%s)' % ( + self.__class__.__name__, + dict.__repr__(self), + ) + + def copy(self): + """Return a shallow mutable copy of this object. Keep in mind that + the standard library's :func:`copy` function is a no-op for this class + like for any other python immutable type (eg: :class:`tuple`). + """ + return dict(self) + + def __copy__(self): + return self + + +class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict): + + """An immutable :class:`MultiDict`. + + .. versionadded:: 0.5 + """ + + def copy(self): + """Return a shallow mutable copy of this object. Keep in mind that + the standard library's :func:`copy` function is a no-op for this class + like for any other python immutable type (eg: :class:`tuple`). + """ + return MultiDict(self) + + def __copy__(self): + return self + + +class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict): + + """An immutable :class:`OrderedMultiDict`. + + .. versionadded:: 0.6 + """ + + def _iter_hashitems(self): + return enumerate(iteritems(self, multi=True)) + + def copy(self): + """Return a shallow mutable copy of this object. Keep in mind that + the standard library's :func:`copy` function is a no-op for this class + like for any other python immutable type (eg: :class:`tuple`). + """ + return OrderedMultiDict(self) + + def __copy__(self): + return self + + +@native_itermethods(['values']) +class Accept(ImmutableList): + + """An :class:`Accept` object is just a list subclass for lists of + ``(value, quality)`` tuples. It is automatically sorted by quality. + + All :class:`Accept` objects work similar to a list but provide extra + functionality for working with the data. 
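A sketch of :meth:`FileMultiDict.add_file` with an in-memory stream (field and file names are made up):

.. code:: python

    import io
    from werkzeug.datastructures import FileMultiDict

    files = FileMultiDict()
    files.add_file('avatar', io.BytesIO(b'...'), filename='avatar.png')
    fs = files['avatar']                   # wrapped in a FileStorage
    assert fs.filename == 'avatar.png'
    assert fs.content_type == 'image/png'  # guessed from the filename via mimetypes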
Containment checks are + normalized to the rules of that header: + + >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)]) + >>> a.best + 'ISO-8859-1' + >>> 'iso-8859-1' in a + True + >>> 'UTF8' in a + True + >>> 'utf7' in a + False + + To get the quality for an item you can use normal item lookup: + + >>> print a['utf-8'] + 0.7 + >>> a['utf7'] + 0 + + .. versionchanged:: 0.5 + :class:`Accept` objects are forced immutable now. + """ + + def __init__(self, values=()): + if values is None: + list.__init__(self) + self.provided = False + elif isinstance(values, Accept): + self.provided = values.provided + list.__init__(self, values) + else: + self.provided = True + values = sorted(values, key=lambda x: (x[1], x[0]), reverse=True) + list.__init__(self, values) + + def _value_matches(self, value, item): + """Check if a value matches a given accept item.""" + return item == '*' or item.lower() == value.lower() + + def __getitem__(self, key): + """Besides index lookup (getting item n) you can also pass it a string + to get the quality for the item. If the item is not in the list, the + returned quality is ``0``. + """ + if isinstance(key, string_types): + return self.quality(key) + return list.__getitem__(self, key) + + def quality(self, key): + """Returns the quality of the key. + + .. versionadded:: 0.6 + In previous versions you had to use the item-lookup syntax + (eg: ``obj[key]`` instead of ``obj.quality(key)``) + """ + for item, quality in self: + if self._value_matches(key, item): + return quality + return 0 + + def __contains__(self, value): + for item, quality in self: + if self._value_matches(value, item): + return True + return False + + def __repr__(self): + return '%s([%s])' % ( + self.__class__.__name__, + ', '.join('(%r, %s)' % (x, y) for x, y in self) + ) + + def index(self, key): + """Get the position of an entry or raise :exc:`ValueError`. + + :param key: The key to be looked up. + + .. versionchanged:: 0.5 + This used to raise :exc:`IndexError`, which was inconsistent + with the list API. + """ + if isinstance(key, string_types): + for idx, (item, quality) in enumerate(self): + if self._value_matches(key, item): + return idx + raise ValueError(key) + return list.index(self, key) + + def find(self, key): + """Get the position of an entry or return -1. + + :param key: The key to be looked up. + """ + try: + return self.index(key) + except ValueError: + return -1 + + def values(self): + """Iterate over all values.""" + for item in self: + yield item[0] + + def to_header(self): + """Convert the header set into an HTTP header string.""" + result = [] + for value, quality in self: + if quality != 1: + value = '%s;q=%s' % (value, quality) + result.append(value) + return ','.join(result) + + def __str__(self): + return self.to_header() + + def best_match(self, matches, default=None): + """Returns the best match from a list of possible matches based + on the quality of the client. If two items have the same quality, + the one is returned that comes first. 
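Quality sorting and string lookup on :class:`Accept` look roughly like this:

.. code:: python

    from werkzeug.datastructures import Accept

    a = Accept([('identity', 0.5), ('gzip', 1)])
    assert a.best == 'gzip'                 # entries are sorted by quality
    assert a['identity'] == 0.5             # string lookup returns the quality
    assert a.quality('unknown') == 0
    assert a.to_header() == 'gzip,identity;q=0.5'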
+ + :param matches: a list of matches to check for + :param default: the value that is returned if none match + """ + best_quality = -1 + result = default + for server_item in matches: + for client_item, quality in self: + if quality <= best_quality: + break + if self._value_matches(server_item, client_item) \ + and quality > 0: + best_quality = quality + result = server_item + return result + + @property + def best(self): + """The best match as value.""" + if self: + return self[0][0] + + +class MIMEAccept(Accept): + + """Like :class:`Accept` but with special methods and behavior for + mimetypes. + """ + + def _value_matches(self, value, item): + def _normalize(x): + x = x.lower() + return x == '*' and ('*', '*') or x.split('/', 1) + + # this is from the application which is trusted. to avoid developer + # frustration we actually check these for valid values + if '/' not in value: + raise ValueError('invalid mimetype %r' % value) + value_type, value_subtype = _normalize(value) + if value_type == '*' and value_subtype != '*': + raise ValueError('invalid mimetype %r' % value) + + if '/' not in item: + return False + item_type, item_subtype = _normalize(item) + if item_type == '*' and item_subtype != '*': + return False + return ( + (item_type == item_subtype == '*' or + value_type == value_subtype == '*') or + (item_type == value_type and (item_subtype == '*' or + value_subtype == '*' or + item_subtype == value_subtype)) + ) + + @property + def accept_html(self): + """True if this object accepts HTML.""" + return ( + 'text/html' in self or + 'application/xhtml+xml' in self or + self.accept_xhtml + ) + + @property + def accept_xhtml(self): + """True if this object accepts XHTML.""" + return ( + 'application/xhtml+xml' in self or + 'application/xml' in self + ) + + @property + def accept_json(self): + """True if this object accepts JSON.""" + return 'application/json' in self + + +class LanguageAccept(Accept): + + """Like :class:`Accept` but with normalization for languages.""" + + def _value_matches(self, value, item): + def _normalize(language): + return _locale_delim_re.split(language.lower()) + return item == '*' or _normalize(value) == _normalize(item) + + +class CharsetAccept(Accept): + + """Like :class:`Accept` but with normalization for charsets.""" + + def _value_matches(self, value, item): + def _normalize(name): + try: + return codecs.lookup(name).name + except LookupError: + return name.lower() + return item == '*' or _normalize(value) == _normalize(item) + + +def cache_property(key, empty, type): + """Return a new property object for a cache header. Useful if you + want to add support for a cache extension in a subclass.""" + return property(lambda x: x._get_cache_value(key, empty, type), + lambda x, v: x._set_cache_value(key, v, type), + lambda x: x._del_cache_value(key), + 'accessor for %r' % key) + + +class _CacheControl(UpdateDictMixin, dict): + + """Subclass of a dict that stores values for a Cache-Control header. It + has accessors for all the cache-control directives specified in RFC 2616. + The class does not differentiate between request and response directives. + + Because the cache-control directives in the HTTP header use dashes the + python descriptors use underscores for that. + + To get a header of the :class:`CacheControl` object again you can convert + the object into a string or call the :meth:`to_header` method. If you plan + to subclass it and add your own items have a look at the sourcecode for + that class. + + .. 
versionchanged:: 0.4 + + Setting `no_cache` or `private` to boolean `True` will set the implicit + none-value which is ``*``: + + >>> cc = ResponseCacheControl() + >>> cc.no_cache = True + >>> cc + + >>> cc.no_cache + '*' + >>> cc.no_cache = None + >>> cc + + + In versions before 0.5 the behavior documented here affected the now + no longer existing `CacheControl` class. + """ + + no_cache = cache_property('no-cache', '*', None) + no_store = cache_property('no-store', None, bool) + max_age = cache_property('max-age', -1, int) + no_transform = cache_property('no-transform', None, None) + + def __init__(self, values=(), on_update=None): + dict.__init__(self, values or ()) + self.on_update = on_update + self.provided = values is not None + + def _get_cache_value(self, key, empty, type): + """Used internally by the accessor properties.""" + if type is bool: + return key in self + if key in self: + value = self[key] + if value is None: + return empty + elif type is not None: + try: + value = type(value) + except ValueError: + pass + return value + + def _set_cache_value(self, key, value, type): + """Used internally by the accessor properties.""" + if type is bool: + if value: + self[key] = None + else: + self.pop(key, None) + else: + if value is None: + self.pop(key) + elif value is True: + self[key] = None + else: + self[key] = value + + def _del_cache_value(self, key): + """Used internally by the accessor properties.""" + if key in self: + del self[key] + + def to_header(self): + """Convert the stored values into a cache control header.""" + return dump_header(self) + + def __str__(self): + return self.to_header() + + def __repr__(self): + return '<%s %s>' % ( + self.__class__.__name__, + " ".join( + "%s=%r" % (k, v) for k, v in sorted(self.items()) + ), + ) + + +class RequestCacheControl(ImmutableDictMixin, _CacheControl): + + """A cache control for requests. This is immutable and gives access + to all the request-relevant cache control headers. + + To get a header of the :class:`RequestCacheControl` object again you can + convert the object into a string or call the :meth:`to_header` method. If + you plan to subclass it and add your own items have a look at the sourcecode + for that class. + + .. versionadded:: 0.5 + In previous versions a `CacheControl` class existed that was used + both for request and response. + """ + + max_stale = cache_property('max-stale', '*', int) + min_fresh = cache_property('min-fresh', '*', int) + no_transform = cache_property('no-transform', None, None) + only_if_cached = cache_property('only-if-cached', None, bool) + + +class ResponseCacheControl(_CacheControl): + + """A cache control for responses. Unlike :class:`RequestCacheControl` + this is mutable and gives access to response-relevant cache control + headers. + + To get a header of the :class:`ResponseCacheControl` object again you can + convert the object into a string or call the :meth:`to_header` method. If + you plan to subclass it and add your own items have a look at the sourcecode + for that class. + + .. versionadded:: 0.5 + In previous versions a `CacheControl` class existed that was used + both for request and response. 
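Since `cache_property` is exposed for subclassing, adding a custom directive might look like this (the ``immutable`` directive here is purely hypothetical):

.. code:: python

    from werkzeug.datastructures import ResponseCacheControl, cache_property

    class MyCacheControl(ResponseCacheControl):
        immutable = cache_property('immutable', None, bool)  # hypothetical directive

    cc = MyCacheControl()
    cc.immutable = True
    assert 'immutable' in cc.to_header()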
+ """ + + public = cache_property('public', None, bool) + private = cache_property('private', '*', None) + must_revalidate = cache_property('must-revalidate', None, bool) + proxy_revalidate = cache_property('proxy-revalidate', None, bool) + s_maxage = cache_property('s-maxage', None, None) + + +# attach cache_property to the _CacheControl as staticmethod +# so that others can reuse it. +_CacheControl.cache_property = staticmethod(cache_property) + + +class CallbackDict(UpdateDictMixin, dict): + + """A dict that calls a function passed every time something is changed. + The function is passed the dict instance. + """ + + def __init__(self, initial=None, on_update=None): + dict.__init__(self, initial or ()) + self.on_update = on_update + + def __repr__(self): + return '<%s %s>' % ( + self.__class__.__name__, + dict.__repr__(self) + ) + + +class HeaderSet(MutableSet): + + """Similar to the :class:`ETags` class this implements a set-like structure. + Unlike :class:`ETags` this is case insensitive and used for vary, allow, and + content-language headers. + + If not constructed using the :func:`parse_set_header` function the + instantiation works like this: + + >>> hs = HeaderSet(['foo', 'bar', 'baz']) + >>> hs + HeaderSet(['foo', 'bar', 'baz']) + """ + + def __init__(self, headers=None, on_update=None): + self._headers = list(headers or ()) + self._set = set([x.lower() for x in self._headers]) + self.on_update = on_update + + def add(self, header): + """Add a new header to the set.""" + self.update((header,)) + + def remove(self, header): + """Remove a header from the set. This raises an :exc:`KeyError` if the + header is not in the set. + + .. versionchanged:: 0.5 + In older versions a :exc:`IndexError` was raised instead of a + :exc:`KeyError` if the object was missing. + + :param header: the header to be removed. + """ + key = header.lower() + if key not in self._set: + raise KeyError(header) + self._set.remove(key) + for idx, key in enumerate(self._headers): + if key.lower() == header: + del self._headers[idx] + break + if self.on_update is not None: + self.on_update(self) + + def update(self, iterable): + """Add all the headers from the iterable to the set. + + :param iterable: updates the set with the items from the iterable. + """ + inserted_any = False + for header in iterable: + key = header.lower() + if key not in self._set: + self._headers.append(header) + self._set.add(key) + inserted_any = True + if inserted_any and self.on_update is not None: + self.on_update(self) + + def discard(self, header): + """Like :meth:`remove` but ignores errors. + + :param header: the header to be discarded. + """ + try: + return self.remove(header) + except KeyError: + pass + + def find(self, header): + """Return the index of the header in the set or return -1 if not found. + + :param header: the header to be looked up. + """ + header = header.lower() + for idx, item in enumerate(self._headers): + if item.lower() == header: + return idx + return -1 + + def index(self, header): + """Return the index of the header in the set or raise an + :exc:`IndexError`. + + :param header: the header to be looked up. + """ + rv = self.find(header) + if rv < 0: + raise IndexError(header) + return rv + + def clear(self): + """Clear the set.""" + self._set.clear() + del self._headers[:] + if self.on_update is not None: + self.on_update(self) + + def as_set(self, preserve_casing=False): + """Return the set as real python set type. When calling this, all + the items are converted to lowercase and the ordering is lost. 
+ + :param preserve_casing: if set to `True` the items in the set returned + will have the original case like in the + :class:`HeaderSet`, otherwise they will + be lowercase. + """ + if preserve_casing: + return set(self._headers) + return set(self._set) + + def to_header(self): + """Convert the header set into an HTTP header string.""" + return ', '.join(map(quote_header_value, self._headers)) + + def __getitem__(self, idx): + return self._headers[idx] + + def __delitem__(self, idx): + rv = self._headers.pop(idx) + self._set.remove(rv.lower()) + if self.on_update is not None: + self.on_update(self) + + def __setitem__(self, idx, value): + old = self._headers[idx] + self._set.remove(old.lower()) + self._headers[idx] = value + self._set.add(value.lower()) + if self.on_update is not None: + self.on_update(self) + + def __contains__(self, header): + return header.lower() in self._set + + def __len__(self): + return len(self._set) + + def __iter__(self): + return iter(self._headers) + + def __nonzero__(self): + return bool(self._set) + + def __str__(self): + return self.to_header() + + def __repr__(self): + return '%s(%r)' % ( + self.__class__.__name__, + self._headers + ) + + +class ETags(Container, Iterable): + + """A set that can be used to check if one etag is present in a collection + of etags. + """ + + def __init__(self, strong_etags=None, weak_etags=None, star_tag=False): + self._strong = frozenset(not star_tag and strong_etags or ()) + self._weak = frozenset(weak_etags or ()) + self.star_tag = star_tag + + def as_set(self, include_weak=False): + """Convert the `ETags` object into a python set. Per default all the + weak etags are not part of this set.""" + rv = set(self._strong) + if include_weak: + rv.update(self._weak) + return rv + + def is_weak(self, etag): + """Check if an etag is weak.""" + return etag in self._weak + + def contains_weak(self, etag): + """Check if an etag is part of the set including weak and strong tags.""" + return self.is_weak(etag) or self.contains(etag) + + def contains(self, etag): + """Check if an etag is part of the set ignoring weak tags. + It is also possible to use the ``in`` operator. + + """ + if self.star_tag: + return True + return etag in self._strong + + def contains_raw(self, etag): + """When passed a quoted tag it will check if this tag is part of the + set. If the tag is weak it is checked against weak and strong tags, + otherwise strong only.""" + etag, weak = unquote_etag(etag) + if weak: + return self.contains_weak(etag) + return self.contains(etag) + + def to_header(self): + """Convert the etags set into a HTTP header string.""" + if self.star_tag: + return '*' + return ', '.join( + ['"%s"' % x for x in self._strong] + + ['W/"%s"' % x for x in self._weak] + ) + + def __call__(self, etag=None, data=None, include_weak=False): + if [etag, data].count(None) != 1: + raise TypeError('either tag or data required, but at least one') + if etag is None: + etag = generate_etag(data) + if include_weak: + if etag in self._weak: + return True + return etag in self._strong + + def __bool__(self): + return bool(self.star_tag or self._strong or self._weak) + + __nonzero__ = __bool__ + + def __str__(self): + return self.to_header() + + def __iter__(self): + return iter(self._strong) + + def __contains__(self, etag): + return self.contains(etag) + + def __repr__(self): + return '<%s %r>' % (self.__class__.__name__, str(self)) + + +class IfRange(object): + + """Very simple object that represents the `If-Range` header in parsed + form. 
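Strong versus weak comparison on :class:`ETags` in a nutshell (tag values are made up):

.. code:: python

    from werkzeug.datastructures import ETags

    etags = ETags(strong_etags=['abc'], weak_etags=['def'])
    assert 'abc' in etags               # __contains__ ignores weak tags
    assert not etags.contains('def')
    assert etags.contains_weak('def')
    assert etags.to_header() == '"abc", W/"def"'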
It will have either an etag or a date, or neither, but never both. + + .. versionadded:: 0.7 + """ + + def __init__(self, etag=None, date=None): + #: The etag parsed and unquoted. Ranges always operate on strong + #: etags so the weakness information is not necessary. + self.etag = etag + #: The date in parsed format or `None`. + self.date = date + + def to_header(self): + """Converts the object back into an HTTP header.""" + if self.date is not None: + return http_date(self.date) + if self.etag is not None: + return quote_etag(self.etag) + return '' + + def __str__(self): + return self.to_header() + + def __repr__(self): + return '<%s %r>' % (self.__class__.__name__, str(self)) + + +class Range(object): + + """Represents a range header. All methods only support bytes as the + unit. Multiple ranges can be stored, but :meth:`range_for_length` only + works if exactly one range is provided. + + .. versionadded:: 0.7 + """ + + def __init__(self, units, ranges): + #: The units of this range. Usually "bytes". + self.units = units + #: A list of ``(begin, end)`` tuples for the range header provided. + #: The ranges are non-inclusive. + self.ranges = ranges + + def range_for_length(self, length): + """If the range is for bytes, the length is not `None`, there is + exactly one range, and it is satisfiable, it returns a ``(start, stop)`` + tuple; otherwise `None`. + """ + if self.units != 'bytes' or length is None or len(self.ranges) != 1: + return None + start, end = self.ranges[0] + if end is None: + end = length + if start < 0: + start += length + if is_byte_range_valid(start, end, length): + return start, min(end, length) + + def make_content_range(self, length): + """Creates a :class:`~werkzeug.datastructures.ContentRange` object + from the current range and given content length. + """ + rng = self.range_for_length(length) + if rng is not None: + return ContentRange(self.units, rng[0], rng[1], length) + + def to_header(self): + """Converts the object back into an HTTP header.""" + ranges = [] + for begin, end in self.ranges: + if end is None: + ranges.append(begin >= 0 and '%s-' % begin or str(begin)) + else: + ranges.append('%s-%s' % (begin, end - 1)) + return '%s=%s' % (self.units, ','.join(ranges)) + + def to_content_range_header(self, length): + """Converts the object into a `Content-Range` HTTP header based on + the given length. + """ + range_for_length = self.range_for_length(length) + if range_for_length is not None: + return '%s %d-%d/%d' % (self.units, + range_for_length[0], + range_for_length[1] - 1, length) + return None + + def __str__(self): + return self.to_header() + + def __repr__(self): + return '<%s %r>' % (self.__class__.__name__, str(self)) + + +class ContentRange(object): + + """Represents the content range header. + + .. versionadded:: 0.7 + """ + + def __init__(self, units, start, stop, length=None, on_update=None): + assert is_byte_range_valid(start, stop, length), \ + 'Bad range provided' + self.on_update = on_update + self.set(start, stop, length, units) + + def _callback_property(name): + def fget(self): + return getattr(self, name) + + def fset(self, value): + setattr(self, name, value) + if self.on_update is not None: + self.on_update(self) + return property(fget, fset) + + #: The units to use, usually "bytes" + units = _callback_property('_units') + #: The start point of the range or `None`. + start = _callback_property('_start') + #: The stop point of the range (non-inclusive) or `None`. Can only be + #: `None` if also start is `None`.
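The end-exclusive storage versus end-inclusive serialization is worth a sketch:

.. code:: python

    from werkzeug.datastructures import Range

    rng = Range('bytes', [(0, 500)])         # stored end-exclusive
    assert rng.to_header() == 'bytes=0-499'  # serialized end-inclusive
    assert rng.range_for_length(1000) == (0, 500)
    assert rng.range_for_length(300) == (0, 300)   # clamped to the given length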
+ stop = _callback_property('_stop') + #: The length of the range or `None`. + length = _callback_property('_length') + + def set(self, start, stop, length=None, units='bytes'): + """Simple method to update the ranges.""" + assert is_byte_range_valid(start, stop, length), \ + 'Bad range provided' + self._units = units + self._start = start + self._stop = stop + self._length = length + if self.on_update is not None: + self.on_update(self) + + def unset(self): + """Sets the units to `None` which indicates that the header should + no longer be used. + """ + self.set(None, None, units=None) + + def to_header(self): + if self.units is None: + return '' + if self.length is None: + length = '*' + else: + length = self.length + if self.start is None: + return '%s */%s' % (self.units, length) + return '%s %s-%s/%s' % ( + self.units, + self.start, + self.stop - 1, + length + ) + + def __nonzero__(self): + return self.units is not None + + __bool__ = __nonzero__ + + def __str__(self): + return self.to_header() + + def __repr__(self): + return '<%s %r>' % (self.__class__.__name__, str(self)) + + +class Authorization(ImmutableDictMixin, dict): + + """Represents an `Authorization` header sent by the client. You should + not create this kind of object yourself but use it when it's returned by + the `parse_authorization_header` function. + + This object is a dict subclass and can be altered by setting dict items + but it should be considered immutable as it's returned by the client and + not meant for modifications. + + .. versionchanged:: 0.5 + This object became immutable. + """ + + def __init__(self, auth_type, data=None): + dict.__init__(self, data or {}) + self.type = auth_type + + username = property(lambda x: x.get('username'), doc=''' + The username transmitted. This is set for both basic and digest + auth all the time.''') + password = property(lambda x: x.get('password'), doc=''' + When the authentication type is basic this is the password + transmitted by the client, else `None`.''') + realm = property(lambda x: x.get('realm'), doc=''' + This is the server realm sent back for HTTP digest auth.''') + nonce = property(lambda x: x.get('nonce'), doc=''' + The nonce the server sent for digest auth, sent back by the client. + A nonce should be unique for every 401 response for HTTP digest + auth.''') + uri = property(lambda x: x.get('uri'), doc=''' + The URI from Request-URI of the Request-Line; duplicated because + proxies are allowed to change the Request-Line in transit. HTTP + digest auth only.''') + nc = property(lambda x: x.get('nc'), doc=''' + The nonce count value transmitted by clients if a qop-header is + also transmitted. HTTP digest auth only.''') + cnonce = property(lambda x: x.get('cnonce'), doc=''' + If the server sent a qop-header in the ``WWW-Authenticate`` + header, the client has to provide this value for HTTP digest auth. + See the RFC for more details.''') + response = property(lambda x: x.get('response'), doc=''' + A string of 32 hex digits computed as defined in RFC 2617, which + proves that the user knows a password. Digest auth only.''') + opaque = property(lambda x: x.get('opaque'), doc=''' + The opaque header from the server returned unchanged by the client. + It is recommended that this string be base64 or hexadecimal data. 
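As the docstring says, an :class:`Authorization` object normally comes out of `parse_authorization_header`; for example (credentials are made up, the base64 decodes to ``user:secret``):

.. code:: python

    from werkzeug.http import parse_authorization_header

    auth = parse_authorization_header('Basic dXNlcjpzZWNyZXQ=')
    assert auth.type == 'basic'
    assert auth.username == 'user'
    assert auth.password == 'secret'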
+ Digest auth only.''') + + @property + def qop(self): + """Indicates what "quality of protection" the client has applied to + the message for HTTP digest auth.""" + def on_update(header_set): + if not header_set and 'qop' in self: + del self['qop'] + elif header_set: + self['qop'] = header_set.to_header() + return parse_set_header(self.get('qop'), on_update) + + +class WWWAuthenticate(UpdateDictMixin, dict): + + """Provides simple access to `WWW-Authenticate` headers.""" + + #: list of keys that require quoting in the generated header + _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm', 'qop']) + + def __init__(self, auth_type=None, values=None, on_update=None): + dict.__init__(self, values or ()) + if auth_type: + self['__auth_type__'] = auth_type + self.on_update = on_update + + def set_basic(self, realm='authentication required'): + """Clear the auth info and enable basic auth.""" + dict.clear(self) + dict.update(self, {'__auth_type__': 'basic', 'realm': realm}) + if self.on_update: + self.on_update(self) + + def set_digest(self, realm, nonce, qop=('auth',), opaque=None, + algorithm=None, stale=False): + """Clear the auth info and enable digest auth.""" + d = { + '__auth_type__': 'digest', + 'realm': realm, + 'nonce': nonce, + 'qop': dump_header(qop) + } + if stale: + d['stale'] = 'TRUE' + if opaque is not None: + d['opaque'] = opaque + if algorithm is not None: + d['algorithm'] = algorithm + dict.clear(self) + dict.update(self, d) + if self.on_update: + self.on_update(self) + + def to_header(self): + """Convert the stored values into a WWW-Authenticate header.""" + d = dict(self) + auth_type = d.pop('__auth_type__', None) or 'basic' + return '%s %s' % (auth_type.title(), ', '.join([ + '%s=%s' % (key, quote_header_value(value, + allow_token=key not in self._require_quoting)) + for key, value in iteritems(d) + ])) + + def __str__(self): + return self.to_header() + + def __repr__(self): + return '<%s %r>' % ( + self.__class__.__name__, + self.to_header() + ) + + def auth_property(name, doc=None): + """A static helper function for subclasses to add extra authentication + system properties onto a class:: + + class FooAuthenticate(WWWAuthenticate): + special_realm = auth_property('special_realm') + + For more information have a look at the sourcecode to see how the + regular properties (:attr:`realm` etc.) are implemented. + """ + + def _set_value(self, value): + if value is None: + self.pop(name, None) + else: + self[name] = str(value) + return property(lambda x: x.get(name), _set_value, doc=doc) + + def _set_property(name, doc=None): + def fget(self): + def on_update(header_set): + if not header_set and name in self: + del self[name] + elif header_set: + self[name] = header_set.to_header() + return parse_set_header(self.get(name), on_update) + return property(fget, doc=doc) + + type = auth_property('__auth_type__', doc=''' + The type of the auth mechanism. HTTP currently specifies + `Basic` and `Digest`.''') + realm = auth_property('realm', doc=''' + A string to be displayed to users so they know which username and + password to use. This string should contain at least the name of + the host performing the authentication and might additionally + indicate the collection of users who might have access.''') + domain = _set_property('domain', doc=''' + A list of URIs that define the protection space. 
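Typically a :class:`WWWAuthenticate` object is serialized onto a 401 response; a sketch (the realm is made up):

.. code:: python

    from werkzeug.datastructures import WWWAuthenticate

    www = WWWAuthenticate()
    www.set_basic(realm='admin area')
    assert www.to_header() == 'Basic realm="admin area"'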
If a URI is an + absolute path, it is relative to the canonical root URL of the + server being accessed.''') + nonce = auth_property('nonce', doc=''' + A server-specified data string which should be uniquely generated + each time a 401 response is made. It is recommended that this + string be base64 or hexadecimal data.''') + opaque = auth_property('opaque', doc=''' + A string of data, specified by the server, which should be returned + by the client unchanged in the Authorization header of subsequent + requests with URIs in the same protection space. It is recommended + that this string be base64 or hexadecimal data.''') + algorithm = auth_property('algorithm', doc=''' + A string indicating a pair of algorithms used to produce the digest + and a checksum. If this is not present it is assumed to be "MD5". + If the algorithm is not understood, the challenge should be ignored + (and a different one used, if there is more than one).''') + qop = _set_property('qop', doc=''' + A set of quality-of-privacy directives such as auth and auth-int.''') + + def _get_stale(self): + val = self.get('stale') + if val is not None: + return val.lower() == 'true' + + def _set_stale(self, value): + if value is None: + self.pop('stale', None) + else: + self['stale'] = value and 'TRUE' or 'FALSE' + stale = property(_get_stale, _set_stale, doc=''' + A flag, indicating that the previous request from the client was + rejected because the nonce value was stale.''') + del _get_stale, _set_stale + + # make auth_property a staticmethod so that subclasses of + # `WWWAuthenticate` can use it for new properties. + auth_property = staticmethod(auth_property) + del _set_property + + +class FileStorage(object): + + """The :class:`FileStorage` class is a thin wrapper over incoming files. + It is used by the request object to represent uploaded files. All the + attributes of the wrapper stream are proxied by the file storage so + it's possible to do ``storage.read()`` instead of the long form + ``storage.stream.read()``. + """ + + def __init__(self, stream=None, filename=None, name=None, + content_type=None, content_length=None, + headers=None): + self.name = name + self.stream = stream or _empty_stream + + # if no filename is provided we can attempt to get the filename + # from the stream object passed. There we have to be careful to + # skip things like , etc. Python marks these + # special filenames with angular brackets. + if filename is None: + filename = getattr(stream, 'name', None) + s = make_literal_wrapper(filename) + if filename and filename[0] == s('<') and filename[-1] == s('>'): + filename = None + + # On Python 3 we want to make sure the filename is always unicode. + # This might not be if the name attribute is bytes due to the + # file being opened from the bytes API. + if not PY2 and isinstance(filename, bytes): + filename = filename.decode(get_filesystem_encoding(), + 'replace') + + self.filename = filename + if headers is None: + headers = Headers() + self.headers = headers + if content_type is not None: + headers['Content-Type'] = content_type + if content_length is not None: + headers['Content-Length'] = str(content_length) + + def _parse_content_type(self): + if not hasattr(self, '_parsed_content_type'): + self._parsed_content_type = \ + parse_options_header(self.content_type) + + @property + def content_type(self): + """The content-type sent in the header. 
Usually not available""" + return self.headers.get('content-type') + + @property + def content_length(self): + """The content-length sent in the header. Usually not available""" + return int(self.headers.get('content-length') or 0) + + @property + def mimetype(self): + """Like :attr:`content_type`, but without parameters (eg, without + charset, type etc.) and always lowercase. For example if the content + type is ``text/HTML; charset=utf-8`` the mimetype would be + ``'text/html'``. + + .. versionadded:: 0.7 + """ + self._parse_content_type() + return self._parsed_content_type[0].lower() + + @property + def mimetype_params(self): + """The mimetype parameters as dict. For example if the content + type is ``text/html; charset=utf-8`` the params would be + ``{'charset': 'utf-8'}``. + + .. versionadded:: 0.7 + """ + self._parse_content_type() + return self._parsed_content_type[1] + + def save(self, dst, buffer_size=16384): + """Save the file to a destination path or file object. If the + destination is a file object you have to close it yourself after the + call. The buffer size is the number of bytes held in memory during + the copy process. It defaults to 16KB. + + For secure file saving also have a look at :func:`secure_filename`. + + :param dst: a filename or open file object the uploaded file + is saved to. + :param buffer_size: the size of the buffer. This works the same as + the `length` parameter of + :func:`shutil.copyfileobj`. + """ + from shutil import copyfileobj + close_dst = False + if isinstance(dst, string_types): + dst = open(dst, 'wb') + close_dst = True + try: + copyfileobj(self.stream, dst, buffer_size) + finally: + if close_dst: + dst.close() + + def close(self): + """Close the underlying file if possible.""" + try: + self.stream.close() + except Exception: + pass + + def __nonzero__(self): + return bool(self.filename) + __bool__ = __nonzero__ + + def __getattr__(self, name): + return getattr(self.stream, name) + + def __iter__(self): + return iter(self.stream) + + def __repr__(self): + return '<%s: %r (%r)>' % ( + self.__class__.__name__, + self.filename, + self.content_type + ) + + +# circular dependencies +from werkzeug.http import dump_options_header, dump_header, generate_etag, \ + quote_header_value, parse_set_header, unquote_etag, quote_etag, \ + parse_options_header, http_date, is_byte_range_valid +from werkzeug import exceptions diff --git a/app/lib/werkzeug/debug/__init__.py b/app/lib/werkzeug/debug/__init__.py new file mode 100644 index 0000000..6124727 --- /dev/null +++ b/app/lib/werkzeug/debug/__init__.py @@ -0,0 +1,466 @@ +# -*- coding: utf-8 -*- +""" + werkzeug.debug + ~~~~~~~~~~~~~~ + + WSGI application traceback debugger. + + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" +import os +import re +import sys +import uuid +import json +import time +import getpass +import hashlib +import mimetypes +from itertools import chain +from os.path import join, dirname, basename, isfile +from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response +from werkzeug.http import parse_cookie +from werkzeug.debug.tbtools import get_current_traceback, render_console_html +from werkzeug.debug.console import Console +from werkzeug.security import gen_salt +from werkzeug._internal import _log +from werkzeug._compat import text_type + + +# DEPRECATED +#: import this here because it once was documented as being available +#: from this module. In case there are users left ... 
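Before moving on to the debugger module below, a quick sketch of the :class:`FileStorage` wrapper defined above (stream contents are made up):

.. code:: python

    import io
    from werkzeug.datastructures import FileStorage

    fs = FileStorage(stream=io.BytesIO(b'hello'), filename='hello.txt',
                     content_type='text/plain')
    out = io.BytesIO()
    fs.save(out)                         # file objects are left open for the caller
    assert out.getvalue() == b'hello'
    assert fs.mimetype == 'text/plain'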
+from werkzeug.debug.repr import debug_repr # noqa + + +# A week +PIN_TIME = 60 * 60 * 24 * 7 + + +def hash_pin(pin): + if isinstance(pin, text_type): + pin = pin.encode('utf-8', 'replace') + return hashlib.md5(pin + b'shittysalt').hexdigest()[:12] + + +_machine_id = None + + +def get_machine_id(): + global _machine_id + rv = _machine_id + if rv is not None: + return rv + + def _generate(): + # Potential sources of secret information on linux. The machine-id + # is stable across boots, the boot id is not + for filename in '/etc/machine-id', '/proc/sys/kernel/random/boot_id': + try: + with open(filename, 'rb') as f: + return f.readline().strip() + except IOError: + continue + + # On OS X we can use the computer's serial number assuming that + # ioreg exists and can spit out that information. + try: + # Also catch import errors: subprocess may not be available, e.g. + # Google App Engine + # See https://github.com/pallets/werkzeug/issues/925 + from subprocess import Popen, PIPE + dump = Popen(['ioreg', '-c', 'IOPlatformExpertDevice', '-d', '2'], + stdout=PIPE).communicate()[0] + match = re.search(b'"serial-number" = <([^>]+)', dump) + if match is not None: + return match.group(1) + except (OSError, ImportError): + pass + + # On Windows we can use winreg to get the machine guid + wr = None + try: + import winreg as wr + except ImportError: + try: + import _winreg as wr + except ImportError: + pass + if wr is not None: + try: + with wr.OpenKey(wr.HKEY_LOCAL_MACHINE, + 'SOFTWARE\\Microsoft\\Cryptography', 0, + wr.KEY_READ | wr.KEY_WOW64_64KEY) as rk: + return wr.QueryValueEx(rk, 'MachineGuid')[0] + except WindowsError: + pass + + _machine_id = rv = _generate() + return rv + + +class _ConsoleFrame(object): + + """Helper class so that we can reuse the frame console code for the + standalone console. + """ + + def __init__(self, namespace): + self.console = Console(namespace) + self.id = 0 + + +def get_pin_and_cookie_name(app): + """Given an application object this returns a semi-stable 9 digit pin + code and a random key. The hope is that this is stable between + restarts to not make debugging particularly frustrating. If the pin + was forcefully disabled this returns `None`. + + Second item in the resulting tuple is the cookie name for remembering. + """ + pin = os.environ.get('WERKZEUG_DEBUG_PIN') + rv = None + num = None + + # Pin was explicitly disabled + if pin == 'off': + return None, None + + # Pin was provided explicitly + if pin is not None and pin.replace('-', '').isdigit(): + # If there are separators in the pin, return it directly + if '-' in pin: + rv = pin + else: + num = pin + + modname = getattr(app, '__module__', + getattr(app.__class__, '__module__')) + + try: + # `getpass.getuser()` imports the `pwd` module, + # which does not exist in the Google App Engine sandbox. + username = getpass.getuser() + except ImportError: + username = None + + mod = sys.modules.get(modname) + + # This information only exists to make the cookie unique on the + # computer, not as a security feature. + probably_public_bits = [ + username, + modname, + getattr(app, '__name__', getattr(app.__class__, '__name__')), + getattr(mod, '__file__', None), + ] + + # This information is here to make it harder for an attacker to + # guess the cookie name. They are unlikely to be contained anywhere + # within the unauthenticated debug page. 
+ private_bits = [ + str(uuid.getnode()), + get_machine_id(), + ] + + h = hashlib.md5() + for bit in chain(probably_public_bits, private_bits): + if not bit: + continue + if isinstance(bit, text_type): + bit = bit.encode('utf-8') + h.update(bit) + h.update(b'cookiesalt') + + cookie_name = '__wzd' + h.hexdigest()[:20] + + # If we need to generate a pin we salt it a bit more so that we don't + # end up with the same value and generate 9 digits + if num is None: + h.update(b'pinsalt') + num = ('%09d' % int(h.hexdigest(), 16))[:9] + + # Format the pincode in groups of digits for easier remembering if + # we don't have a result yet. + if rv is None: + for group_size in 5, 4, 3: + if len(num) % group_size == 0: + rv = '-'.join(num[x:x + group_size].rjust(group_size, '0') + for x in range(0, len(num), group_size)) + break + else: + rv = num + + return rv, cookie_name + + +class DebuggedApplication(object): + """Enables debugging support for a given application:: + + from werkzeug.debug import DebuggedApplication + from myapp import app + app = DebuggedApplication(app, evalex=True) + + The `evalex` keyword argument allows evaluating expressions in a + traceback's frame context. + + .. versionadded:: 0.9 + The `lodgeit_url` parameter was deprecated. + + :param app: the WSGI application to run debugged. + :param evalex: enable exception evaluation feature (interactive + debugging). This requires a non-forking server. + :param request_key: The key that points to the request object in the + environment. This parameter is ignored in current + versions. + :param console_path: the URL for a general purpose console. + :param console_init_func: the function that is executed before starting + the general purpose console. The return value + is used as initial namespace. + :param show_hidden_frames: by default hidden traceback frames are skipped. + You can show them by setting this parameter + to `True`. + :param pin_security: can be used to disable the pin based security system. + :param pin_logging: enables the logging of the pin system. + """ + + def __init__(self, app, evalex=False, request_key='werkzeug.request', + console_path='/console', console_init_func=None, + show_hidden_frames=False, lodgeit_url=None, + pin_security=True, pin_logging=True): + if lodgeit_url is not None: + from warnings import warn + warn(DeprecationWarning('Werkzeug now pastes into gists.')) + if not console_init_func: + console_init_func = None + self.app = app + self.evalex = evalex + self.frames = {} + self.tracebacks = {} + self.request_key = request_key + self.console_path = console_path + self.console_init_func = console_init_func + self.show_hidden_frames = show_hidden_frames + self.secret = gen_salt(20) + self._failed_pin_auth = 0 + + self.pin_logging = pin_logging + if pin_security: + # Print out the pin for the debugger on standard out. + if os.environ.get('WERKZEUG_RUN_MAIN') == 'true' and \ + pin_logging: + _log('warning', ' * Debugger is active!') + if self.pin is None: + _log('warning', ' * Debugger PIN disabled. 
' + 'DEBUGGER UNSECURED!') + else: + _log('info', ' * Debugger PIN: %s' % self.pin) + else: + self.pin = None + + def _get_pin(self): + if not hasattr(self, '_pin'): + self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app) + return self._pin + + def _set_pin(self, value): + self._pin = value + + pin = property(_get_pin, _set_pin) + del _get_pin, _set_pin + + @property + def pin_cookie_name(self): + """The name of the pin cookie.""" + if not hasattr(self, '_pin_cookie'): + self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app) + return self._pin_cookie + + def debug_application(self, environ, start_response): + """Run the application and conserve the traceback frames.""" + app_iter = None + try: + app_iter = self.app(environ, start_response) + for item in app_iter: + yield item + if hasattr(app_iter, 'close'): + app_iter.close() + except Exception: + if hasattr(app_iter, 'close'): + app_iter.close() + traceback = get_current_traceback( + skip=1, show_hidden_frames=self.show_hidden_frames, + ignore_system_exceptions=True) + for frame in traceback.frames: + self.frames[frame.id] = frame + self.tracebacks[traceback.id] = traceback + + try: + start_response('500 INTERNAL SERVER ERROR', [ + ('Content-Type', 'text/html; charset=utf-8'), + # Disable Chrome's XSS protection, the debug + # output can cause false-positives. + ('X-XSS-Protection', '0'), + ]) + except Exception: + # if we end up here there has been output but an error + # occurred. in that situation we can do nothing fancy any + # more, better log something into the error log and fall + # back gracefully. + environ['wsgi.errors'].write( + 'Debugging middleware caught exception in streamed ' + 'response at a point where response headers were already ' + 'sent.\n') + else: + is_trusted = bool(self.check_pin_trust(environ)) + yield traceback.render_full(evalex=self.evalex, + evalex_trusted=is_trusted, + secret=self.secret) \ + .encode('utf-8', 'replace') + + traceback.log(environ['wsgi.errors']) + + def execute_command(self, request, command, frame): + """Execute a command in a console.""" + return Response(frame.console.eval(command), mimetype='text/html') + + def display_console(self, request): + """Display a standalone shell.""" + if 0 not in self.frames: + if self.console_init_func is None: + ns = {} + else: + ns = dict(self.console_init_func()) + ns.setdefault('app', self.app) + self.frames[0] = _ConsoleFrame(ns) + is_trusted = bool(self.check_pin_trust(request.environ)) + return Response(render_console_html(secret=self.secret, + evalex_trusted=is_trusted), + mimetype='text/html') + + def paste_traceback(self, request, traceback): + """Paste the traceback and return a JSON response.""" + rv = traceback.paste() + return Response(json.dumps(rv), mimetype='application/json') + + def get_resource(self, request, filename): + """Return a static resource from the shared folder.""" + filename = join(dirname(__file__), 'shared', basename(filename)) + if isfile(filename): + mimetype = mimetypes.guess_type(filename)[0] \ + or 'application/octet-stream' + f = open(filename, 'rb') + try: + return Response(f.read(), mimetype=mimetype) + finally: + f.close() + return Response('Not Found', status=404) + + def check_pin_trust(self, environ): + """Checks if the request passed the pin test. This returns `True` if the + request is trusted on a pin/cookie basis and returns `False` if not. + Additionally if the cookie's stored pin hash is wrong it will return + `None` so that appropriate action can be taken. 
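Wiring the middleware up by hand might look like this (a sketch; ``run_simple(..., use_debugger=True)`` wraps the application the same way):

.. code:: python

    from werkzeug.serving import run_simple
    from werkzeug.debug import DebuggedApplication

    def app(environ, start_response):
        raise RuntimeError('boom')       # rendered as an interactive traceback

    if __name__ == '__main__':
        run_simple('localhost', 5000, DebuggedApplication(app, evalex=True))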
+ """ + if self.pin is None: + return True + val = parse_cookie(environ).get(self.pin_cookie_name) + if not val or '|' not in val: + return False + ts, pin_hash = val.split('|', 1) + if not ts.isdigit(): + return False + if pin_hash != hash_pin(self.pin): + return None + return (time.time() - PIN_TIME) < int(ts) + + def _fail_pin_auth(self): + time.sleep(self._failed_pin_auth > 5 and 5.0 or 0.5) + self._failed_pin_auth += 1 + + def pin_auth(self, request): + """Authenticates with the pin.""" + exhausted = False + auth = False + trust = self.check_pin_trust(request.environ) + + # If the trust return value is `None` it means that the cookie is + # set but the stored pin hash value is bad. This means that the + # pin was changed. In this case we count a bad auth and unset the + # cookie. This way it becomes harder to guess the cookie name + # instead of the pin as we still count up failures. + bad_cookie = False + if trust is None: + self._fail_pin_auth() + bad_cookie = True + + # If we're trusted, we're authenticated. + elif trust: + auth = True + + # If we failed too many times, then we're locked out. + elif self._failed_pin_auth > 10: + exhausted = True + + # Otherwise go through pin based authentication + else: + entered_pin = request.args.get('pin') + if entered_pin.strip().replace('-', '') == \ + self.pin.replace('-', ''): + self._failed_pin_auth = 0 + auth = True + else: + self._fail_pin_auth() + + rv = Response(json.dumps({ + 'auth': auth, + 'exhausted': exhausted, + }), mimetype='application/json') + if auth: + rv.set_cookie(self.pin_cookie_name, '%s|%s' % ( + int(time.time()), + hash_pin(self.pin) + ), httponly=True) + elif bad_cookie: + rv.delete_cookie(self.pin_cookie_name) + return rv + + def log_pin_request(self): + """Log the pin if needed.""" + if self.pin_logging and self.pin is not None: + _log('info', ' * To enable the debugger you need to ' + 'enter the security pin:') + _log('info', ' * Debugger pin code: %s' % self.pin) + return Response('') + + def __call__(self, environ, start_response): + """Dispatch the requests.""" + # important: don't ever access a function here that reads the incoming + # form data! Otherwise the application won't have access to that data + # any more! 
+ request = Request(environ) + response = self.debug_application + if request.args.get('__debugger__') == 'yes': + cmd = request.args.get('cmd') + arg = request.args.get('f') + secret = request.args.get('s') + traceback = self.tracebacks.get(request.args.get('tb', type=int)) + frame = self.frames.get(request.args.get('frm', type=int)) + if cmd == 'resource' and arg: + response = self.get_resource(request, arg) + elif cmd == 'paste' and traceback is not None and \ + secret == self.secret: + response = self.paste_traceback(request, traceback) + elif cmd == 'pinauth' and secret == self.secret: + response = self.pin_auth(request) + elif cmd == 'printpin' and secret == self.secret: + response = self.log_pin_request() + elif self.evalex and cmd is not None and frame is not None \ + and self.secret == secret and \ + self.check_pin_trust(environ): + response = self.execute_command(request, cmd, frame) + elif self.evalex and self.console_path is not None and \ + request.path == self.console_path: + response = self.display_console(request) + return response(environ, start_response) diff --git a/src/lib/werkzeug/debug/console.py b/app/lib/werkzeug/debug/console.py similarity index 93% rename from src/lib/werkzeug/debug/console.py rename to app/lib/werkzeug/debug/console.py index 3ee7246..30e8906 100644 --- a/src/lib/werkzeug/debug/console.py +++ b/app/lib/werkzeug/debug/console.py @@ -5,12 +5,13 @@ Interactive console support. - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD. """ import sys import code from types import CodeType + from werkzeug.utils import escape from werkzeug.local import Local from werkzeug.debug.repr import debug_repr, dump, helper @@ -20,6 +21,7 @@ class HTMLStringO(object): + """A StringO version that HTML escapes on write.""" def __init__(self): @@ -50,7 +52,7 @@ def reset(self): return val def _write(self, x): - if isinstance(x, str): + if isinstance(x, bytes): x = x.decode('utf-8', 'replace') self._buffer.append(x) @@ -62,6 +64,7 @@ def writelines(self, x): class ThreadedStream(object): + """Thread-local wrapper for sys.stdout for the interactive console.""" def push(): @@ -86,6 +89,7 @@ def displayhook(obj): # stream._write bypasses escaping as debug_repr is # already generating HTML for us. 
if obj is not None: + _local._current_ipy.locals['_'] = obj stream._write(debug_repr(obj)) displayhook = staticmethod(displayhook) @@ -134,6 +138,7 @@ def get_source_by_code(self, code): def _wrap_compiler(console): compile = console.compile + def func(source, filename, symbol): code = compile(source, filename, symbol) console.loader.register(code, source) @@ -160,7 +165,7 @@ def runsource(self, source): try: source_to_eval = ''.join(self.buffer + [source]) if code.InteractiveInterpreter.runsource(self, - source_to_eval, '', 'single'): + source_to_eval, '', 'single'): self.more = True self.buffer.append(source) else: @@ -168,11 +173,11 @@ def runsource(self, source): del self.buffer[:] finally: output = ThreadedStream.fetch() - return prompt + source + output + return prompt + escape(source) + output def runcode(self, code): try: - exec code in self.globals, self.locals + eval(code, self.globals, self.locals) except Exception: self.showtraceback() @@ -191,6 +196,7 @@ def write(self, data): class Console(object): + """An interactive console.""" def __init__(self, globals=None, locals=None): @@ -201,6 +207,7 @@ def __init__(self, globals=None, locals=None): self._ipy = _InteractiveConsole(globals, locals) def eval(self, code): + _local._current_ipy = self._ipy old_sys_stdout = sys.stdout try: return self._ipy.runsource(code) diff --git a/src/lib/werkzeug/debug/repr.py b/app/lib/werkzeug/debug/repr.py similarity index 84% rename from src/lib/werkzeug/debug/repr.py rename to app/lib/werkzeug/debug/repr.py index a6a54a9..0a02457 100644 --- a/src/lib/werkzeug/debug/repr.py +++ b/app/lib/werkzeug/debug/repr.py @@ -10,17 +10,20 @@ Together with the CSS and JavaScript files of the debugger this gives a colorful and more compact output. - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. + :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD. """ import sys import re +import codecs from traceback import format_exception_only try: from collections import deque -except ImportError: # pragma: no cover +except ImportError: # pragma: no cover deque = None from werkzeug.utils import escape +from werkzeug._compat import iteritems, PY2, text_type, integer_types, \ + string_types missing = object() @@ -61,6 +64,7 @@ def dump(obj=missing): class _Helper(object): + """Displays an HTML version of the normal help, for the interactive debugger only because it requires a patched sys.stdout. 
""" @@ -74,12 +78,14 @@ def __call__(self, topic=None): return import pydoc pydoc.help(topic) - rv = sys.stdout.reset().decode('utf-8', 'ignore') + rv = sys.stdout.reset() + if isinstance(rv, bytes): + rv = rv.decode('utf-8', 'ignore') paragraphs = _paragraph_re.split(rv) if len(paragraphs) > 1: title = paragraphs[0] text = '\n\n'.join(paragraphs[1:]) - else: # pragma: no cover + else: # pragma: no cover title = 'Help' text = paragraphs[0] sys.stdout._write(HELP_HTML % {'title': title, 'text': text}) @@ -135,7 +141,11 @@ def proxy(self, obj, recursive): del _sequence_repr_maker def regex_repr(self, obj): - pattern = repr(obj.pattern).decode('string-escape', 'ignore') + pattern = repr(obj.pattern) + if PY2: + pattern = pattern.decode('string-escape', 'ignore') + else: + pattern = codecs.decode(pattern, 'unicode-escape', 'ignore') if pattern[:1] == 'u': pattern = 'ur' + pattern[1:] else: @@ -144,26 +154,25 @@ def regex_repr(self, obj): def string_repr(self, obj, limit=70): buf = [''] - escaped = escape(obj) - a = repr(escaped[:limit]) - b = repr(escaped[limit:]) - if isinstance(obj, unicode): + a = repr(obj[:limit]) + b = repr(obj[limit:]) + if isinstance(obj, text_type) and PY2: buf.append('u') a = a[1:] b = b[1:] if b != "''": - buf.extend((a[:-1], '', b[1:], '')) + buf.extend((escape(a[:-1]), '', escape(b[1:]), '')) else: - buf.append(a) + buf.append(escape(a)) buf.append('') - return _add_subclass_info(u''.join(buf), obj, (str, unicode)) + return _add_subclass_info(u''.join(buf), obj, (bytes, text_type)) def dict_repr(self, d, recursive, limit=5): if recursive: return _add_subclass_info(u'{...}', d, dict) buf = ['{'] have_extended_section = False - for idx, (key, value) in enumerate(d.iteritems()): + for idx, (key, value) in enumerate(iteritems(d)): if idx: buf.append(', ') if idx == limit - 1: @@ -178,15 +187,17 @@ def dict_repr(self, d, recursive, limit=5): return _add_subclass_info(u''.join(buf), d, dict) def object_repr(self, obj): - return u'%s' % \ - escape(repr(obj).decode('utf-8', 'replace')) + r = repr(obj) + if PY2: + r = r.decode('utf-8', 'replace') + return u'%s' % escape(r) def dispatch_repr(self, obj, recursive): if obj is helper: return u'%r' % helper - if isinstance(obj, (int, long, float, complex)): + if isinstance(obj, (integer_types, float, complex)): return u'%r' % obj - if isinstance(obj, basestring): + if isinstance(obj, string_types): return self.string_repr(obj) if isinstance(obj, RegexType): return self.regex_repr(obj) @@ -207,10 +218,12 @@ def dispatch_repr(self, obj, recursive): def fallback_repr(self): try: info = ''.join(format_exception_only(*sys.exc_info()[:2])) - except Exception: # pragma: no cover + except Exception: # pragma: no cover info = '?' 
+ if PY2: + info = info.decode('utf-8', 'ignore') return u'<broken repr (%s)>' \ - u'' % escape(info.decode('utf-8', 'ignore').strip()) + u'' % escape(info.strip()) def repr(self, obj): recursive = False @@ -232,8 +245,8 @@ def dump_object(self, obj): if isinstance(obj, dict): title = 'Contents of' items = [] - for key, value in obj.iteritems(): - if not isinstance(key, basestring): + for key, value in iteritems(obj): + if not isinstance(key, string_types): items = None break items.append((key, self.repr(value))) diff --git a/src/lib/werkzeug/debug/shared/FONT_LICENSE b/app/lib/werkzeug/debug/shared/FONT_LICENSE similarity index 100% rename from src/lib/werkzeug/debug/shared/FONT_LICENSE rename to app/lib/werkzeug/debug/shared/FONT_LICENSE diff --git a/src/lib/werkzeug/debug/shared/console.png b/app/lib/werkzeug/debug/shared/console.png old mode 100755 new mode 100644 similarity index 100% rename from src/lib/werkzeug/debug/shared/console.png rename to app/lib/werkzeug/debug/shared/console.png diff --git a/app/lib/werkzeug/debug/shared/debugger.js b/app/lib/werkzeug/debug/shared/debugger.js new file mode 100644 index 0000000..534019d --- /dev/null +++ b/app/lib/werkzeug/debug/shared/debugger.js @@ -0,0 +1,205 @@ +$(function() { + if (!EVALEX_TRUSTED) { + initPinBox(); + } + + /** + * if we are in console mode, show the console. + */ + if (CONSOLE_MODE && EVALEX) { + openShell(null, $('div.console div.inner').empty(), 0); + } + + $('div.traceback div.frame').each(function() { + var + target = $('pre', this), + consoleNode = null, + frameID = this.id.substring(6); + + target.click(function() { + $(this).parent().toggleClass('expanded'); + }); + + /** + * Add an interactive console to the frames + */ + if (EVALEX && target.is('.current')) { + $('') + .attr('title', 'Open an interactive python shell in this frame') + .click(function() { + consoleNode = openShell(consoleNode, target, frameID); + return false; + }) + .prependTo(target); + } + }); + + /** + * toggle traceback types on click. + */ + $('h2.traceback').click(function() { + $(this).next().slideToggle('fast'); + $('div.plain').slideToggle('fast'); + }).css('cursor', 'pointer'); + $('div.plain').hide(); + + /** + * Add extra info (this is here so that only users with JavaScript + * enabled see it.) + */ + $('span.nojavascript') + .removeClass('nojavascript') + .html('
<p>To switch between the interactive traceback and the plaintext ' +
+          'one, you can click on the "Traceback" headline. From the text ' +
+          'traceback you can also create a paste of it. ' + (!EVALEX ? '' :
+          'For code execution mouse-over the frame you want to debug and ' +
+          'click on the console icon on the right side.' +
+          '<p>You can execute arbitrary Python code in the stack frames and ' +
+          'there are some extra helpers available for introspection:' +
+          '<ul><li><code>dump()</code> shows all variables in the frame' +
+          '<li><code>dump(obj)</code> dumps all that\'s known about the ' +
+          'object</ul>'));
+
+  /**
+   * Add the pastebin feature
+   */
+  $('div.plain form')
+    .submit(function() {
+      var label = $('input[type="submit"]', this);
+      var old_val = label.val();
+      label.val('submitting...');
+      $.ajax({
+        dataType: 'json',
+        url: document.location.pathname,
+        data: {__debugger__: 'yes', tb: TRACEBACK, cmd: 'paste',
+               s: SECRET},
+        success: function(data) {
+          $('div.plain span.pastemessage')
+            .removeClass('pastemessage')
+            .text('Paste created: ')
+            .append($('<a>#' + data.id + '</a>').attr('href', data.url));
+        },
+        error: function() {
+          alert('Error: Could not submit paste. No network connection?');
+          label.val(old_val);
+        }
+      });
+      return false;
+    });
+
+  // if we have javascript we submit by ajax anyways, so no need for the
+  // not scaling textarea.
+  var plainTraceback = $('div.plain textarea');
+  plainTraceback.replaceWith($('<pre>').text(plainTraceback.text()));
+});
+
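+/**
+ * Wire up the PIN prompt form. The entered PIN is submitted to the
+ * debugger endpoint (cmd=pinauth, authenticated with the page-injected
+ * SECRET); the JSON reply carries `auth` (PIN accepted, this browser is
+ * now trusted) and `exhausted` (too many failed attempts).
+ */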
+function initPinBox() {
+  $('.pin-prompt form').submit(function(evt) {
+    evt.preventDefault();
+    var pin = this.pin.value;
+    var btn = this.btn;
+    btn.disabled = true;
+    $.ajax({
+      dataType: 'json',
+      url: document.location.pathname,
+      data: {__debugger__: 'yes', cmd: 'pinauth', pin: pin,
+             s: SECRET},
+      success: function(data) {
+        btn.disabled = false;
+        if (data.auth) {
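+          // On success the server has also set the trust cookie, so
+          // later console requests pass its check_pin_trust() check.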
+          EVALEX_TRUSTED = true;
+          $('.pin-prompt').fadeOut();
+        } else {
+          if (data.exhausted) {
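+            // Set by the backend after more than ten bad PINs; only a
+            // server restart resets the failure counter.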
+            alert('Error: too many attempts.  Restart server to retry.');
+          } else {
+            alert('Error: incorrect pin');
+          }
+        }
+        console.log(data);
+      },
+      error: function() {
+        btn.disabled = false;
+        alert('Error: Could not verify PIN.  Network error?');
+      }
+    });
+  });
+}
+
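+/**
+ * Unless this session is already trusted, ask the backend to print the
+ * PIN to the server log (cmd=printpin) and fade in the PIN prompt.
+ */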
+function promptForPin() {
+  if (!EVALEX_TRUSTED) {
+    $.ajax({
+      url: document.location.pathname,
+      data: {__debugger__: 'yes', cmd: 'printpin', s: SECRET}
+    });
+    $('.pin-prompt').fadeIn(function() {
+      $('.pin-prompt input[name="pin"]').focus();
+    });
+  }
+}
+
+
+/**
+ * Open or toggle an interactive console attached to `target` for the
+ * stack frame identified by `frameID` (frame id 0 backs the standalone
+ * console view).
+ */
+function openShell(consoleNode, target, frameID) {
+  promptForPin();
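+  // Unlock the debugger first if needed; the backend rejects console
+  // commands until the PIN check has passed.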
+  if (consoleNode)
+    return consoleNode.slideToggle('fast');
+  consoleNode = $('<pre class="console">')
+    .appendTo(target.parent())
+    .hide()
+  var historyPos = 0, history = [''];
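+  // Rolling command history; the keydown handler below walks it with
+  // the up and down arrow keys.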
+  var output = $('<div class="output">[console ready]</div>')
+    .appendTo(consoleNode);
+  var form = $('<form>
>>> ') + .submit(function() { + var cmd = command.val(); + $.get('', { + __debugger__: 'yes', cmd: cmd, frm: frameID, s: SECRET}, function(data) { + var tmp = $('
').html(data); + $('span.extended', tmp).each(function() { + var hidden = $(this).wrap('').hide(); + hidden + .parent() + .append($('  ') + .click(function() { + hidden.toggle(); + $(this).toggleClass('open') + return false; + })); + }); + output.append(tmp); + command.focus(); + consoleNode.scrollTop(consoleNode.get(0).scrollHeight); + var old = history.pop(); + history.push(cmd); + if (typeof old != 'undefined') + history.push(old); + historyPos = history.length - 1; + }); + command.val(''); + return false; + }). + appendTo(consoleNode); + + var command = $('') + .appendTo(form) + .keydown(function(e) { + if (e.charCode == 100 && e.ctrlKey) { + output.text('--- screen cleared ---'); + return false; + } + else if (e.charCode == 0 && (e.keyCode == 38 || e.keyCode == 40)) { + if (e.keyCode == 38 && historyPos > 0) + historyPos--; + else if (e.keyCode == 40 && historyPos < history.length) + historyPos++; + command.val(history[historyPos]); + return false; + } + }); + + return consoleNode.slideDown('fast', function() { + command.focus(); + }); +} diff --git a/app/lib/werkzeug/debug/shared/jquery.js b/app/lib/werkzeug/debug/shared/jquery.js new file mode 100644 index 0000000..0f60b7b --- /dev/null +++ b/app/lib/werkzeug/debug/shared/jquery.js @@ -0,0 +1,5 @@ +/*! jQuery v1.11.3 | (c) 2005, 2015 jQuery Foundation, Inc. | jquery.org/license */ +!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l="1.11.3",m=function(a,b){return new m.fn.init(a,b)},n=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,o=/^-ms-/,p=/-([\da-z])/gi,q=function(a,b){return b.toUpperCase()};m.fn=m.prototype={jquery:l,constructor:m,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=m.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return m.each(this,a,b)},map:function(a){return this.pushStack(m.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},m.extend=m.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||m.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(m.isPlainObject(c)||(b=m.isArray(c)))?(b?(b=!1,f=a&&m.isArray(a)?a:[]):f=a&&m.isPlainObject(a)?a:{},g[d]=m.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},m.extend({expando:"jQuery"+(l+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===m.type(a)},isArray:Array.isArray||function(a){return"array"===m.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){return!m.isArray(a)&&a-parseFloat(a)+1>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var 
b;if(!a||"object"!==m.type(a)||a.nodeType||m.isWindow(a))return!1;try{if(a.constructor&&!j.call(a,"constructor")&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(k.ownLast)for(b in a)return j.call(a,b);for(b in a);return void 0===b||j.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(b){b&&m.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(o,"ms-").replace(p,q)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=r(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(n,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(r(Object(a))?m.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(g)return g.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=r(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(f=a[b],b=a,a=f),m.isFunction(a)?(c=d.call(arguments,2),e=function(){return a.apply(b||this,c.concat(d.call(arguments)))},e.guid=a.guid=a.guid||m.guid++,e):void 0},now:function(){return+new Date},support:k}),m.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function r(a){var b="length"in a&&a.length,c=m.type(a);return"function"===c||m.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var s=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ha(),z=ha(),A=ha(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",L="[\\x20\\t\\r\\n\\f]",M="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",N=M.replace("w","w#"),O="\\["+L+"*("+M+")(?:"+L+"*([*^$|!~]?=)"+L+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+N+"))|)"+L+"*\\]",P=":("+M+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+O+")*)|.*)\\)|)",Q=new RegExp(L+"+","g"),R=new RegExp("^"+L+"+|((?:^|[^\\\\])(?:\\\\.)*)"+L+"+$","g"),S=new RegExp("^"+L+"*,"+L+"*"),T=new RegExp("^"+L+"*([>+~]|"+L+")"+L+"*"),U=new RegExp("="+L+"*([^\\]'\"]*?)"+L+"*\\]","g"),V=new RegExp(P),W=new RegExp("^"+N+"$"),X={ID:new RegExp("^#("+M+")"),CLASS:new RegExp("^\\.("+M+")"),TAG:new RegExp("^("+M.replace("w","w*")+")"),ATTR:new RegExp("^"+O),PSEUDO:new RegExp("^"+P),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+L+"*(even|odd|(([+-]|)(\\d*)n|)"+L+"*(?:([+-]|)"+L+"*(\\d+)|))"+L+"*\\)|)","i"),bool:new 
RegExp("^(?:"+K+")$","i"),needsContext:new RegExp("^"+L+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+L+"*((?:-\\d)?\\d*)"+L+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,aa=/[+~]/,ba=/'|\\/g,ca=new RegExp("\\\\([\\da-f]{1,6}"+L+"?|("+L+")|.)","ig"),da=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},ea=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(fa){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function ga(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],k=b.nodeType,"string"!=typeof a||!a||1!==k&&9!==k&&11!==k)return d;if(!e&&p){if(11!==k&&(f=_.exec(a)))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return H.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName)return H.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=1!==k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(ba,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+ra(o[l]);w=aa.test(a)&&pa(b.parentNode)||b,x=o.join(",")}if(x)try{return H.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function ha(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ia(a){return a[u]=!0,a}function ja(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ka(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function la(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||C)-(~a.sourceIndex||C);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function na(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function oa(a){return ia(function(b){return b=+b,ia(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function pa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=ga.support={},f=ga.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=ga.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=g.documentElement,e=g.defaultView,e&&e!==e.top&&(e.addEventListener?e.addEventListener("unload",ea,!1):e.attachEvent&&e.attachEvent("onunload",ea)),p=!f(g),c.attributes=ja(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ja(function(a){return a.appendChild(g.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$.test(g.getElementsByClassName),c.getById=ja(function(a){return o.appendChild(a).id=u,!g.getElementsByName||!g.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return 
c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(ca,da);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(ca,da);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(g.querySelectorAll))&&(ja(function(a){o.appendChild(a).innerHTML="",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+L+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+L+"*(?:value|"+K+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ja(function(a){var b=g.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+L+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ja(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",P)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===g||a.ownerDocument===v&&t(v,a)?-1:b===g||b.ownerDocument===v&&t(v,b)?1:k?J(k,a)-J(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,h=[a],i=[b];if(!e||!f)return a===g?-1:b===g?1:e?-1:f?1:k?J(k,a)-J(k,b):0;if(e===f)return la(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return d?la(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},g):n},ga.matches=function(a,b){return ga(a,null,null,b)},ga.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return ga(b,n,null,[a]).length>0},ga.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},ga.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&D.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},ga.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},ga.uniqueSort=function(a){var 
b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=ga.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=ga.selectors={cacheLength:50,createPseudo:ia,match:X,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ca,da),a[3]=(a[3]||a[4]||a[5]||"").replace(ca,da),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||ga.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&ga.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ca,da).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+L+")"+a+"("+L+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=ga.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(Q," ")+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||ga.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ia(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=J(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ia(function(a){var b=[],c=[],d=h(a.replace(R,"$1"));return d[u]?ia(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ia(function(a){return function(b){return ga(a,b).length>0}}),contains:ia(function(a){return a=a.replace(ca,da),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ia(function(a){return 
W.test(a||"")||ga.error("unsupported lang: "+a),a=a.replace(ca,da).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:oa(function(){return[0]}),last:oa(function(a,b){return[b-1]}),eq:oa(function(a,b,c){return[0>c?c+b:c]}),even:oa(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:oa(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:oa(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:oa(function(a,b,c){for(var d=0>c?c+b:c;++db;b++)d+=a[b].value;return d}function sa(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function ta(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function ua(a,b,c){for(var d=0,e=b.length;e>d;d++)ga(a,b[d],c);return c}function va(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function wa(a,b,c,d,e,f){return d&&!d[u]&&(d=wa(d)),e&&!e[u]&&(e=wa(e,f)),ia(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||ua(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:va(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=va(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?J(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=va(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):H.apply(g,r)})}function xa(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=sa(function(a){return a===b},h,!0),l=sa(function(a){return J(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];f>i;i++)if(c=d.relative[a[i].type])m=[sa(ta(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return wa(i>1&&ta(m),i>1&&ra(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(R,"$1"),c,e>i&&xa(a.slice(i,e)),f>e&&xa(a=a.slice(e)),f>e&&ra(a))}m.push(c)}return ta(m)}function ya(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var 
l,m,o,p=0,q="0",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG("*",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!==p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=F.call(i));s=va(s)}H.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&ga.uniqueSort(i)}return k&&(w=v,j=t),r};return c?ia(f):f}return h=ga.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=xa(b[c]),f[u]?d.push(f):e.push(f);f=A(a,ya(e,d)),f.selector=a}return f},i=ga.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(ca,da),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(ca,da),aa.test(j[0].type)&&pa(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&ra(j),!a)return H.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,aa.test(a)&&pa(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ja(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ja(function(a){return a.innerHTML="","#"===a.firstChild.getAttribute("href")})||ka("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ja(function(a){return a.innerHTML="",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||ka("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ja(function(a){return null==a.getAttribute("disabled")})||ka(K,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),ga}(a);m.find=s,m.expr=s.selectors,m.expr[":"]=m.expr.pseudos,m.unique=s.uniqueSort,m.text=s.getText,m.isXMLDoc=s.isXML,m.contains=s.contains;var t=m.expr.match.needsContext,u=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,v=/^.[^:#\[\.,]*$/;function w(a,b,c){if(m.isFunction(b))return m.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return m.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(v.test(b))return m.filter(b,a,c);b=m.filter(b,a)}return m.grep(a,function(a){return m.inArray(a,b)>=0!==c})}m.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?m.find.matchesSelector(d,a)?[d]:[]:m.find.matches(a,m.grep(b,function(a){return 1===a.nodeType}))},m.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if("string"!=typeof a)return this.pushStack(m(a).filter(function(){for(b=0;e>b;b++)if(m.contains(d[b],this))return!0}));for(b=0;e>b;b++)m.find(a,d[b],c);return c=this.pushStack(e>1?m.unique(c):c),c.selector=this.selector?this.selector+" "+a:a,c},filter:function(a){return this.pushStack(w(this,a||[],!1))},not:function(a){return this.pushStack(w(this,a||[],!0))},is:function(a){return!!w(this,"string"==typeof a&&t.test(a)?m(a):a||[],!1).length}});var x,y=a.document,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=m.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof 
a){if(c="<"===a.charAt(0)&&">"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||x).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof m?b[0]:b,m.merge(this,m.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:y,!0)),u.test(c[1])&&m.isPlainObject(b))for(c in b)m.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}if(d=y.getElementById(c[2]),d&&d.parentNode){if(d.id!==c[2])return x.find(a);this.length=1,this[0]=d}return this.context=y,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):m.isFunction(a)?"undefined"!=typeof x.ready?x.ready(a):a(m):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),m.makeArray(a,this))};A.prototype=m.fn,x=m(y);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};m.extend({dir:function(a,b,c){var d=[],e=a[b];while(e&&9!==e.nodeType&&(void 0===c||1!==e.nodeType||!m(e).is(c)))1===e.nodeType&&d.push(e),e=e[b];return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),m.fn.extend({has:function(a){var b,c=m(a,this),d=c.length;return this.filter(function(){for(b=0;d>b;b++)if(m.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=t.test(a)||"string"!=typeof a?m(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&m.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?m.unique(f):f)},index:function(a){return a?"string"==typeof a?m.inArray(this[0],m(a)):m.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(m.unique(m.merge(this.get(),m(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}m.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return m.dir(a,"parentNode")},parentsUntil:function(a,b,c){return m.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return m.dir(a,"nextSibling")},prevAll:function(a){return m.dir(a,"previousSibling")},nextUntil:function(a,b,c){return m.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return m.dir(a,"previousSibling",c)},siblings:function(a){return m.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return m.sibling(a.firstChild)},contents:function(a){return m.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:m.merge([],a.childNodes)}},function(a,b){m.fn[a]=function(c,d){var e=m.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=m.filter(d,e)),this.length>1&&(C[a]||(e=m.unique(e)),B.test(a)&&(e=e.reverse())),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return m.each(a.match(E)||[],function(a,c){b[c]=!0}),b}m.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):m.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(c=a.memory&&l,d=!0,f=g||0,g=0,e=h.length,b=!0;h&&e>f;f++)if(h[f].apply(l[0],l[1])===!1&&a.stopOnFalse){c=!1;break}b=!1,h&&(i?i.length&&j(i.shift()):c?h=[]:k.disable())},k={add:function(){if(h){var d=h.length;!function f(b){m.each(b,function(b,c){var d=m.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&f(c)})}(arguments),b?e=h.length:c&&(g=d,j(c))}return 
this},remove:function(){return h&&m.each(arguments,function(a,c){var d;while((d=m.inArray(c,h,d))>-1)h.splice(d,1),b&&(e>=d&&e--,f>=d&&f--)}),this},has:function(a){return a?m.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],e=0,this},disable:function(){return h=i=c=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,c||k.disable(),this},locked:function(){return!i},fireWith:function(a,c){return!h||d&&!i||(c=c||[],c=[a,c.slice?c.slice():c],b?i.push(c):j(c)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!d}};return k},m.extend({Deferred:function(a){var b=[["resolve","done",m.Callbacks("once memory"),"resolved"],["reject","fail",m.Callbacks("once memory"),"rejected"],["notify","progress",m.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return m.Deferred(function(c){m.each(b,function(b,f){var g=m.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&m.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?m.extend(a,d):d}},e={};return d.pipe=d.then,m.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&m.isFunction(a.promise)?e:0,g=1===f?a:m.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&m.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;m.fn.ready=function(a){return m.ready.promise().done(a),this},m.extend({isReady:!1,readyWait:1,holdReady:function(a){a?m.readyWait++:m.ready(!0)},ready:function(a){if(a===!0?!--m.readyWait:!m.isReady){if(!y.body)return setTimeout(m.ready);m.isReady=!0,a!==!0&&--m.readyWait>0||(H.resolveWith(y,[m]),m.fn.triggerHandler&&(m(y).triggerHandler("ready"),m(y).off("ready")))}}});function I(){y.addEventListener?(y.removeEventListener("DOMContentLoaded",J,!1),a.removeEventListener("load",J,!1)):(y.detachEvent("onreadystatechange",J),a.detachEvent("onload",J))}function J(){(y.addEventListener||"load"===event.type||"complete"===y.readyState)&&(I(),m.ready())}m.ready.promise=function(b){if(!H)if(H=m.Deferred(),"complete"===y.readyState)setTimeout(m.ready);else if(y.addEventListener)y.addEventListener("DOMContentLoaded",J,!1),a.addEventListener("load",J,!1);else{y.attachEvent("onreadystatechange",J),a.attachEvent("onload",J);var c=!1;try{c=null==a.frameElement&&y.documentElement}catch(d){}c&&c.doScroll&&!function e(){if(!m.isReady){try{c.doScroll("left")}catch(a){return setTimeout(e,50)}I(),m.ready()}}()}return H.promise(b)};var K="undefined",L;for(L in m(k))break;k.ownLast="0"!==L,k.inlineBlockNeedsLayout=!1,m(function(){var a,b,c,d;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof 
b.style.zoom!==K&&(b.style.cssText="display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1",k.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(d))}),function(){var a=y.createElement("div");if(null==k.deleteExpando){k.deleteExpando=!0;try{delete a.test}catch(b){k.deleteExpando=!1}}a=null}(),m.acceptData=function(a){var b=m.noData[(a.nodeName+" ").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute("classid")===b};var M=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,N=/([A-Z])/g;function O(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace(N,"-$1").toLowerCase();if(c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:M.test(c)?m.parseJSON(c):c}catch(e){}m.data(a,b,c)}else c=void 0}return c}function P(a){var b;for(b in a)if(("data"!==b||!m.isEmptyObject(a[b]))&&"toJSON"!==b)return!1; + +return!0}function Q(a,b,d,e){if(m.acceptData(a)){var f,g,h=m.expando,i=a.nodeType,j=i?m.cache:a,k=i?a[h]:a[h]&&h;if(k&&j[k]&&(e||j[k].data)||void 0!==d||"string"!=typeof b)return k||(k=i?a[h]=c.pop()||m.guid++:h),j[k]||(j[k]=i?{}:{toJSON:m.noop}),("object"==typeof b||"function"==typeof b)&&(e?j[k]=m.extend(j[k],b):j[k].data=m.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[m.camelCase(b)]=d),"string"==typeof b?(f=g[b],null==f&&(f=g[m.camelCase(b)])):f=g,f}}function R(a,b,c){if(m.acceptData(a)){var d,e,f=a.nodeType,g=f?m.cache:a,h=f?a[m.expando]:m.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){m.isArray(b)?b=b.concat(m.map(b,m.camelCase)):b in d?b=[b]:(b=m.camelCase(b),b=b in d?[b]:b.split(" ")),e=b.length;while(e--)delete d[b[e]];if(c?!P(d):!m.isEmptyObject(d))return}(c||(delete g[h].data,P(g[h])))&&(f?m.cleanData([a],!0):k.deleteExpando||g!=g.window?delete g[h]:g[h]=null)}}}m.extend({cache:{},noData:{"applet ":!0,"embed ":!0,"object ":"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"},hasData:function(a){return a=a.nodeType?m.cache[a[m.expando]]:a[m.expando],!!a&&!P(a)},data:function(a,b,c){return Q(a,b,c)},removeData:function(a,b){return R(a,b)},_data:function(a,b,c){return Q(a,b,c,!0)},_removeData:function(a,b){return R(a,b,!0)}}),m.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=m.data(f),1===f.nodeType&&!m._data(f,"parsedAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=m.camelCase(d.slice(5)),O(f,d,e[d])));m._data(f,"parsedAttrs",!0)}return e}return"object"==typeof a?this.each(function(){m.data(this,a)}):arguments.length>1?this.each(function(){m.data(this,a,b)}):f?O(f,a,m.data(f,a)):void 0},removeData:function(a){return this.each(function(){m.removeData(this,a)})}}),m.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=m._data(a,b),c&&(!d||m.isArray(c)?d=m._data(a,b,m.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=m.queue(a,b),d=c.length,e=c.shift(),f=m._queueHooks(a,b),g=function(){m.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return m._data(a,c)||m._data(a,c,{empty:m.Callbacks("once memory").add(function(){m._removeData(a,b+"queue"),m._removeData(a,c)})})}}),m.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.lengthh;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},W=/^(?:checkbox|radio)$/i;!function(){var 
a=y.createElement("input"),b=y.createElement("div"),c=y.createDocumentFragment();if(b.innerHTML="
a",k.leadingWhitespace=3===b.firstChild.nodeType,k.tbody=!b.getElementsByTagName("tbody").length,k.htmlSerialize=!!b.getElementsByTagName("link").length,k.html5Clone="<:nav>"!==y.createElement("nav").cloneNode(!0).outerHTML,a.type="checkbox",a.checked=!0,c.appendChild(a),k.appendChecked=a.checked,b.innerHTML="",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,c.appendChild(b),b.innerHTML="",k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,k.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){k.noCloneEvent=!1}),b.cloneNode(!0).click()),null==k.deleteExpando){k.deleteExpando=!0;try{delete b.test}catch(d){k.deleteExpando=!1}}}(),function(){var b,c,d=y.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(k[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),k[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var X=/^(?:input|select|textarea)$/i,Y=/^key/,Z=/^(?:mouse|pointer|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=/^([^.]*)(?:\.(.+)|)$/;function aa(){return!0}function ba(){return!1}function ca(){try{return y.activeElement}catch(a){}}m.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=m.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof m===K||a&&m.event.triggered===a.type?void 0:m.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(E)||[""],h=b.length;while(h--)f=_.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=m.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=m.event.special[o]||{},l=m.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&m.expr.match.needsContext.test(e),namespace:p.join(".")},i),(n=g[o])||(n=g[o]=[],n.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?n.splice(n.delegateCount++,0,l):n.push(l),m.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m.hasData(a)&&m._data(a);if(r&&(k=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=_.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=m.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,n=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=n.length;while(f--)g=n[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(n.splice(f,1),g.selector&&n.delegateCount--,l.remove&&l.remove.call(a,g));i&&!n.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||m.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)m.event.remove(a,o+b[j],c,d,!0);m.isEmptyObject(k)&&(delete r.handle,m._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,o=[d||y],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||y,3!==d.nodeType&&8!==d.nodeType&&!$.test(p+m.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.indexOf(":")<0&&"on"+p,b=b[m.expando]?b:new m.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 
0,b.target||(b.target=d),c=null==c?[b]:m.makeArray(c,[b]),k=m.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!m.isWindow(d)){for(i=k.delegateType||p,$.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||y)&&o.push(l.defaultView||l.parentWindow||a)}n=0;while((h=o[n++])&&!b.isPropagationStopped())b.type=n>1?i:k.bindType||p,f=(m._data(h,"events")||{})[b.type]&&m._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&m.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&m.acceptData(d)&&g&&d[p]&&!m.isWindow(d)){l=d[g],l&&(d[g]=null),m.event.triggered=p;try{d[p]()}catch(r){}m.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=m.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(m._data(this,"events")||{})[a.type]||[],k=m.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=m.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((m.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d.needsContext?m(c,this).index(i)>=0:m.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return h]","i"),ha=/^\s+/,ia=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,ja=/<([\w:]+)/,ka=/\s*$/g,ra={option:[1,""],legend:[1,"

","
"],area:[1,"",""],param:[1,"",""],thead:[1,"","
"],tr:[2,"","
"],col:[2,"","
"],td:[3,"","
"],_default:k.htmlSerialize?[0,"",""]:[1,"X
","
"]},sa=da(y),ta=sa.appendChild(y.createElement("div"));ra.optgroup=ra.option,ra.tbody=ra.tfoot=ra.colgroup=ra.caption=ra.thead,ra.th=ra.td;function ua(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==K?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||m.nodeName(d,b)?f.push(d):m.merge(f,ua(d,b));return void 0===b||b&&m.nodeName(a,b)?m.merge([a],f):f}function va(a){W.test(a.type)&&(a.defaultChecked=a.checked)}function wa(a,b){return m.nodeName(a,"table")&&m.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function xa(a){return a.type=(null!==m.find.attr(a,"type"))+"/"+a.type,a}function ya(a){var b=pa.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function za(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,"globalEval",!b||m._data(b[d],"globalEval"))}function Aa(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a),g=m._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)m.event.add(b,c,h[c][d])}g.data&&(g.data=m.extend({},g.data))}}function Ba(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!k.noCloneEvent&&b[m.expando]){e=m._data(b);for(d in e.events)m.removeEvent(b,d,e.handle);b.removeAttribute(m.expando)}"script"===c&&b.text!==a.text?(xa(b).text=a.text,ya(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),k.html5Clone&&a.innerHTML&&!m.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&W.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}m.extend({clone:function(a,b,c){var d,e,f,g,h,i=m.contains(a.ownerDocument,a);if(k.html5Clone||m.isXMLDoc(a)||!ga.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(ta.innerHTML=a.outerHTML,ta.removeChild(f=ta.firstChild)),!(k.noCloneEvent&&k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||m.isXMLDoc(a)))for(d=ua(f),h=ua(a),g=0;null!=(e=h[g]);++g)d[g]&&Ba(e,d[g]);if(b)if(c)for(h=h||ua(a),d=d||ua(f),g=0;null!=(e=h[g]);g++)Aa(e,d[g]);else Aa(a,f);return d=ua(f,"script"),d.length>0&&za(d,!i&&ua(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,l,n=a.length,o=da(b),p=[],q=0;n>q;q++)if(f=a[q],f||0===f)if("object"===m.type(f))m.merge(p,f.nodeType?[f]:f);else if(la.test(f)){h=h||o.appendChild(b.createElement("div")),i=(ja.exec(f)||["",""])[1].toLowerCase(),l=ra[i]||ra._default,h.innerHTML=l[1]+f.replace(ia,"<$1>")+l[2],e=l[0];while(e--)h=h.lastChild;if(!k.leadingWhitespace&&ha.test(f)&&p.push(b.createTextNode(ha.exec(f)[0])),!k.tbody){f="table"!==i||ka.test(f)?""!==l[1]||ka.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)m.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}m.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),k.appendChecked||m.grep(ua(p,"input"),va),q=0;while(f=p[q++])if((!d||-1===m.inArray(f,d))&&(g=m.contains(f.ownerDocument,f),h=ua(o.appendChild(f),"script"),g&&za(h),c)){e=0;while(f=h[e++])oa.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=m.expando,j=m.cache,l=k.deleteExpando,n=m.event.special;null!=(d=a[h]);h++)if((b||m.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in 
g.events)n[e]?m.event.remove(d,e):m.removeEvent(d,e,g.handle);j[f]&&(delete j[f],l?delete d[i]:typeof d.removeAttribute!==K?d.removeAttribute(i):d[i]=null,c.push(f))}}}),m.fn.extend({text:function(a){return V(this,function(a){return void 0===a?m.text(this):this.empty().append((this[0]&&this[0].ownerDocument||y).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wa(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wa(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?m.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||m.cleanData(ua(c)),c.parentNode&&(b&&m.contains(c.ownerDocument,c)&&za(ua(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&m.cleanData(ua(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&m.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return m.clone(this,a,b)})},html:function(a){return V(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(fa,""):void 0;if(!("string"!=typeof a||ma.test(a)||!k.htmlSerialize&&ga.test(a)||!k.leadingWhitespace&&ha.test(a)||ra[(ja.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(ia,"<$1>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(m.cleanData(ua(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,m.cleanData(ua(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,n=this,o=l-1,p=a[0],q=m.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&na.test(p))return this.each(function(c){var d=n.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(i=m.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=m.map(ua(i,"script"),xa),f=g.length;l>j;j++)d=i,j!==o&&(d=m.clone(d,!0,!0),f&&m.merge(g,ua(d,"script"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,m.map(g,ya),j=0;f>j;j++)d=g[j],oa.test(d.type||"")&&!m._data(d,"globalEval")&&m.contains(h,d)&&(d.src?m._evalUrl&&m._evalUrl(d.src):m.globalEval((d.text||d.textContent||d.innerHTML||"").replace(qa,"")));i=c=null}return this}}),m.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){m.fn[a]=function(a){for(var c,d=0,e=[],g=m(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),m(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Ca,Da={};function Ea(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:m.css(e[0],"display");return e.detach(),f}function Fa(a){var b=y,c=Da[a];return c||(c=Ea(a,b),"none"!==c&&c||(Ca=(Ca||m(" + + + + diff --git 
a/src/application/static/sweBootstrap/albumPageModule.html b/app/templates/sweBootstrap/albumPageModule.html similarity index 100% rename from src/application/static/sweBootstrap/albumPageModule.html rename to app/templates/sweBootstrap/albumPageModule.html diff --git a/src/application/static/sweBootstrap/albumsTable.html b/app/templates/sweBootstrap/albumsTable.html similarity index 76% rename from src/application/static/sweBootstrap/albumsTable.html rename to app/templates/sweBootstrap/albumsTable.html index 68ffd5f..a1a7c4b 100644 --- a/src/application/static/sweBootstrap/albumsTable.html +++ b/app/templates/sweBootstrap/albumsTable.html @@ -16,7 +16,7 @@ - + @@ -33,45 +33,32 @@ - +
diff --git a/src/application/static/sweBootstrap/angularTracksModule.html b/app/templates/sweBootstrap/angularTracksModule.html similarity index 100% rename from src/application/static/sweBootstrap/angularTracksModule.html rename to app/templates/sweBootstrap/angularTracksModule.html diff --git a/src/application/static/sweBootstrap/artistPageModule.html b/app/templates/sweBootstrap/artistPageModule.html similarity index 100% rename from src/application/static/sweBootstrap/artistPageModule.html rename to app/templates/sweBootstrap/artistPageModule.html diff --git a/src/application/static/sweBootstrap/artistTable.html b/app/templates/sweBootstrap/artistTable.html similarity index 75% rename from src/application/static/sweBootstrap/artistTable.html rename to app/templates/sweBootstrap/artistTable.html index ff48963..3b7dace 100644 --- a/src/application/static/sweBootstrap/artistTable.html +++ b/app/templates/sweBootstrap/artistTable.html @@ -16,6 +16,7 @@ + @@ -33,45 +34,32 @@ -
+ + +
diff --git a/app/templates/sweBootstrap/b52AlbumPage.html b/app/templates/sweBootstrap/b52AlbumPage.html new file mode 100644 index 0000000..654f156 --- /dev/null +++ b/app/templates/sweBootstrap/b52AlbumPage.html @@ -0,0 +1,130 @@ + + + + + + + + + + BoSWEmian Rhapsody + + + + + + + + + + + + + + +
+    [page body; markup not preserved. Visible text: "Cosmic Thing" (header); Artists: The B-52's; Genre: Pop; Label: Warner Bros.; Date: 1989-06-23; Songs: 10 Songs]
+ + diff --git a/src/application/static/sweBootstrap/b52ArtistPage.html b/app/templates/sweBootstrap/b52ArtistPage.html similarity index 82% rename from src/application/static/sweBootstrap/b52ArtistPage.html rename to app/templates/sweBootstrap/b52ArtistPage.html index cda25ca..5bcbfea 100644 --- a/src/application/static/sweBootstrap/b52ArtistPage.html +++ b/app/templates/sweBootstrap/b52ArtistPage.html @@ -13,50 +13,39 @@ + + - + +
diff --git a/app/templates/sweBootstrap/b52Concert2Page.html b/app/templates/sweBootstrap/b52Concert2Page.html new file mode 100644 index 0000000..02390e7 --- /dev/null +++ b/app/templates/sweBootstrap/b52Concert2Page.html @@ -0,0 +1,131 @@ + + + + + + + + + + BoSWEmian Rhapsody + + + + + + + + + + + + + + +
+    [page body; markup not preserved. Visible text: "The B-52's" (header); Venue: House Of Blues; Date: 2017-05-20; Time: 19:00:00; Link: More Info; Lineup: The B-52's]
+ + diff --git a/app/templates/sweBootstrap/b52ConcertPage.html b/app/templates/sweBootstrap/b52ConcertPage.html new file mode 100644 index 0000000..3be7e87 --- /dev/null +++ b/app/templates/sweBootstrap/b52ConcertPage.html @@ -0,0 +1,131 @@ + + + + + + + + + + BoSWEmian Rhapsody + + + + + + + + + + + + + + +
+    [page body; markup not preserved. Visible text: "The B-52's" (header); Date: 2017-05-18; Time: 19:00:00; Link: More Info; Lineup: The B-52's]
+ + diff --git a/src/application/static/sweBootstrap/b52TrackPage.html b/app/templates/sweBootstrap/b52TrackPage.html similarity index 77% rename from src/application/static/sweBootstrap/b52TrackPage.html rename to app/templates/sweBootstrap/b52TrackPage.html index 1322956..9ce1622 100644 --- a/src/application/static/sweBootstrap/b52TrackPage.html +++ b/app/templates/sweBootstrap/b52TrackPage.html @@ -14,50 +14,38 @@ + - + +
diff --git a/src/application/static/sweBootstrap/coldplayAlbumPage.html b/app/templates/sweBootstrap/coldplayAlbumPage.html similarity index 75% rename from src/application/static/sweBootstrap/coldplayAlbumPage.html rename to app/templates/sweBootstrap/coldplayAlbumPage.html index 6c8f234..20507ec 100644 --- a/src/application/static/sweBootstrap/coldplayAlbumPage.html +++ b/app/templates/sweBootstrap/coldplayAlbumPage.html @@ -14,50 +14,38 @@ + - + +
diff --git a/src/application/static/sweBootstrap/coldplayArtistPage.html b/app/templates/sweBootstrap/coldplayArtistPage.html similarity index 82% rename from src/application/static/sweBootstrap/coldplayArtistPage.html rename to app/templates/sweBootstrap/coldplayArtistPage.html index f465b18..8a827e6 100644 --- a/src/application/static/sweBootstrap/coldplayArtistPage.html +++ b/app/templates/sweBootstrap/coldplayArtistPage.html @@ -14,50 +14,38 @@ + - + +
diff --git a/app/templates/sweBootstrap/coldplayConcertPage.html b/app/templates/sweBootstrap/coldplayConcertPage.html new file mode 100644 index 0000000..885f1b7 --- /dev/null +++ b/app/templates/sweBootstrap/coldplayConcertPage.html @@ -0,0 +1,131 @@ + + + + + + + + + + BoSWEmian Rhapsody + + + + + + + + + + + + + + +
+    [page body; markup not preserved. Visible text: "Coldplay" (header); Venue: Singapore National Stadium; Date: 2017-03-31; Time: 19:30:00; Link: More Info; Lineup: Coldplay]
+ + diff --git a/src/application/static/sweBootstrap/coldplayTrackPage.html b/app/templates/sweBootstrap/coldplayTrackPage.html similarity index 75% rename from src/application/static/sweBootstrap/coldplayTrackPage.html rename to app/templates/sweBootstrap/coldplayTrackPage.html index 3766269..fb2969c 100644 --- a/src/application/static/sweBootstrap/coldplayTrackPage.html +++ b/app/templates/sweBootstrap/coldplayTrackPage.html @@ -10,54 +10,41 @@ BoSWEmian Rhapsody - - + + - + +
@@ -113,11 +100,11 @@ [hunk body; markup not preserved. Visible text: "Artists", "Coldplay"; the changed lines themselves carry no surviving text]
diff --git a/src/application/static/sweBootstrap/concertPageModule.html b/app/templates/sweBootstrap/concertPageModule.html similarity index 100% rename from src/application/static/sweBootstrap/concertPageModule.html rename to app/templates/sweBootstrap/concertPageModule.html diff --git a/src/application/static/sweBootstrap/concertsTable.html b/app/templates/sweBootstrap/concertsTable.html similarity index 78% rename from src/application/static/sweBootstrap/concertsTable.html rename to app/templates/sweBootstrap/concertsTable.html index 3a5a41f..7e58cb5 100644 --- a/src/application/static/sweBootstrap/concertsTable.html +++ b/app/templates/sweBootstrap/concertsTable.html @@ -17,6 +17,7 @@ + @@ -34,44 +35,32 @@ -
+ + +
diff --git a/app/templates/sweBootstrap/home.html b/app/templates/sweBootstrap/home.html new file mode 100644 index 0000000..e6ba044 --- /dev/null +++ b/app/templates/sweBootstrap/home.html @@ -0,0 +1,89 @@ + + + + + + + + +BoSWEmian Rhapsody + + + + + + + + + + + +
+    [page body; markup not preserved. Navigation and carousel; slide copy: "Hello! This is BoSWE. Designed with simplicity in mind. To sate your every musical desire."; "Craving a certain song? We'll deliver it straight to your ears."; "Hungry for a particular Artist? We have the finest selections prepared."]
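[Editor's note: with these pages moved from src/application/static/ into app/templates/, they stop being plain static files. The old app's PublicIndex view (deleted further down in this diff) redirected "/" to the static home.html; the new layout implies the Flask app renders them as Jinja templates instead. A minimal sketch of what such a route could look like follows. main.py is not part of this diff, so the module name, app object, and route shown here are assumptions, not the project's actual code:

    # Hypothetical sketch only; main.py is not included in this diff.
    from flask import Flask, render_template

    app = Flask(__name__)

    @app.route('/')
    def home():
        # Template paths resolve relative to app/templates/.
        return render_template('sweBootstrap/home.html')
]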
+ + + + + + + + diff --git a/app/templates/sweBootstrap/queenAlbumPage.html b/app/templates/sweBootstrap/queenAlbumPage.html new file mode 100644 index 0000000..a3c4e44 --- /dev/null +++ b/app/templates/sweBootstrap/queenAlbumPage.html @@ -0,0 +1,130 @@ + + + + + + + + + + BoSWEmian Rhapsody + + + + + + + + + + + + + + +
+    [page body; markup not preserved. Visible text: "A Night At The Opera (Deluxe Remastered Version)" (header); Artists: Queen; Genre: Rock; Label: Hollywood Records; Date: 1975-11-21; Songs: 18 Songs]
+ + diff --git a/src/application/static/sweBootstrap/queenArtistPage.html b/app/templates/sweBootstrap/queenArtistPage.html similarity index 81% rename from src/application/static/sweBootstrap/queenArtistPage.html rename to app/templates/sweBootstrap/queenArtistPage.html index 2c518b6..eac6542 100644 --- a/src/application/static/sweBootstrap/queenArtistPage.html +++ b/app/templates/sweBootstrap/queenArtistPage.html @@ -14,50 +14,38 @@ + - + +
diff --git a/src/application/static/sweBootstrap/queenTrackPage.html b/app/templates/sweBootstrap/queenTrackPage.html similarity index 77% rename from src/application/static/sweBootstrap/queenTrackPage.html rename to app/templates/sweBootstrap/queenTrackPage.html index 316a362..c19b0fe 100644 --- a/src/application/static/sweBootstrap/queenTrackPage.html +++ b/app/templates/sweBootstrap/queenTrackPage.html @@ -14,50 +14,38 @@ + - + +
diff --git a/src/application/static/sweBootstrap/staticInfo.txt b/app/templates/sweBootstrap/staticInfo.txt similarity index 100% rename from src/application/static/sweBootstrap/staticInfo.txt rename to app/templates/sweBootstrap/staticInfo.txt diff --git a/src/application/static/sweBootstrap/trackPageModule.html b/app/templates/sweBootstrap/trackPageModule.html similarity index 100% rename from src/application/static/sweBootstrap/trackPageModule.html rename to app/templates/sweBootstrap/trackPageModule.html diff --git a/src/application/static/sweBootstrap/tracksTable.html b/app/templates/sweBootstrap/tracksTable.html similarity index 77% rename from src/application/static/sweBootstrap/tracksTable.html rename to app/templates/sweBootstrap/tracksTable.html index 159685d..150f1e5 100644 --- a/src/application/static/sweBootstrap/tracksTable.html +++ b/app/templates/sweBootstrap/tracksTable.html @@ -17,6 +17,7 @@ + @@ -33,45 +34,32 @@ - -
+ + +
diff --git a/src/app.yaml b/src/app.yaml deleted file mode 100644 index 0cd3f6f..0000000 --- a/src/app.yaml +++ /dev/null @@ -1,43 +0,0 @@ -application: boswemian-rhapsody -version: 1 -runtime: python27 -api_version: 1 -threadsafe: true - -default_expiration: "5d" - -builtins: -- appstats: on -- admin_redirect: on -- deferred: on -- remote_api: on - -libraries: -- name: jinja2 - version: "2.6" -- name: markupsafe - version: "0.15" - -inbound_services: -- warmup - -handlers: -- url: /favicon.ico - static_files: application/static/img/favicon.ico - upload: application/static/img/favicon.ico - -- url: /robots.txt - static_files: application/static/robots.txt - upload: application/static/robots.txt - -- url: /gae_mini_profiler/static - static_dir: lib/gae_mini_profiler/static - -- url: /gae_mini_profiler/.* - script: lib.gae_mini_profiler.main.application - -- url: /static - static_dir: application/static - -- url: .* - script: run.application.app \ No newline at end of file diff --git a/src/appengine_config.py b/src/appengine_config.py deleted file mode 100644 index c5394eb..0000000 --- a/src/appengine_config.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -App Engine config - -""" - - -def gae_mini_profiler_should_profile_production(): - """Uncomment the first two lines to enable GAE Mini Profiler on production for admin accounts""" - # from google.appengine.api import users - # return users.is_current_user_admin() - return False - - -def webapp_add_wsgi_middleware(app): - from google.appengine.ext.appstats import recording - app = recording.appstats_wsgi_middleware(app) - return app diff --git a/src/application/__init__.py b/src/application/__init__.py deleted file mode 100644 index a5551f3..0000000 --- a/src/application/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -""" -Initialize Flask app - -""" -from flask import Flask -import os -from flask_debugtoolbar import DebugToolbarExtension -from werkzeug.debug import DebuggedApplication - -app = Flask('application') - -if os.getenv('FLASK_CONF') == 'TEST': - app.config.from_object('application.settings.Testing') - -elif 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Dev'): - # Development settings - app.config.from_object('application.settings.Development') - # Flask-DebugToolbar - toolbar = DebugToolbarExtension(app) - - # Google app engine mini profiler - # https://github.com/kamens/gae_mini_profiler - app.wsgi_app = DebuggedApplication(app.wsgi_app, evalex=True) - - from gae_mini_profiler import profiler, templatetags - - @app.context_processor - def inject_profiler(): - return dict(profiler_includes=templatetags.profiler_includes()) - app.wsgi_app = profiler.ProfilerWSGIMiddleware(app.wsgi_app) -else: - app.config.from_object('application.settings.Production') - -# Enable jinja2 loop controls extension -app.jinja_env.add_extension('jinja2.ext.loopcontrols') - -# Pull in URL dispatch routes -import urls diff --git a/src/application/decorators.py b/src/application/decorators.py deleted file mode 100644 index adf6044..0000000 --- a/src/application/decorators.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -decorators.py - -Decorators for URL handlers - -""" - -from functools import wraps -from google.appengine.api import users -from flask import redirect, request, abort - - -def login_required(func): - """Requires standard login credentials""" - @wraps(func) - def decorated_view(*args, **kwargs): - if not users.get_current_user(): - return redirect(users.create_login_url(request.url)) - return func(*args, **kwargs) - return 
decorated_view - - -def admin_required(func): - """Requires App Engine admin credentials""" - @wraps(func) - def decorated_view(*args, **kwargs): - if users.get_current_user(): - if not users.is_current_user_admin(): - abort(401) # Unauthorized - return func(*args, **kwargs) - return redirect(users.create_login_url(request.url)) - return decorated_view diff --git a/src/application/forms.py b/src/application/forms.py deleted file mode 100644 index 0636bfb..0000000 --- a/src/application/forms.py +++ /dev/null @@ -1,26 +0,0 @@ -""" -forms.py - -Web forms based on Flask-WTForms - -See: http://flask.pocoo.org/docs/patterns/wtforms/ - http://wtforms.simplecodes.com/ - -""" - -from flaskext import wtf -from flaskext.wtf import validators -from wtforms.ext.appengine.ndb import model_form - -from models import ExampleModel - - -class ClassicExampleForm(wtf.Form): - example_name = wtf.TextField('Name', validators=[validators.Required()]) - example_description = wtf.TextAreaField('Description', validators=[validators.Required()]) - -# App Engine ndb model form example -ExampleForm = model_form(ExampleModel, wtf.Form, field_args={ - 'example_name': dict(validators=[validators.Required()]), - 'example_description': dict(validators=[validators.Required()]), -}) diff --git a/src/application/generate_keys.py b/src/application/generate_keys.py deleted file mode 100755 index e9080ed..0000000 --- a/src/application/generate_keys.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -""" -generate_keys.py - -Generate CSRF and Session keys, output to secret_keys.py file - -Usage: - generate_keys.py [-f] - -Outputs secret_keys.py file in current folder - -By default, an existing secret_keys file will not be replaced. -Use the '-f' flag to force the new keys to be written to the file - - -""" - -import string -import os.path - -from optparse import OptionParser -from random import choice -from string import Template - - -# File settings -file_name = 'secret_keys.py' -file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), file_name) - -file_template = Template('''# CSRF- and Session keys - -CSRF_SECRET_KEY = '$csrf_key' -SESSION_KEY = '$session_key' -''') - - -# Get options from command line -parser = OptionParser() -parser.add_option( - "-f", "--force", dest="force", - help="force overwrite of existing secret_keys file", action="store_true") -parser.add_option( - "-r", "--randomness", dest="randomness", - help="length (randomness) of generated key; default = 24", default=24) -(options, args) = parser.parse_args() - - -def generate_randomkey(length): - """Generate random key, given a number of characters""" - chars = string.letters + string.digits - return ''.join([choice(chars) for i in range(int(length))]) - - -def write_file(contents): - with open(file_path, 'wb') as f: - f.write(contents) - - -def generate_keyfile(csrf_key, session_key): - """Generate random keys for CSRF- and session key""" - output = file_template.safe_substitute(dict( - csrf_key=csrf_key, session_key=session_key - )) - if os.path.exists(file_path): - if options.force is None: - print "Warning: secret_keys.py file exists. Use 'generate_keys.py --force' to force overwrite." 
- else: - write_file(output) - else: - write_file(output) - - -def main(): - r = options.randomness - csrf_key = generate_randomkey(r) - session_key = generate_randomkey(r) - generate_keyfile(csrf_key, session_key) - - -if __name__ == "__main__": - main() diff --git a/src/application/models.py b/src/application/models.py deleted file mode 100644 index 0354689..0000000 --- a/src/application/models.py +++ /dev/null @@ -1,197 +0,0 @@ -# """ -# models.py - -# App Engine datastore models - -# """ - - -from google.appengine.ext import ndb - - - -class ExampleModel(ndb.Model): - """Example Model""" - example_name = ndb.StringProperty(required=True) - example_description = ndb.TextProperty(required=True) - added_by = ndb.UserProperty() - timestamp = ndb.DateTimeProperty(auto_now_add=True) - - - -# #! python -# from flask_sqlalchemy import SQLAlchemy -# from sqlalchemy import create_engine, Column, ForeignKey,\ -# Integer, Boolean, Key, Table, Float -# from sqlalchemy.orm import relationship -# from sqlalchemy.ext.declarative import declarative_base - -# # SQLAlchemy Declarative Base -# Base = declarative_base() - -# class Track(ndb.Model) : -# __tablename__ = 'tracks' - -# id = ndb.IntegerProperty(required=True) #look into making this a primary key -# name = ndb.StringProperty(required=True) -# genre = ndb.StringProperty(required=True) -# release_date = ndb.StringProperty(required=True) -# duration = ndb.IntegerProperty(required=True) -# popularity = ndb.IntegerProperty(required=True) -# preview_url = ndb.StringProperty(required=True) -# explicit = ndb.BooleanProperty(required=True) - -# # Reference to artists table -# # Auto-populates Artist.tracks -# artist_id = Column(Integer, ForeignKey('artists.id')) -# artist = relationship('Artist', back_populates='tracks') - -# # Reference to albums table -# # Auto-populates Album.tracks -# album_id = Column(Integer, ForeignKey('albums.id')) -# album = relationship('Album', back_populates='tracks') - -# def __repr__(self) : -# return "" % (self.name, self.artist.name) - -# class Artist(Base) : -# __tablename__ = 'artists' - -# id = ndb.IntegerProperty(required=True) -# name = ndb.StringProperty(required=True) -# image_url = ndb.StringProperty(required=True) -# country = ndb.StringProperty(required=True) -# decade = ndb.StringProperty(required=True) -# genre = ndb.StringProperty(required=True) - -# # Reference to an artist's most popular track -# most_popular_track_id = Column(Integer, ForeignKey('tracks.id')) -# most_popular_track = relationship('Track') - -# # Relationship to artist_album_pairs table -# # Auto-populates Artist_Album_Association.artist -# albums = relationship('Artist_Album_Association', -# order_by=Artist_Album_Association.id, back_populates='artist') - -# # Relationship to tracks table -# # Auto-populates Track.artist -# tracks = relationship('Track', -# order_by=Track.id, back_populates='artist') - -# def __repr__(self) : -# return "" % (self.name) - -# class Album(Base) : -# __tablename__ = 'albums' - -# id = ndb.IntegerProperty(required=True) -# name = ndb.StringProperty(required=True) -# genre = ndb.StringProperty(required=True) -# release_date = ndb.StringProperty(required=True) -# album_cover_url = ndb.StringProperty(required=True) -# label = ndb.StringProperty(required=True) -# number_of_tracks = ndb.IntegerProperty(required=True) - -# # Relationship to artist_album_pairs table -# # Auto-populates Artist_Album_Association.album -# artists = relationship('Artist_Album_Association', -# order_by=Artist_Album_Association.id, 
back_populates='album') - -# # Relationship to tracks table -# # Auto-populates Track.album -# tracks = relationship('Track', -# order_by=Track.id, back_populates='album') - -# def __repr__(self) : -# return "" % (self.name, self.label) - -# # Association Class to model the many-to-many relationship between -# # Artists and Albums -# class Artist_Album_Association(Base) : -# __tablename__ = artist_album_pairs - -# id = ndb.IntegerProperty(required=True) - -# # Reference to artists table -# # Auto-populates Artist.albums -# artist_id = Column(Integer, ForeignKey('artists.id')) -# artist = relationship('Artist', back_populates='albums') - -# # Reference to albums table -# # Auto-populates Album.artists -# album_id = Column(Integer, ForeignKey('albums.id')) -# album = relationship('Album', back_populates='artists') - -# def __repr__(self) : -# return "" % (self.artist.name, self.album.name) - -# class Concert(Base) : -# __tablename__ = 'concerts' - -# id = ndb.IntegerProperty(required=True) -# name = ndb.StringProperty(required=True) -# event_link = ndb.StringProperty(required=True) -# date = ndb.StringProperty(required=True) -# time = ndb.StringProperty(required=True) - -# # Reference to venues table -# # Auto-populates Venue.concerts -# venue_id = Column(Integer, ForeignKey('venues.id')) -# venue = relationship('Venue', back_populates='concerts') - -# # Relationship to artist_album_pairs table -# # Auto-populates Concert_AA_Association.concert -# artist_album_pairs = relationship('Concert_AA_Association', -# order_by=Concert_AA_Association.id, back_populates='concert') - -# def __repr__(self) : -# return "" % (self.name) - -# # Association class to model the many-to-many relationship between -# # Concerts and Artists and Albums -# class Concert_AA_Association(Base) : -# __tablename__ = 'concert_aa_pairs' - -# id = ndb.IntegerProperty(required=True) - -# # Reference to concerts table -# # Auto-populates Concert.artist_album_pairs -# concert_id = Column(Integer, ForeignKey('concerts.id')) -# concert = relationship('Concert', -# back_populates='artist_album_pairs') - -# # Reference to artist_album_pairs table -# aa_id = Column(Integer, ForeignKey('artist_album_pairs.id')) -# artist_album = relationship('Artist_Album_Association') - -# def __repr__(self) : -# return "" % (self.concert.name, self.artist_album.artist.name, -# self.artist_album.album.name) - -# class Venue(Base) : -# __tablename__ = 'venues' - -# id = ndb.IntegerProperty(required=True) -# name = ndb.StringProperty(required=True) -# image_url = ndb.StringProperty(required=True) -# city = ndb.StringProperty(required=True) -# region = ndb.StringProperty(required=True) -# country = ndb.StringProperty(required=True) -# latitude = ndb.FloatProperty(required=True) -# longitude = ndb.FloatProperty(required=True) - -# # Relationship with concerts table -# # Auto-populates Concert.venue -# concerts = relationship('Concert', back_populates='venue') - -# def __repr__(self) : -# return "" % (self.name, self.city) - -# # SQLAlchemy's connection to database -# # See SQLAlchemy's documation on PostgreSQL format -# engine = create_engine('POSTGRESQL_DATABASE_HERE') - -# # Create tables in database from classes -# Base.metadata.create_all(engine) - diff --git a/src/application/secret_keys.py b/src/application/secret_keys.py deleted file mode 100644 index 152d737..0000000 --- a/src/application/secret_keys.py +++ /dev/null @@ -1,4 +0,0 @@ -# CSRF- and Session keys - -CSRF_SECRET_KEY = 'sWX58Vf8M07eafOxlbLvtJGD' -SESSION_KEY = 
'gJjU1ws8oe7Mur7MtFp2FuZ2' diff --git a/src/application/settings.py b/src/application/settings.py deleted file mode 100644 index b904f18..0000000 --- a/src/application/settings.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -settings.py - -Configuration for Flask app - -Important: Place your keys in the secret_keys.py module, - which should be kept out of version control. - -""" -from secret_keys import CSRF_SECRET_KEY, SESSION_KEY - - -class Config(object): - # Set secret keys for CSRF protection - SECRET_KEY = CSRF_SECRET_KEY - CSRF_SESSION_KEY = SESSION_KEY - # Flask-Cache settings - CACHE_TYPE = 'gaememcached' - - -class Development(Config): - DEBUG = True - # Flask-DebugToolbar settings - DEBUG_TB_PROFILER_ENABLED = True - DEBUG_TB_INTERCEPT_REDIRECTS = False - CSRF_ENABLED = True - - -class Testing(Config): - TESTING = True - DEBUG = True - CSRF_ENABLED = True - - -class Production(Config): - DEBUG = False - CSRF_ENABLED = True \ No newline at end of file diff --git a/src/application/static/sweBootstrap/b52AlbumPage.html b/src/application/static/sweBootstrap/b52AlbumPage.html deleted file mode 100644 index 80dbef6..0000000 --- a/src/application/static/sweBootstrap/b52AlbumPage.html +++ /dev/null @@ -1,142 +0,0 @@ - - - - - - - - - - BoSWEmian Rhapsody - - - - - - - - - - - - -
-    [page body; markup not preserved. Visible text: "Cosmic Thing" (header); Artists: The B-52's; Genre: Pop; Label: Warner Bros.; Date: 1989-06-23; Songs: 10 Songs]
- - diff --git a/src/application/static/sweBootstrap/b52Concert2Page.html b/src/application/static/sweBootstrap/b52Concert2Page.html deleted file mode 100644 index 41aa3cc..0000000 --- a/src/application/static/sweBootstrap/b52Concert2Page.html +++ /dev/null @@ -1,143 +0,0 @@ - - - - - - - - - - BoSWEmian Rhapsody - - - - - - - - - - - - -
-    [page body; markup not preserved. Visible text: "The B-52's" (header); Venue: House Of Blues; Date: 2017-05-20; Time: 19:00:00; Link: More Info; Lineup: The B-52's]
- - diff --git a/src/application/static/sweBootstrap/b52ConcertPage.html b/src/application/static/sweBootstrap/b52ConcertPage.html deleted file mode 100644 index c74f44d..0000000 --- a/src/application/static/sweBootstrap/b52ConcertPage.html +++ /dev/null @@ -1,143 +0,0 @@ - - - - - - - - - - BoSWEmian Rhapsody - - - - - - - - - - - - -
-    [page body; markup not preserved. Visible text: "The B-52's" (header); Date: 2017-05-18; Time: 19:00:00; Link: More Info; Lineup: The B-52's]
- - diff --git a/src/application/static/sweBootstrap/coldplayConcertPage.html b/src/application/static/sweBootstrap/coldplayConcertPage.html deleted file mode 100644 index b38a59a..0000000 --- a/src/application/static/sweBootstrap/coldplayConcertPage.html +++ /dev/null @@ -1,143 +0,0 @@ - - - - - - - - - - BoSWEmian Rhapsody - - - - - - - - - - - - -
-    [page body; markup not preserved. Visible text: "Coldplay" (header); Venue: Singapore National Stadium; Date: 2017-03-31; Time: 19:30:00; Link: More Info; Lineup: Coldplay]
- - diff --git a/src/application/static/sweBootstrap/home.html b/src/application/static/sweBootstrap/home.html deleted file mode 100644 index 8010652..0000000 --- a/src/application/static/sweBootstrap/home.html +++ /dev/null @@ -1,87 +0,0 @@ - - - - - - - - -BoSWEmian Rhapsody - - - - - - - - - - - -
-    [page body; markup not preserved. Navigation and carousel; slide copy: "Hello! This is BoSWE. We do music stuff, and it's pretty cool tbh. Come watch us crash Spotify's servers!"; "We are BoSWE. We're honestly pretty cool, we promise." (shown on two slides)]
- - - - - - - - diff --git a/src/application/static/sweBootstrap/queenAlbumPage.html b/src/application/static/sweBootstrap/queenAlbumPage.html deleted file mode 100644 index 62c34a2..0000000 --- a/src/application/static/sweBootstrap/queenAlbumPage.html +++ /dev/null @@ -1,142 +0,0 @@ - - - - - - - - - - BoSWEmian Rhapsody - - - - - - - - - - - - -
-    [page body; markup not preserved. Visible text: "A Night At The Opera (Deluxe Remastered Version)" (header); Artists: Queen; Genre: Rock; Label: Hollywood Records; Date: 1975-11-21; Songs: 18 Songs]
- - diff --git a/src/application/urls.py b/src/application/urls.py deleted file mode 100644 index 1e8db1b..0000000 --- a/src/application/urls.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -urls.py - -URL dispatch route mappings and error handlers - -""" -from flask import render_template - -from application import app - -from application.views.public.public_warmup import PublicWarmup -from application.views.public.public_index import PublicIndex -from application.views.public.public_say_hello import PublicSayHello - -from application.views.admin.admin_list_examples import AdminListExamples -from application.views.admin.admin_list_examples_cached import AdminListExamplesCached -from application.views.admin.admin_secret import AdminSecret -from application.views.admin.admin_edit_example import AdminEditExample -from application.views.admin.admin_delete_example import AdminDeleteExample - - -# URL dispatch rules - -# App Engine warm up handler -# See http://code.google.com/appengine/docs/python/config/appconfig.html#Warming_Requests -app.add_url_rule('/_ah/warmup', 'public_warmup', view_func=PublicWarmup.as_view('public_warmup')) - -app.add_url_rule('/', 'public_index', view_func=PublicIndex.as_view('public_index')) - -app.add_url_rule('/hello/', 'public_say_hello', view_func=PublicSayHello.as_view('public_say_hello')) - -app.add_url_rule('/examples', 'list_examples', view_func=AdminListExamples.as_view('list_examples'), methods=['GET', 'POST']) - -app.add_url_rule('/examples/cached', 'cached_examples', view_func=AdminListExamplesCached.as_view('cached_examples')) - -app.add_url_rule('/admin_only', 'admin_only', view_func=AdminSecret.as_view('admin_only')) - -app.add_url_rule('/examples//edit', 'edit_example', view_func=AdminEditExample.as_view('edit_example'), methods=['GET', 'POST']) - -app.add_url_rule('/examples//delete', 'delete_example', view_func=AdminDeleteExample.as_view('delete_example'), methods=['POST']) - -# Error handlers - -# Handle 404 errors - - -@app.errorhandler(404) -def page_not_found(e): - return render_template('404.html'), 404 - -# Handle 500 errors - - -@app.errorhandler(500) -def server_error(e): - return render_template('500.html'), 500 diff --git a/src/application/views/admin/admin_delete_example.py b/src/application/views/admin/admin_delete_example.py deleted file mode 100644 index d1141e9..0000000 --- a/src/application/views/admin/admin_delete_example.py +++ /dev/null @@ -1,26 +0,0 @@ -# -*- coding: utf-8 -*- - -from flask.views import View - -from flask import flash, redirect, url_for, request - -from google.appengine.runtime.apiproxy_errors import CapabilityDisabledError - -from models import ExampleModel - -from decorators import login_required - - -class AdminDeleteExample(View): - - @login_required - def dispatch_request(self, example_id): - example = ExampleModel.get_by_id(example_id) - if request.method == "POST": - try: - example.key.delete() - flash(u'Example %s successfully deleted.' 
% example_id, 'success') - return redirect(url_for('list_examples')) - except CapabilityDisabledError: - flash(u'App Engine Datastore is currently in read-only mode.', 'info') - return redirect(url_for('list_examples')) diff --git a/src/application/views/admin/admin_edit_example.py b/src/application/views/admin/admin_edit_example.py deleted file mode 100644 index 4d1ba69..0000000 --- a/src/application/views/admin/admin_edit_example.py +++ /dev/null @@ -1,26 +0,0 @@ -# -*- coding: utf-8 -*- - -from flask.views import View - -from flask import flash, redirect, url_for, render_template, request - -from forms import ExampleForm -from models import ExampleModel - -from decorators import login_required - - -class AdminEditExample(View): - - @login_required - def dispatch_request(self, example_id): - example = ExampleModel.get_by_id(example_id) - form = ExampleForm(obj=example) - if request.method == "POST": - if form.validate_on_submit(): - example.example_name = form.data.get('example_name') - example.example_description = form.data.get('example_description') - example.put() - flash(u'Example %s successfully saved.' % example_id, 'success') - return redirect(url_for('list_examples')) - return render_template('edit_example.html', example=example, form=form) diff --git a/src/application/views/admin/admin_list_examples.py b/src/application/views/admin/admin_list_examples.py deleted file mode 100644 index fdbefc9..0000000 --- a/src/application/views/admin/admin_list_examples.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- coding: utf-8 -*- - -from flask.views import View - -from flask import flash, redirect, url_for, render_template - -from google.appengine.api import users -from google.appengine.runtime.apiproxy_errors import CapabilityDisabledError - -from forms import ExampleForm -from models import ExampleModel - -from decorators import login_required - - -class AdminListExamples(View): - - @login_required - def dispatch_request(self): - examples = ExampleModel.query() - form = ExampleForm() - if form.validate_on_submit(): - example = ExampleModel( - example_name=form.example_name.data, - example_description=form.example_description.data, - added_by=users.get_current_user() - ) - try: - example.put() - example_id = example.key.id() - flash(u'Example %s successfully saved.' 
% example_id, 'success') - return redirect(url_for('list_examples')) - except CapabilityDisabledError: - flash(u'App Engine Datastore is currently in read-only mode.', 'info') - return redirect(url_for('list_examples')) - return render_template('list_examples.html', examples=examples, form=form) diff --git a/src/application/views/admin/admin_list_examples_cached.py b/src/application/views/admin/admin_list_examples_cached.py deleted file mode 100644 index 23167b2..0000000 --- a/src/application/views/admin/admin_list_examples_cached.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- - -from flask.views import View - -from flask import render_template - -from models import ExampleModel - -from decorators import login_required - -from flask_cache import Cache - -from application import app - -cache = Cache(app) - - -class AdminListExamplesCached(View): - - @login_required - @cache.cached(timeout=60) - def dispatch_request(self): - - examples = ExampleModel.query() - return render_template('list_examples_cached.html', examples=examples) diff --git a/src/application/views/admin/admin_secret.py b/src/application/views/admin/admin_secret.py deleted file mode 100644 index 521c04c..0000000 --- a/src/application/views/admin/admin_secret.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- - -from flask.views import View - -from decorators import admin_required - - -class AdminSecret(View): - - @admin_required - def dispatch_request(self): - - return 'Super-seekrit admin page.' diff --git a/src/application/views/public/__init__.py b/src/application/views/public/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/application/views/public/public_index.py b/src/application/views/public/public_index.py deleted file mode 100644 index ebe74fe..0000000 --- a/src/application/views/public/public_index.py +++ /dev/null @@ -1,11 +0,0 @@ -# -*- coding: utf-8 -*- - -from flask.views import View - -from flask import redirect, url_for - - -class PublicIndex(View): - - def dispatch_request(self): - return redirect(url_for('static', filename='sweBootstrap/home.html')) diff --git a/src/application/views/public/public_say_hello.py b/src/application/views/public/public_say_hello.py deleted file mode 100644 index 87e6250..0000000 --- a/src/application/views/public/public_say_hello.py +++ /dev/null @@ -1,9 +0,0 @@ -# -*- coding: utf-8 -*- - -from flask.views import View - - -class PublicSayHello(View): - - def dispatch_request(self, username): - return "Hello {}".format(username) diff --git a/src/application/views/public/public_warmup.py b/src/application/views/public/public_warmup.py deleted file mode 100644 index f3795f5..0000000 --- a/src/application/views/public/public_warmup.py +++ /dev/null @@ -1,9 +0,0 @@ -# -*- coding: utf-8 -*- - -from flask.views import View - - -class PublicWarmup(View): - - def dispatch_request(self): - return "I'm ready" diff --git a/src/apptest.py b/src/apptest.py deleted file mode 100644 index edfca25..0000000 --- a/src/apptest.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/python -import os -import sys -import unittest2 -import warnings -# silences Python's complaints about imports -warnings.filterwarnings('ignore', category=UserWarning) - -USAGE = """ -Path to your sdk must be the first argument. To run type: - -$ apptest.py path/to/your/appengine/installation - -Remember to set environment variable FLASK_CONF to TEST. 
-Loading configuration depending on the value of -environment variable allows you to add your own -testing configuration in src/application/settings.py - -""" - - -def main(sdk_path, test_path): - sys.path.insert(0, sdk_path) - import dev_appserver - dev_appserver.fix_sys_path() - sys.path.insert(1, os.path.join(os.path.abspath('.'), 'lib')) - sys.path.insert(1, os.path.join(os.path.abspath('.'), 'application')) - suite = unittest2.loader.TestLoader().discover(test_path) - unittest2.TextTestRunner(verbosity=2).run(suite) - - -if __name__ == '__main__': - #See: http://code.google.com/appengine/docs/python/tools/localunittesting.html - try: - #Path to the SDK installation - SDK_PATH = sys.argv[1] # ...or hardcoded path - #Path to tests folder - TEST_PATH = os.path.join(os.path.dirname(os.path.abspath(__name__)),'tests') - main(SDK_PATH, TEST_PATH) - except IndexError: - # you probably forgot about path as first argument - print USAGE \ No newline at end of file diff --git a/src/index.yaml b/src/index.yaml deleted file mode 100644 index a3b9e05..0000000 --- a/src/index.yaml +++ /dev/null @@ -1,11 +0,0 @@ -indexes: - -# AUTOGENERATED - -# This index.yaml is automatically updated whenever the dev_appserver -# detects that a new type of query is run. If you want to manage the -# index.yaml file manually, remove the above marker line (the line -# saying "# AUTOGENERATED"). If you want to manage some indexes -# manually, move them above the marker line. The index.yaml file is -# automatically uploaded to the admin console when you next deploy -# your application using appcfg.py. diff --git a/src/lib/__init__.py b/src/lib/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/lib/blinker/__init__.py b/src/lib/blinker/__init__.py deleted file mode 100644 index 79d8645..0000000 --- a/src/lib/blinker/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -from blinker.base import ( - ANY, - NamedSignal, - Namespace, - Signal, - receiver_connected, - signal, -) - -__all__ = [ - 'ANY', - 'NamedSignal', - 'Namespace', - 'Signal', - 'receiver_connected', - 'signal', - ] - - -__version__ = '1.1' diff --git a/src/lib/blinker/_saferef.py b/src/lib/blinker/_saferef.py deleted file mode 100644 index 269e362..0000000 --- a/src/lib/blinker/_saferef.py +++ /dev/null @@ -1,234 +0,0 @@ -# extracted from Louie, http://pylouie.org/ -# updated for Python 3 -# -# Copyright (c) 2006 Patrick K. O'Brien, Mike C. Fletcher, -# Matthew R. Scott -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# * Neither the name of the nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -"""Refactored 'safe reference from dispatcher.py""" - -import operator -import sys -import traceback -import weakref - - -try: - callable -except NameError: - def callable(object): - return hasattr(object, '__call__') - - -if sys.version_info < (3,): - get_self = operator.attrgetter('im_self') - get_func = operator.attrgetter('im_func') -else: - get_self = operator.attrgetter('__self__') - get_func = operator.attrgetter('__func__') - - -def safe_ref(target, on_delete=None): - """Return a *safe* weak reference to a callable target. - - - ``target``: The object to be weakly referenced, if it's a bound - method reference, will create a BoundMethodWeakref, otherwise - creates a simple weakref. - - - ``on_delete``: If provided, will have a hard reference stored to - the callable to be called after the safe reference goes out of - scope with the reference object, (either a weakref or a - BoundMethodWeakref) as argument. - """ - try: - im_self = get_self(target) - except AttributeError: - if callable(on_delete): - return weakref.ref(target, on_delete) - else: - return weakref.ref(target) - else: - if im_self is not None: - # Turn a bound method into a BoundMethodWeakref instance. - # Keep track of these instances for lookup by disconnect(). - assert hasattr(target, 'im_func') or hasattr(target, '__func__'), ( - "safe_ref target %r has im_self, but no im_func, " - "don't know how to create reference" % target) - reference = BoundMethodWeakref(target=target, on_delete=on_delete) - return reference - - -class BoundMethodWeakref(object): - """'Safe' and reusable weak references to instance methods. - - BoundMethodWeakref objects provide a mechanism for referencing a - bound method without requiring that the method object itself - (which is normally a transient object) is kept alive. Instead, - the BoundMethodWeakref object keeps weak references to both the - object and the function which together define the instance method. - - Attributes: - - - ``key``: The identity key for the reference, calculated by the - class's calculate_key method applied to the target instance method. - - - ``deletion_methods``: Sequence of callable objects taking single - argument, a reference to this object which will be called when - *either* the target object or target function is garbage - collected (i.e. when this object becomes invalid). These are - specified as the on_delete parameters of safe_ref calls. - - - ``weak_self``: Weak reference to the target object. - - - ``weak_func``: Weak reference to the target function. - - Class Attributes: - - - ``_all_instances``: Class attribute pointing to all live - BoundMethodWeakref objects indexed by the class's - calculate_key(target) method applied to the target objects. - This weak value dictionary is used to short-circuit creation so - that multiple references to the same (object, function) pair - produce the same BoundMethodWeakref instance. 
- """ - - _all_instances = weakref.WeakValueDictionary() - - def __new__(cls, target, on_delete=None, *arguments, **named): - """Create new instance or return current instance. - - Basically this method of construction allows us to - short-circuit creation of references to already- referenced - instance methods. The key corresponding to the target is - calculated, and if there is already an existing reference, - that is returned, with its deletion_methods attribute updated. - Otherwise the new instance is created and registered in the - table of already-referenced methods. - """ - key = cls.calculate_key(target) - current = cls._all_instances.get(key) - if current is not None: - current.deletion_methods.append(on_delete) - return current - else: - base = super(BoundMethodWeakref, cls).__new__(cls) - cls._all_instances[key] = base - base.__init__(target, on_delete, *arguments, **named) - return base - - def __init__(self, target, on_delete=None): - """Return a weak-reference-like instance for a bound method. - - - ``target``: The instance-method target for the weak reference, - must have im_self and im_func attributes and be - reconstructable via the following, which is true of built-in - instance methods:: - - target.im_func.__get__( target.im_self ) - - - ``on_delete``: Optional callback which will be called when - this weak reference ceases to be valid (i.e. either the - object or the function is garbage collected). Should take a - single argument, which will be passed a pointer to this - object. - """ - def remove(weak, self=self): - """Set self.isDead to True when method or instance is destroyed.""" - methods = self.deletion_methods[:] - del self.deletion_methods[:] - try: - del self.__class__._all_instances[self.key] - except KeyError: - pass - for function in methods: - try: - if callable(function): - function(self) - except Exception: - try: - traceback.print_exc() - except AttributeError: - e = sys.exc_info()[1] - print ('Exception during saferef %s ' - 'cleanup function %s: %s' % (self, function, e)) - self.deletion_methods = [on_delete] - self.key = self.calculate_key(target) - im_self = get_self(target) - im_func = get_func(target) - self.weak_self = weakref.ref(im_self, remove) - self.weak_func = weakref.ref(im_func, remove) - self.self_name = str(im_self) - self.func_name = str(im_func.__name__) - - def calculate_key(cls, target): - """Calculate the reference key for this reference. - - Currently this is a two-tuple of the id()'s of the target - object and the target function respectively. - """ - return (id(get_self(target)), id(get_func(target))) - calculate_key = classmethod(calculate_key) - - def __str__(self): - """Give a friendly representation of the object.""" - return "%s(%s.%s)" % ( - self.__class__.__name__, - self.self_name, - self.func_name, - ) - - __repr__ = __str__ - - def __nonzero__(self): - """Whether we are still a valid reference.""" - return self() is not None - - def __cmp__(self, other): - """Compare with another reference.""" - if not isinstance(other, self.__class__): - return cmp(self.__class__, type(other)) - return cmp(self.key, other.key) - - def __call__(self): - """Return a strong reference to the bound method. - - If the target cannot be retrieved, then will return None, - otherwise returns a bound instance method for our object and - function. - - Note: You may call this method any number of times, as it does - not invalidate the reference. 
- """ - target = self.weak_self() - if target is not None: - function = self.weak_func() - if function is not None: - return function.__get__(target) - return None diff --git a/src/lib/blinker/_utilities.py b/src/lib/blinker/_utilities.py deleted file mode 100644 index 9e9e037..0000000 --- a/src/lib/blinker/_utilities.py +++ /dev/null @@ -1,138 +0,0 @@ -from weakref import ref - -from blinker._saferef import BoundMethodWeakref - - -try: - callable -except NameError: - def callable(object): - return hasattr(object, '__call__') - - -try: - from collections import defaultdict -except: - class defaultdict(dict): - - def __init__(self, default_factory=None, *a, **kw): - if (default_factory is not None and - not hasattr(default_factory, '__call__')): - raise TypeError('first argument must be callable') - dict.__init__(self, *a, **kw) - self.default_factory = default_factory - - def __getitem__(self, key): - try: - return dict.__getitem__(self, key) - except KeyError: - return self.__missing__(key) - - def __missing__(self, key): - if self.default_factory is None: - raise KeyError(key) - self[key] = value = self.default_factory() - return value - - def __reduce__(self): - if self.default_factory is None: - args = tuple() - else: - args = self.default_factory, - return type(self), args, None, None, self.items() - - def copy(self): - return self.__copy__() - - def __copy__(self): - return type(self)(self.default_factory, self) - - def __deepcopy__(self, memo): - import copy - return type(self)(self.default_factory, - copy.deepcopy(self.items())) - - def __repr__(self): - return 'defaultdict(%s, %s)' % (self.default_factory, - dict.__repr__(self)) - - -try: - from contextlib import contextmanager -except ImportError: - def contextmanager(fn): - def oops(*args, **kw): - raise RuntimeError("Python 2.5 or above is required to use " - "context managers.") - oops.__name__ = fn.__name__ - return oops - -class _symbol(object): - - def __init__(self, name): - """Construct a new named symbol.""" - self.__name__ = self.name = name - - def __reduce__(self): - return symbol, (self.name,) - - def __repr__(self): - return self.name -_symbol.__name__ = 'symbol' - - -class symbol(object): - """A constant symbol. - - >>> symbol('foo') is symbol('foo') - True - >>> symbol('foo') - foo - - A slight refinement of the MAGICCOOKIE=object() pattern. The primary - advantage of symbol() is its repr(). They are also singletons. - - Repeated calls of symbol('name') will all return the same instance. 
- - """ - symbols = {} - - def __new__(cls, name): - try: - return cls.symbols[name] - except KeyError: - return cls.symbols.setdefault(name, _symbol(name)) - - -def hashable_identity(obj): - if hasattr(obj, 'im_func'): - return (id(obj.im_func), id(obj.im_self)) - else: - return id(obj) - - -WeakTypes = (ref, BoundMethodWeakref) - - -class annotatable_weakref(ref): - """A weakref.ref that supports custom instance attributes.""" - - -def reference(object, callback=None, **annotations): - """Return an annotated weak ref.""" - if callable(object): - weak = callable_reference(object, callback) - else: - weak = annotatable_weakref(object, callback) - for key, value in annotations.items(): - setattr(weak, key, value) - return weak - - -def callable_reference(object, callback=None): - """Return an annotated weak ref, supporting bound instance methods.""" - if hasattr(object, 'im_self') and object.im_self is not None: - return BoundMethodWeakref(target=object, on_delete=callback) - elif hasattr(object, '__self__') and object.__self__ is not None: - return BoundMethodWeakref(target=object, on_delete=callback) - return annotatable_weakref(object, callback) diff --git a/src/lib/blinker/base.py b/src/lib/blinker/base.py deleted file mode 100644 index d2a9eff..0000000 --- a/src/lib/blinker/base.py +++ /dev/null @@ -1,345 +0,0 @@ -# -*- coding: utf-8; fill-column: 76 -*- -"""Signals and events. - -A small implementation of signals, inspired by a snippet of Django signal -API client code seen in a blog post. Signals are first-class objects and -each manages its own receivers and message emission. - -The :func:`signal` function provides singleton behavior for named signals. - -""" -from warnings import warn -from weakref import WeakValueDictionary - -from blinker._utilities import ( - WeakTypes, - contextmanager, - defaultdict, - hashable_identity, - reference, - symbol, - ) - - -ANY = symbol('ANY') -ANY.__doc__ = 'Token for "any sender".' -ANY_ID = 0 - - -class Signal(object): - """A notification emitter.""" - - #: An :obj:`ANY` convenience synonym, allows ``Signal.ANY`` - #: without an additional import. - ANY = ANY - - def __init__(self, doc=None): - """ - :param doc: optional. If provided, will be assigned to the signal's - __doc__ attribute. - - """ - if doc: - self.__doc__ = doc - #: A mapping of connected receivers. - #: - #: The values of this mapping are not meaningful outside of the - #: internal :class:`Signal` implementation, however the boolean value - #: of the mapping is useful as an extremely efficient check to see if - #: any receivers are connected to the signal. - self.receivers = {} - self._by_receiver = defaultdict(set) - self._by_sender = defaultdict(set) - self._weak_senders = {} - - def connect(self, receiver, sender=ANY, weak=True): - """Connect *receiver* to signal events sent by *sender*. - - :param receiver: A callable. Will be invoked by :meth:`send` with - `sender=` as a single positional argument and any \*\*kwargs that - were provided to a call to :meth:`send`. - - :param sender: Any object or :obj:`ANY`, defaults to ``ANY``. - Restricts notifications delivered to *receiver* to only those - :meth:`send` emissions sent by *sender*. If ``ANY``, the receiver - will always be notified. A *receiver* may be connected to - multiple *sender* values on the same Signal through multiple calls - to :meth:`connect`. - - :param weak: If true, the Signal will hold a weakref to *receiver* - and automatically disconnect when *receiver* goes out of scope or - is garbage collected. 
Defaults to True. - - """ - receiver_id = hashable_identity(receiver) - if weak: - receiver_ref = reference(receiver, self._cleanup_receiver) - receiver_ref.receiver_id = receiver_id - else: - receiver_ref = receiver - if sender is ANY: - sender_id = ANY_ID - else: - sender_id = hashable_identity(sender) - - self.receivers.setdefault(receiver_id, receiver_ref) - self._by_sender[sender_id].add(receiver_id) - self._by_receiver[receiver_id].add(sender_id) - del receiver_ref - - if sender is not ANY and sender_id not in self._weak_senders: - # wire together a cleanup for weakref-able senders - try: - sender_ref = reference(sender, self._cleanup_sender) - sender_ref.sender_id = sender_id - except TypeError: - pass - else: - self._weak_senders.setdefault(sender_id, sender_ref) - del sender_ref - - # broadcast this connection. if receivers raise, disconnect. - if receiver_connected.receivers and self is not receiver_connected: - try: - receiver_connected.send(self, - receiver_arg=receiver, - sender_arg=sender, - weak_arg=weak) - except: - self.disconnect(receiver, sender) - raise - return receiver - - def connect_via(self, sender, weak=False): - """Connect the decorated function as a receiver for *sender*. - - :param sender: Any object or :obj:`ANY`. The decorated function - will only receive :meth:`send` emissions sent by *sender*. If - ``ANY``, the receiver will always be notified. A function may be - decorated multiple times with differing *sender* values. - - :param weak: If true, the Signal will hold a weakref to the - decorated function and automatically disconnect when *receiver* - goes out of scope or is garbage collected. Unlike - :meth:`connect`, this defaults to False. - - The decorated function will be invoked by :meth:`send` with - `sender=` as a single positional argument and any \*\*kwargs that - were provided to the call to :meth:`send`. - - - .. versionadded:: 1.1 - - """ - def decorator(fn): - self.connect(fn, sender, weak) - return fn - return decorator - - @contextmanager - def connected_to(self, receiver, sender=ANY): - """Execute a block with the signal temporarily connected to *receiver*. - - :param receiver: a receiver callable - :param sender: optional, a sender to filter on - - This is a context manager for use in the ``with`` statement. It can - be useful in unit tests. *receiver* is connected to the signal for - the duration of the ``with`` block, and will be disconnected - automatically when exiting the block: - - .. testsetup:: - - from __future__ import with_statement - from blinker import Signal - on_ready = Signal() - receiver = lambda sender: None - - .. testcode:: - - with on_ready.connected_to(receiver): - # do stuff - on_ready.send(123) - - .. versionadded:: 1.1 - - """ - self.connect(receiver, sender=sender, weak=False) - try: - yield None - except: - self.disconnect(receiver) - raise - else: - self.disconnect(receiver) - - def temporarily_connected_to(self, receiver, sender=ANY): - """An alias for :meth:`connected_to`. - - :param receiver: a receiver callable - :param sender: optional, a sender to filter on - - .. versionadded:: 0.9 - - .. versionchanged:: 1.1 - Renamed to :meth:`connected_to`. ``temporarily_connected_to`` - will be deprecated in 1.2 and removed in a subsequent version. - - """ - warn("temporarily_connected_to is deprecated; " - "use connected_to instead.", - PendingDeprecationWarning) - return self.connected_to(receiver, sender) - - def send(self, *sender, **kwargs): - """Emit this signal on behalf of *sender*, passing on \*\*kwargs. 
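
`connect()` and `send()` above form blinker's core API: `send()` returns a list pairing each live receiver with its return value. A small usage sketch (the signal and receiver names are illustrative, not from this repo):

.. code:: python

    from blinker import Signal

    on_save = Signal()

    def receiver(sender, **kwargs):
        return 'saw %r from %r' % (kwargs.get('item'), sender)

    on_save.connect(receiver)                # weak=True by default
    results = on_save.send('model', item=42)
    # results == [(receiver, "saw 42 from 'model'")]
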
- - Returns a list of 2-tuples, pairing receivers with their return - value. The ordering of receiver notification is undefined. - - :param \*sender: Any object or ``None``. If omitted, synonymous - with ``None``. Only accepts one positional argument. - - :param \*\*kwargs: Data to be sent to receivers. - - """ - # Using '*sender' rather than 'sender=None' allows 'sender' to be - # used as a keyword argument- i.e. it's an invisible name in the - # function signature. - if len(sender) == 0: - sender = None - elif len(sender) > 1: - raise TypeError('send() accepts only one positional argument, ' - '%s given' % len(sender)) - else: - sender = sender[0] - if not self.receivers: - return [] - else: - return [(receiver, receiver(sender, **kwargs)) - for receiver in self.receivers_for(sender)] - - def has_receivers_for(self, sender): - """True if there is probably a receiver for *sender*. - - Performs an optimistic check only. Does not guarantee that all - weakly referenced receivers are still alive. See - :meth:`receivers_for` for a stronger search. - - """ - if not self.receivers: - return False - if self._by_sender[ANY_ID]: - return True - if sender is ANY: - return False - return hashable_identity(sender) in self._by_sender - - def receivers_for(self, sender): - """Iterate all live receivers listening for *sender*.""" - # TODO: test receivers_for(ANY) - if self.receivers: - sender_id = hashable_identity(sender) - if sender_id in self._by_sender: - ids = (self._by_sender[ANY_ID] | - self._by_sender[sender_id]) - else: - ids = self._by_sender[ANY_ID].copy() - for receiver_id in ids: - receiver = self.receivers.get(receiver_id) - if receiver is None: - continue - if isinstance(receiver, WeakTypes): - strong = receiver() - if strong is None: - self._disconnect(receiver_id, ANY_ID) - continue - receiver = strong - yield receiver - - def disconnect(self, receiver, sender=ANY): - """Disconnect *receiver* from this signal's events. - - :param receiver: a previously :meth:`connected` callable - - :param sender: a specific sender to disconnect from, or :obj:`ANY` - to disconnect from all senders. Defaults to ``ANY``. - - """ - if sender is ANY: - sender_id = ANY_ID - else: - sender_id = hashable_identity(sender) - receiver_id = hashable_identity(receiver) - self._disconnect(receiver_id, sender_id) - - def _disconnect(self, receiver_id, sender_id): - if sender_id == ANY_ID: - if self._by_receiver.pop(receiver_id, False): - for bucket in self._by_sender.values(): - bucket.discard(receiver_id) - self.receivers.pop(receiver_id, None) - else: - self._by_sender[sender_id].discard(receiver_id) - - def _cleanup_receiver(self, receiver_ref): - """Disconnect a receiver from all senders.""" - self._disconnect(receiver_ref.receiver_id, ANY_ID) - - def _cleanup_sender(self, sender_ref): - """Disconnect all receivers from a sender.""" - sender_id = sender_ref.sender_id - assert sender_id != ANY_ID - self._weak_senders.pop(sender_id, None) - for receiver_id in self._by_sender.pop(sender_id, ()): - self._by_receiver[receiver_id].discard(sender_id) - - def _clear_state(self): - """Throw away all signal state. Useful for unit tests.""" - self._weak_senders.clear() - self.receivers.clear() - self._by_sender.clear() - self._by_receiver.clear() - - -receiver_connected = Signal("""\ -Sent by a :class:`Signal` after a receiver connects. 
- -:argument: the Signal that was connected to -:keyword receiver_arg: the connected receiver -:keyword sender_arg: the sender to connect to -:keyword weak_arg: true if the connection to receiver_arg is a weak reference - -""") - - -class NamedSignal(Signal): - """A named generic notification emitter.""" - - def __init__(self, name, doc=None): - Signal.__init__(self, doc) - - #: The name of this signal. - self.name = name - - def __repr__(self): - base = Signal.__repr__(self) - return "%s; %r>" % (base[:-1], self.name) - - -class Namespace(WeakValueDictionary): - """A mapping of signal names to signals.""" - - def signal(self, name, doc=None): - """Return the :class:`NamedSignal` *name*, creating it if required. - - Repeated calls to this function will return the same signal object. - - """ - try: - return self[name] - except KeyError: - return self.setdefault(name, NamedSignal(name, doc)) - - -signal = Namespace().signal diff --git a/src/lib/flask/__init__.py b/src/lib/flask/__init__.py deleted file mode 100644 index 05a2744..0000000 --- a/src/lib/flask/__init__.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask - ~~~~~ - - A microframework based on Werkzeug. It's extensively documented - and follows best practice patterns. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - -__version__ = '0.10' - -# utilities we import from Werkzeug and Jinja2 that are unused -# in the module but are exported as public interface. -from werkzeug.exceptions import abort -from werkzeug.utils import redirect -from jinja2 import Markup, escape - -from .app import Flask, Request, Response -from .config import Config -from .helpers import url_for, flash, send_file, send_from_directory, \ - get_flashed_messages, get_template_attribute, make_response, safe_join, \ - stream_with_context -from .globals import current_app, g, request, session, _request_ctx_stack, \ - _app_ctx_stack -from .ctx import has_request_context, has_app_context, \ - after_this_request, copy_current_request_context -from .module import Module -from .blueprints import Blueprint -from .templating import render_template, render_template_string - -# the signals -from .signals import signals_available, template_rendered, request_started, \ - request_finished, got_request_exception, request_tearing_down, \ - appcontext_tearing_down, appcontext_pushed, \ - appcontext_popped, message_flashed - -# We're not exposing the actual json module but a convenient wrapper around -# it. -from . import json - -# This was the only thing that flask used to export at one point and it had -# a more generic name. -jsonify = json.jsonify - -# backwards compat, goes away in 1.0 -from .sessions import SecureCookieSession as Session -json_available = True diff --git a/src/lib/flask/_compat.py b/src/lib/flask/_compat.py deleted file mode 100644 index c342884..0000000 --- a/src/lib/flask/_compat.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask._compat - ~~~~~~~~~~~~~ - - Some py2/py3 compatibility support based on a stripped down - version of six so we don't have to depend on a specific version - of it. - - :copyright: (c) 2013 by Armin Ronacher. - :license: BSD, see LICENSE for more details. 
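
`Namespace.signal()` deleted above is what gives named signals their singleton behavior; a quick sketch using the module-level `signal` alias:

.. code:: python

    from blinker import signal

    # Lookups by name always return the same NamedSignal instance,
    # so independent modules can rendezvous on a shared name.
    ready = signal('app-ready', doc='Emitted once startup completes.')
    assert signal('app-ready') is ready
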
-""" -import sys - -PY2 = sys.version_info[0] == 2 -_identity = lambda x: x - - -if not PY2: - text_type = str - string_types = (str,) - integer_types = (int, ) - - iterkeys = lambda d: iter(d.keys()) - itervalues = lambda d: iter(d.values()) - iteritems = lambda d: iter(d.items()) - - from io import StringIO - - def reraise(tp, value, tb=None): - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - - implements_to_string = _identity - -else: - text_type = unicode - string_types = (str, unicode) - integer_types = (int, long) - - iterkeys = lambda d: d.iterkeys() - itervalues = lambda d: d.itervalues() - iteritems = lambda d: d.iteritems() - - from cStringIO import StringIO - - exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') - - def implements_to_string(cls): - cls.__unicode__ = cls.__str__ - cls.__str__ = lambda x: x.__unicode__().encode('utf-8') - return cls - - -def with_metaclass(meta, *bases): - # This requires a bit of explanation: the basic idea is to make a - # dummy metaclass for one level of class instantiation that replaces - # itself with the actual metaclass. Because of internal type checks - # we also need to make sure that we downgrade the custom metaclass - # for one level to something closer to type (that's why __call__ and - # __init__ comes back from type etc.). - # - # This has the advantage over six.with_metaclass in that it does not - # introduce dummy classes into the final MRO. - class metaclass(meta): - __call__ = type.__call__ - __init__ = type.__init__ - def __new__(cls, name, this_bases, d): - if this_bases is None: - return type.__new__(cls, name, (), d) - return meta(name, bases, d) - return metaclass('temporary_class', None, {}) diff --git a/src/lib/flask/blueprints.py b/src/lib/flask/blueprints.py deleted file mode 100644 index 4575ec9..0000000 --- a/src/lib/flask/blueprints.py +++ /dev/null @@ -1,401 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.blueprints - ~~~~~~~~~~~~~~~~ - - Blueprints are the recommended way to implement larger or more - pluggable applications in Flask 0.7 and later. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" -from functools import update_wrapper - -from .helpers import _PackageBoundObject, _endpoint_from_view_func - - -class BlueprintSetupState(object): - """Temporary holder object for registering a blueprint with the - application. An instance of this class is created by the - :meth:`~flask.Blueprint.make_setup_state` method and later passed - to all register callback functions. - """ - - def __init__(self, blueprint, app, options, first_registration): - #: a reference to the current application - self.app = app - - #: a reference to the blueprint that created this setup state. - self.blueprint = blueprint - - #: a dictionary with all options that were passed to the - #: :meth:`~flask.Flask.register_blueprint` method. - self.options = options - - #: as blueprints can be registered multiple times with the - #: application and not everything wants to be registered - #: multiple times on it, this attribute can be used to figure - #: out if the blueprint was registered in the past already. - self.first_registration = first_registration - - subdomain = self.options.get('subdomain') - if subdomain is None: - subdomain = self.blueprint.subdomain - - #: The subdomain that the blueprint should be active for, `None` - #: otherwise. 
- self.subdomain = subdomain - - url_prefix = self.options.get('url_prefix') - if url_prefix is None: - url_prefix = self.blueprint.url_prefix - - #: The prefix that should be used for all URLs defined on the - #: blueprint. - self.url_prefix = url_prefix - - #: A dictionary with URL defaults that is added to each and every - #: URL that was defined with the blueprint. - self.url_defaults = dict(self.blueprint.url_values_defaults) - self.url_defaults.update(self.options.get('url_defaults', ())) - - def add_url_rule(self, rule, endpoint=None, view_func=None, **options): - """A helper method to register a rule (and optionally a view function) - to the application. The endpoint is automatically prefixed with the - blueprint's name. - """ - if self.url_prefix: - rule = self.url_prefix + rule - options.setdefault('subdomain', self.subdomain) - if endpoint is None: - endpoint = _endpoint_from_view_func(view_func) - defaults = self.url_defaults - if 'defaults' in options: - defaults = dict(defaults, **options.pop('defaults')) - self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint), - view_func, defaults=defaults, **options) - - -class Blueprint(_PackageBoundObject): - """Represents a blueprint. A blueprint is an object that records - functions that will be called with the - :class:`~flask.blueprint.BlueprintSetupState` later to register functions - or other things on the main application. See :ref:`blueprints` for more - information. - - .. versionadded:: 0.7 - """ - - warn_on_modifications = False - _got_registered_once = False - - def __init__(self, name, import_name, static_folder=None, - static_url_path=None, template_folder=None, - url_prefix=None, subdomain=None, url_defaults=None): - _PackageBoundObject.__init__(self, import_name, template_folder) - self.name = name - self.url_prefix = url_prefix - self.subdomain = subdomain - self.static_folder = static_folder - self.static_url_path = static_url_path - self.deferred_functions = [] - self.view_functions = {} - if url_defaults is None: - url_defaults = {} - self.url_values_defaults = url_defaults - - def record(self, func): - """Registers a function that is called when the blueprint is - registered on the application. This function is called with the - state as argument as returned by the :meth:`make_setup_state` - method. - """ - if self._got_registered_once and self.warn_on_modifications: - from warnings import warn - warn(Warning('The blueprint was already registered once ' - 'but is getting modified now. These changes ' - 'will not show up.')) - self.deferred_functions.append(func) - - def record_once(self, func): - """Works like :meth:`record` but wraps the function in another - function that will ensure the function is only called once. If the - blueprint is registered a second time on the application, the - function passed is not called. - """ - def wrapper(state): - if state.first_registration: - func(state) - return self.record(update_wrapper(wrapper, func)) - - def make_setup_state(self, app, options, first_registration=False): - """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState` - object that is later passed to the register callback functions. - Subclasses can override this to return a subclass of the setup state. - """ - return BlueprintSetupState(self, app, options, first_registration) - - def register(self, app, options, first_registration=False): - """Called by :meth:`Flask.register_blueprint` to register a blueprint - on the application. 
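
`Blueprint.register()` above replays every deferred function recorded by `route()` and friends once `Flask.register_blueprint` is called; the public-API view of that machinery:

.. code:: python

    from flask import Flask, Blueprint

    bp = Blueprint('admin', __name__, url_prefix='/admin')

    @bp.route('/')
    def index():
        return 'admin index'

    app = Flask(__name__)
    app.register_blueprint(bp)
    # The endpoint is prefixed with the blueprint name, so
    # url_for('admin.index') builds '/admin/'.
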
This can be overridden to customize the register - behavior. Keyword arguments from - :func:`~flask.Flask.register_blueprint` are directly forwarded to this - method in the `options` dictionary. - """ - self._got_registered_once = True - state = self.make_setup_state(app, options, first_registration) - if self.has_static_folder: - state.add_url_rule(self.static_url_path + '/', - view_func=self.send_static_file, - endpoint='static') - - for deferred in self.deferred_functions: - deferred(state) - - def route(self, rule, **options): - """Like :meth:`Flask.route` but for a blueprint. The endpoint for the - :func:`url_for` function is prefixed with the name of the blueprint. - """ - def decorator(f): - endpoint = options.pop("endpoint", f.__name__) - self.add_url_rule(rule, endpoint, f, **options) - return f - return decorator - - def add_url_rule(self, rule, endpoint=None, view_func=None, **options): - """Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for - the :func:`url_for` function is prefixed with the name of the blueprint. - """ - if endpoint: - assert '.' not in endpoint, "Blueprint endpoint's should not contain dot's" - self.record(lambda s: - s.add_url_rule(rule, endpoint, view_func, **options)) - - def endpoint(self, endpoint): - """Like :meth:`Flask.endpoint` but for a blueprint. This does not - prefix the endpoint with the blueprint name, this has to be done - explicitly by the user of this method. If the endpoint is prefixed - with a `.` it will be registered to the current blueprint, otherwise - it's an application independent endpoint. - """ - def decorator(f): - def register_endpoint(state): - state.app.view_functions[endpoint] = f - self.record_once(register_endpoint) - return f - return decorator - - def app_template_filter(self, name=None): - """Register a custom template filter, available application wide. Like - :meth:`Flask.template_filter` but for a blueprint. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - def decorator(f): - self.add_app_template_filter(f, name=name) - return f - return decorator - - def add_app_template_filter(self, f, name=None): - """Register a custom template filter, available application wide. Like - :meth:`Flask.add_template_filter` but for a blueprint. Works exactly - like the :meth:`app_template_filter` decorator. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - def register_template(state): - state.app.jinja_env.filters[name or f.__name__] = f - self.record_once(register_template) - - def app_template_test(self, name=None): - """Register a custom template test, available application wide. Like - :meth:`Flask.template_test` but for a blueprint. - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - def decorator(f): - self.add_app_template_test(f, name=name) - return f - return decorator - - def add_app_template_test(self, f, name=None): - """Register a custom template test, available application wide. Like - :meth:`Flask.add_template_test` but for a blueprint. Works exactly - like the :meth:`app_template_test` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. 
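
The `app_template_filter` family above registers Jinja callables application-wide but defers the work until the blueprint is registered; a sketch:

.. code:: python

    from flask import Blueprint

    bp = Blueprint('text', __name__)

    @bp.app_template_filter()      # name defaults to the function name
    def shout(s):
        return s.upper() + '!'

    # Once app.register_blueprint(bp) runs, any template can use:
    #   {{ 'hello' | shout }}   ->   HELLO!
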
- """ - def register_template(state): - state.app.jinja_env.tests[name or f.__name__] = f - self.record_once(register_template) - - def app_template_global(self, name=None): - """Register a custom template global, available application wide. Like - :meth:`Flask.template_global` but for a blueprint. - - .. versionadded:: 0.10 - - :param name: the optional name of the global, otherwise the - function name will be used. - """ - def decorator(f): - self.add_app_template_global(f, name=name) - return f - return decorator - - def add_app_template_global(self, f, name=None): - """Register a custom template global, available application wide. Like - :meth:`Flask.add_template_global` but for a blueprint. Works exactly - like the :meth:`app_template_global` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the global, otherwise the - function name will be used. - """ - def register_template(state): - state.app.jinja_env.globals[name or f.__name__] = f - self.record_once(register_template) - - def before_request(self, f): - """Like :meth:`Flask.before_request` but for a blueprint. This function - is only executed before each request that is handled by a function of - that blueprint. - """ - self.record_once(lambda s: s.app.before_request_funcs - .setdefault(self.name, []).append(f)) - return f - - def before_app_request(self, f): - """Like :meth:`Flask.before_request`. Such a function is executed - before each request, even if outside of a blueprint. - """ - self.record_once(lambda s: s.app.before_request_funcs - .setdefault(None, []).append(f)) - return f - - def before_app_first_request(self, f): - """Like :meth:`Flask.before_first_request`. Such a function is - executed before the first request to the application. - """ - self.record_once(lambda s: s.app.before_first_request_funcs.append(f)) - return f - - def after_request(self, f): - """Like :meth:`Flask.after_request` but for a blueprint. This function - is only executed after each request that is handled by a function of - that blueprint. - """ - self.record_once(lambda s: s.app.after_request_funcs - .setdefault(self.name, []).append(f)) - return f - - def after_app_request(self, f): - """Like :meth:`Flask.after_request` but for a blueprint. Such a function - is executed after each request, even if outside of the blueprint. - """ - self.record_once(lambda s: s.app.after_request_funcs - .setdefault(None, []).append(f)) - return f - - def teardown_request(self, f): - """Like :meth:`Flask.teardown_request` but for a blueprint. This - function is only executed when tearing down requests handled by a - function of that blueprint. Teardown request functions are executed - when the request context is popped, even when no actual request was - performed. - """ - self.record_once(lambda s: s.app.teardown_request_funcs - .setdefault(self.name, []).append(f)) - return f - - def teardown_app_request(self, f): - """Like :meth:`Flask.teardown_request` but for a blueprint. Such a - function is executed when tearing down each request, even if outside of - the blueprint. - """ - self.record_once(lambda s: s.app.teardown_request_funcs - .setdefault(None, []).append(f)) - return f - - def context_processor(self, f): - """Like :meth:`Flask.context_processor` but for a blueprint. This - function is only executed for requests handled by a blueprint. 
- """ - self.record_once(lambda s: s.app.template_context_processors - .setdefault(self.name, []).append(f)) - return f - - def app_context_processor(self, f): - """Like :meth:`Flask.context_processor` but for a blueprint. Such a - function is executed each request, even if outside of the blueprint. - """ - self.record_once(lambda s: s.app.template_context_processors - .setdefault(None, []).append(f)) - return f - - def app_errorhandler(self, code): - """Like :meth:`Flask.errorhandler` but for a blueprint. This - handler is used for all requests, even if outside of the blueprint. - """ - def decorator(f): - self.record_once(lambda s: s.app.errorhandler(code)(f)) - return f - return decorator - - def url_value_preprocessor(self, f): - """Registers a function as URL value preprocessor for this - blueprint. It's called before the view functions are called and - can modify the url values provided. - """ - self.record_once(lambda s: s.app.url_value_preprocessors - .setdefault(self.name, []).append(f)) - return f - - def url_defaults(self, f): - """Callback function for URL defaults for this blueprint. It's called - with the endpoint and values and should update the values passed - in place. - """ - self.record_once(lambda s: s.app.url_default_functions - .setdefault(self.name, []).append(f)) - return f - - def app_url_value_preprocessor(self, f): - """Same as :meth:`url_value_preprocessor` but application wide. - """ - self.record_once(lambda s: s.app.url_value_preprocessors - .setdefault(None, []).append(f)) - return f - - def app_url_defaults(self, f): - """Same as :meth:`url_defaults` but application wide. - """ - self.record_once(lambda s: s.app.url_default_functions - .setdefault(None, []).append(f)) - return f - - def errorhandler(self, code_or_exception): - """Registers an error handler that becomes active for this blueprint - only. Please be aware that routing does not happen local to a - blueprint so an error handler for 404 usually is not handled by - a blueprint unless it is caused inside a view function. Another - special case is the 500 internal server error which is always looked - up from the application. - - Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator - of the :class:`~flask.Flask` object. - """ - def decorator(f): - self.record_once(lambda s: s.app._register_error_handler( - self.name, code_or_exception, f)) - return f - return decorator diff --git a/src/lib/flask/config.py b/src/lib/flask/config.py deleted file mode 100644 index 155afa2..0000000 --- a/src/lib/flask/config.py +++ /dev/null @@ -1,168 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.config - ~~~~~~~~~~~~ - - Implements the configuration related objects. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - -import imp -import os -import errno - -from werkzeug.utils import import_string -from ._compat import string_types - - -class ConfigAttribute(object): - """Makes an attribute forward to the config""" - - def __init__(self, name, get_converter=None): - self.__name__ = name - self.get_converter = get_converter - - def __get__(self, obj, type=None): - if obj is None: - return self - rv = obj.config[self.__name__] - if self.get_converter is not None: - rv = self.get_converter(rv) - return rv - - def __set__(self, obj, value): - obj.config[self.__name__] = value - - -class Config(dict): - """Works exactly like a dict but provides ways to fill it from files - or special dictionaries. There are two common patterns to populate the - config. 
- - Either you can fill the config from a config file:: - - app.config.from_pyfile('yourconfig.cfg') - - Or alternatively you can define the configuration options in the - module that calls :meth:`from_object` or provide an import path to - a module that should be loaded. It is also possible to tell it to - use the same module and with that provide the configuration values - just before the call:: - - DEBUG = True - SECRET_KEY = 'development key' - app.config.from_object(__name__) - - In both cases (loading from any Python file or loading from modules), - only uppercase keys are added to the config. This makes it possible to use - lowercase values in the config file for temporary values that are not added - to the config or to define the config keys in the same file that implements - the application. - - Probably the most interesting way to load configurations is from an - environment variable pointing to a file:: - - app.config.from_envvar('YOURAPPLICATION_SETTINGS') - - In this case before launching the application you have to set this - environment variable to the file you want to use. On Linux and OS X - use the export statement:: - - export YOURAPPLICATION_SETTINGS='/path/to/config/file' - - On windows use `set` instead. - - :param root_path: path to which files are read relative from. When the - config object is created by the application, this is - the application's :attr:`~flask.Flask.root_path`. - :param defaults: an optional dictionary of default values - """ - - def __init__(self, root_path, defaults=None): - dict.__init__(self, defaults or {}) - self.root_path = root_path - - def from_envvar(self, variable_name, silent=False): - """Loads a configuration from an environment variable pointing to - a configuration file. This is basically just a shortcut with nicer - error messages for this line of code:: - - app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS']) - - :param variable_name: name of the environment variable - :param silent: set to `True` if you want silent failure for missing - files. - :return: bool. `True` if able to load config, `False` otherwise. - """ - rv = os.environ.get(variable_name) - if not rv: - if silent: - return False - raise RuntimeError('The environment variable %r is not set ' - 'and as such configuration could not be ' - 'loaded. Set this variable and make it ' - 'point to a configuration file' % - variable_name) - return self.from_pyfile(rv, silent=silent) - - def from_pyfile(self, filename, silent=False): - """Updates the values in the config from a Python file. This function - behaves as if the file was imported as module with the - :meth:`from_object` function. - - :param filename: the filename of the config. This can either be an - absolute filename or a filename relative to the - root path. - :param silent: set to `True` if you want silent failure for missing - files. - - .. versionadded:: 0.7 - `silent` parameter. - """ - filename = os.path.join(self.root_path, filename) - d = imp.new_module('config') - d.__file__ = filename - try: - with open(filename) as config_file: - exec(compile(config_file.read(), filename, 'exec'), d.__dict__) - except IOError as e: - if silent and e.errno in (errno.ENOENT, errno.EISDIR): - return False - e.strerror = 'Unable to load configuration file (%s)' % e.strerror - raise - self.from_object(d) - return True - - def from_object(self, obj): - """Updates the values from the given object. 
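
`from_envvar` and `from_pyfile` above chain together: the environment variable only names the file, and only uppercase keys survive the load. A sketch of the two common loading styles (module and variable names are illustrative):

.. code:: python

    from flask import Flask

    app = Flask(__name__)

    # Defaults from an importable object -- only UPPERCASE keys are copied.
    app.config.from_object('yourapplication.default_config')

    # Site-specific overrides from a file named by an environment variable;
    # silent=True returns False instead of raising when the variable is unset.
    loaded = app.config.from_envvar('YOURAPPLICATION_SETTINGS', silent=True)
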
An object can be of one - of the following two types: - - - a string: in this case the object with that name will be imported - - an actual object reference: that object is used directly - - Objects are usually either modules or classes. - - Just the uppercase variables in that object are stored in the config. - Example usage:: - - app.config.from_object('yourapplication.default_config') - from yourapplication import default_config - app.config.from_object(default_config) - - You should not use this function to load the actual configuration but - rather configuration defaults. The actual config should be loaded - with :meth:`from_pyfile` and ideally from a location not within the - package because the package might be installed system wide. - - :param obj: an import name or object - """ - if isinstance(obj, string_types): - obj = import_string(obj) - for key in dir(obj): - if key.isupper(): - self[key] = getattr(obj, key) - - def __repr__(self): - return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self)) diff --git a/src/lib/flask/debughelpers.py b/src/lib/flask/debughelpers.py deleted file mode 100644 index 2f8510f..0000000 --- a/src/lib/flask/debughelpers.py +++ /dev/null @@ -1,87 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.debughelpers - ~~~~~~~~~~~~~~~~~~ - - Various helpers to make the development experience better. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" -from ._compat import implements_to_string - - -class UnexpectedUnicodeError(AssertionError, UnicodeError): - """Raised in places where we want some better error reporting for - unexpected unicode or binary data. - """ - - -@implements_to_string -class DebugFilesKeyError(KeyError, AssertionError): - """Raised from request.files during debugging. The idea is that it can - provide a better error message than just a generic KeyError/BadRequest. - """ - - def __init__(self, request, key): - form_matches = request.form.getlist(key) - buf = ['You tried to access the file "%s" in the request.files ' - 'dictionary but it does not exist. The mimetype for the request ' - 'is "%s" instead of "multipart/form-data" which means that no ' - 'file contents were transmitted. To fix this error you should ' - 'provide enctype="multipart/form-data" in your form.' % - (key, request.mimetype)] - if form_matches: - buf.append('\n\nThe browser instead transmitted some file names. ' - 'This was submitted: %s' % ', '.join('"%s"' % x - for x in form_matches)) - self.msg = ''.join(buf) - - def __str__(self): - return self.msg - - -class FormDataRoutingRedirect(AssertionError): - """This exception is raised by Flask in debug mode if it detects a - redirect caused by the routing system when the request method is not - GET, HEAD or OPTIONS. Reasoning: form data will be dropped. - """ - - def __init__(self, request): - exc = request.routing_exception - buf = ['A request was sent to this URL (%s) but a redirect was ' - 'issued automatically by the routing system to "%s".' - % (request.url, exc.new_url)] - - # In case just a slash was appended we can be extra helpful - if request.base_url + '/' == exc.new_url.split('?')[0]: - buf.append(' The URL was defined with a trailing slash so ' - 'Flask will automatically redirect to the URL ' - 'with the trailing slash if it was accessed ' - 'without one.') - - buf.append(' Make sure to directly send your %s-request to this URL ' - 'since we can\'t make browsers or HTTP clients redirect ' - 'with form data reliably or without user interaction.' 
% - request.method) - buf.append('\n\nNote: this exception is only raised in debug mode') - AssertionError.__init__(self, ''.join(buf).encode('utf-8')) - - -def attach_enctype_error_multidict(request): - """Since Flask 0.8 we're monkeypatching the files object in case a - request is detected that does not use multipart form data but the files - object is accessed. - """ - oldcls = request.files.__class__ - class newcls(oldcls): - def __getitem__(self, key): - try: - return oldcls.__getitem__(self, key) - except KeyError as e: - if key not in request.form: - raise - raise DebugFilesKeyError(request, key) - newcls.__name__ = oldcls.__name__ - newcls.__module__ = oldcls.__module__ - request.files.__class__ = newcls diff --git a/src/lib/flask/exceptions.py b/src/lib/flask/exceptions.py deleted file mode 100644 index 9ccdeda..0000000 --- a/src/lib/flask/exceptions.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.exceptions - ~~~~~~~~~~~~ - - Flask specific additions to :class:`~werkzeug.exceptions.HTTPException` - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" -from werkzeug.exceptions import HTTPException, BadRequest -from .helpers import json - - -class JSONHTTPException(HTTPException): - """A base class for HTTP exceptions with ``Content-Type: - application/json``. - - The ``description`` attribute of this class must set to a string (*not* an - HTML string) which describes the error. - - """ - - def get_body(self, environ): - """Overrides :meth:`werkzeug.exceptions.HTTPException.get_body` to - return the description of this error in JSON format instead of HTML. - - """ - return json.dumps(dict(description=self.get_description(environ))) - - def get_headers(self, environ): - """Returns a list of headers including ``Content-Type: - application/json``. - - """ - return [('Content-Type', 'application/json')] - - -class JSONBadRequest(JSONHTTPException, BadRequest): - """Represents an HTTP ``400 Bad Request`` error whose body contains an - error message in JSON format instead of HTML format (as in the superclass). - - """ - - #: The description of the error which occurred as a string. - description = ( - 'The browser (or proxy) sent a request that this server could not ' - 'understand.' - ) diff --git a/src/lib/flask/ext/__init__.py b/src/lib/flask/ext/__init__.py deleted file mode 100644 index f29958a..0000000 --- a/src/lib/flask/ext/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.ext - ~~~~~~~~~ - - Redirect imports for extensions. This module basically makes it possible - for us to transition from flaskext.foo to flask_foo without having to - force all extensions to upgrade at the same time. - - When a user does ``from flask.ext.foo import bar`` it will attempt to - import ``from flask_foo import bar`` first and when that fails it will - try to import ``from flaskext.foo import bar``. - - We're switching from namespace packages because it was just too painful for - everybody involved. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. 
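
`JSONBadRequest` above demonstrates the intended pattern for `JSONHTTPException`: mix it with a werkzeug exception class and set `description`. A sketch of another such subclass (the class name is hypothetical):

.. code:: python

    from werkzeug.exceptions import NotFound
    from flask.exceptions import JSONHTTPException

    class JSONNotFound(JSONHTTPException, NotFound):
        """A 404 whose body is JSON instead of HTML."""
        description = 'The requested resource was not found.'

    # Raising JSONNotFound() in a view produces a 404 response carrying
    # the description in a JSON body, with Content-Type: application/json.
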
-""" - - -def setup(): - from ..exthook import ExtensionImporter - importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], __name__) - importer.install() - - -setup() -del setup diff --git a/src/lib/flask/globals.py b/src/lib/flask/globals.py deleted file mode 100644 index 67d41f5..0000000 --- a/src/lib/flask/globals.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.globals - ~~~~~~~~~~~~~ - - Defines all the global objects that are proxies to the current - active context. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - -from functools import partial -from werkzeug.local import LocalStack, LocalProxy - - -def _lookup_req_object(name): - top = _request_ctx_stack.top - if top is None: - raise RuntimeError('working outside of request context') - return getattr(top, name) - - -def _lookup_app_object(name): - top = _app_ctx_stack.top - if top is None: - raise RuntimeError('working outside of application context') - return getattr(top, name) - - -def _find_app(): - top = _app_ctx_stack.top - if top is None: - raise RuntimeError('working outside of application context') - return top.app - - -# context locals -_request_ctx_stack = LocalStack() -_app_ctx_stack = LocalStack() -current_app = LocalProxy(_find_app) -request = LocalProxy(partial(_lookup_req_object, 'request')) -session = LocalProxy(partial(_lookup_req_object, 'session')) -g = LocalProxy(partial(_lookup_app_object, 'g')) diff --git a/src/lib/flask/helpers.py b/src/lib/flask/helpers.py deleted file mode 100644 index 1e7c87f..0000000 --- a/src/lib/flask/helpers.py +++ /dev/null @@ -1,849 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.helpers - ~~~~~~~~~~~~~ - - Implements various helpers. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - -import os -import sys -import pkgutil -import posixpath -import mimetypes -from time import time -from zlib import adler32 -from threading import RLock -from werkzeug.routing import BuildError -from functools import update_wrapper - -try: - from werkzeug.urls import url_quote -except ImportError: - from urlparse import quote as url_quote - -from werkzeug.datastructures import Headers -from werkzeug.exceptions import NotFound - -# this was moved in 0.7 -try: - from werkzeug.wsgi import wrap_file -except ImportError: - from werkzeug.utils import wrap_file - -from jinja2 import FileSystemLoader - -from .signals import message_flashed -from .globals import session, _request_ctx_stack, _app_ctx_stack, \ - current_app, request -from ._compat import string_types, text_type - - -# sentinel -_missing = object() - - -# what separators does this operating system provide that are not a slash? -# this is used by the send_from_directory function to ensure that nobody is -# able to access files from outside the filesystem. -_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep] - if sep not in (None, '/')) - - -def _endpoint_from_view_func(view_func): - """Internal helper that returns the default endpoint for a given - function. This always is the function name. - """ - assert view_func is not None, 'expected view func if endpoint ' \ - 'is not provided.' - return view_func.__name__ - - -def stream_with_context(generator_or_function): - """Request contexts disappear when the response is started on the server. - This is done for efficiency reasons and to make it less likely to encounter - memory leaks with badly written WSGI middlewares. 
The downside is that if - you are using streamed responses, the generator cannot access request bound - information any more. - - This function however can help you keep the context around for longer:: - - from flask import stream_with_context, request, Response - - @app.route('/stream') - def streamed_response(): - @stream_with_context - def generate(): - yield 'Hello ' - yield request.args['name'] - yield '!' - return Response(generate()) - - Alternatively it can also be used around a specific generator:: - - from flask import stream_with_context, request, Response - - @app.route('/stream') - def streamed_response(): - def generate(): - yield 'Hello ' - yield request.args['name'] - yield '!' - return Response(stream_with_context(generate())) - - .. versionadded:: 0.9 - """ - try: - gen = iter(generator_or_function) - except TypeError: - def decorator(*args, **kwargs): - gen = generator_or_function() - return stream_with_context(gen) - return update_wrapper(decorator, generator_or_function) - - def generator(): - ctx = _request_ctx_stack.top - if ctx is None: - raise RuntimeError('Attempted to stream with context but ' - 'there was no context in the first place to keep around.') - with ctx: - # Dummy sentinel. Has to be inside the context block or we're - # not actually keeping the context around. - yield None - - # The try/finally is here so that if someone passes a WSGI level - # iterator in we're still running the cleanup logic. Generators - # don't need that because they are closed on their destruction - # automatically. - try: - for item in gen: - yield item - finally: - if hasattr(gen, 'close'): - gen.close() - - # The trick is to start the generator. Then the code execution runs until - # the first dummy None is yielded at which point the context was already - # pushed. This item is discarded. Then when the iteration continues the - # real generator is executed. - wrapped_g = generator() - next(wrapped_g) - return wrapped_g - - -def make_response(*args): - """Sometimes it is necessary to set additional headers in a view. Because - views do not have to return response objects but can return a value that - is converted into a response object by Flask itself, it becomes tricky to - add headers to it. This function can be called instead of using a return - and you will get a response object which you can use to attach headers. - - If view looked like this and you want to add a new header:: - - def index(): - return render_template('index.html', foo=42) - - You can now do something like this:: - - def index(): - response = make_response(render_template('index.html', foo=42)) - response.headers['X-Parachutes'] = 'parachutes are cool' - return response - - This function accepts the very same arguments you can return from a - view function. This for example creates a response with a 404 error - code:: - - response = make_response(render_template('not_found.html'), 404) - - The other use case of this function is to force the return value of a - view function into a response which is helpful with view - decorators:: - - response = make_response(view_function()) - response.headers['X-Parachutes'] = 'parachutes are cool' - - Internally this function does the following things: - - - if no arguments are passed, it creates a new response argument - - if one argument is passed, :meth:`flask.Flask.make_response` - is invoked with it. - - if more than one argument is passed, the arguments are passed - to the :meth:`flask.Flask.make_response` function as tuple. - - .. 
versionadded:: 0.6 - """ - if not args: - return current_app.response_class() - if len(args) == 1: - args = args[0] - return current_app.make_response(args) - - -def url_for(endpoint, **values): - """Generates a URL to the given endpoint with the method provided. - - Variable arguments that are unknown to the target endpoint are appended - to the generated URL as query arguments. If the value of a query argument - is `None`, the whole pair is skipped. In case blueprints are active - you can shortcut references to the same blueprint by prefixing the - local endpoint with a dot (``.``). - - This will reference the index function local to the current blueprint:: - - url_for('.index') - - For more information, head over to the :ref:`Quickstart `. - - To integrate applications, :class:`Flask` has a hook to intercept URL build - errors through :attr:`Flask.build_error_handler`. The `url_for` function - results in a :exc:`~werkzeug.routing.BuildError` when the current app does - not have a URL for the given endpoint and values. When it does, the - :data:`~flask.current_app` calls its :attr:`~Flask.build_error_handler` if - it is not `None`, which can return a string to use as the result of - `url_for` (instead of `url_for`'s default to raise the - :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception. - An example:: - - def external_url_handler(error, endpoint, **values): - "Looks up an external URL when `url_for` cannot build a URL." - # This is an example of hooking the build_error_handler. - # Here, lookup_url is some utility function you've built - # which looks up the endpoint in some external URL registry. - url = lookup_url(endpoint, **values) - if url is None: - # External lookup did not have a URL. - # Re-raise the BuildError, in context of original traceback. - exc_type, exc_value, tb = sys.exc_info() - if exc_value is error: - raise exc_type, exc_value, tb - else: - raise error - # url_for will use this result, instead of raising BuildError. - return url - - app.build_error_handler = external_url_handler - - Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and - `endpoint` and `**values` are the arguments passed into `url_for`. Note - that this is for building URLs outside the current application, and not for - handling 404 NotFound errors. - - .. versionadded:: 0.10 - The `_scheme` parameter was added. - - .. versionadded:: 0.9 - The `_anchor` and `_method` parameters were added. - - .. versionadded:: 0.9 - Calls :meth:`Flask.handle_build_error` on - :exc:`~werkzeug.routing.BuildError`. - - :param endpoint: the endpoint of the URL (name of the function) - :param values: the variable arguments of the URL rule - :param _external: if set to `True`, an absolute URL is generated. Server - address can be changed via `SERVER_NAME` configuration variable which - defaults to `localhost`. - :param _scheme: a string specifying the desired URL scheme. The `_external` - parameter must be set to `True` or a `ValueError` is raised. - :param _anchor: if provided this is added as anchor to the URL. - :param _method: if provided this explicitly specifies an HTTP method. - """ - appctx = _app_ctx_stack.top - reqctx = _request_ctx_stack.top - if appctx is None: - raise RuntimeError('Attempted to generate a URL without the ' - 'application context being pushed. This has to be ' - 'executed when application context is available.') - - # If request specific information is available we have some extra - # features that support "relative" urls. 
- if reqctx is not None: - url_adapter = reqctx.url_adapter - blueprint_name = request.blueprint - if not reqctx.request._is_old_module: - if endpoint[:1] == '.': - if blueprint_name is not None: - endpoint = blueprint_name + endpoint - else: - endpoint = endpoint[1:] - else: - # TODO: get rid of this deprecated functionality in 1.0 - if '.' not in endpoint: - if blueprint_name is not None: - endpoint = blueprint_name + '.' + endpoint - elif endpoint.startswith('.'): - endpoint = endpoint[1:] - external = values.pop('_external', False) - - # Otherwise go with the url adapter from the appctx and make - # the urls external by default. - else: - url_adapter = appctx.url_adapter - if url_adapter is None: - raise RuntimeError('Application was not able to create a URL ' - 'adapter for request independent URL generation. ' - 'You might be able to fix this by setting ' - 'the SERVER_NAME config variable.') - external = values.pop('_external', True) - - anchor = values.pop('_anchor', None) - method = values.pop('_method', None) - scheme = values.pop('_scheme', None) - appctx.app.inject_url_defaults(endpoint, values) - - if scheme is not None: - if not external: - raise ValueError('When specifying _scheme, _external must be True') - url_adapter.url_scheme = scheme - - try: - rv = url_adapter.build(endpoint, values, method=method, - force_external=external) - except BuildError as error: - # We need to inject the values again so that the app callback can - # deal with that sort of stuff. - values['_external'] = external - values['_anchor'] = anchor - values['_method'] = method - return appctx.app.handle_url_build_error(error, endpoint, values) - - if anchor is not None: - rv += '#' + url_quote(anchor) - return rv - - -def get_template_attribute(template_name, attribute): - """Loads a macro (or variable) a template exports. This can be used to - invoke a macro from within Python code. If you for example have a - template named `_cider.html` with the following contents: - - .. sourcecode:: html+jinja - - {% macro hello(name) %}Hello {{ name }}!{% endmacro %} - - You can access this from Python code like this:: - - hello = get_template_attribute('_cider.html', 'hello') - return hello('World') - - .. versionadded:: 0.2 - - :param template_name: the name of the template - :param attribute: the name of the variable of macro to access - """ - return getattr(current_app.jinja_env.get_template(template_name).module, - attribute) - - -def flash(message, category='message'): - """Flashes a message to the next request. In order to remove the - flashed message from the session and to display it to the user, - the template has to call :func:`get_flashed_messages`. - - .. versionchanged:: 0.3 - `category` parameter added. - - :param message: the message to be flashed. - :param category: the category for the message. The following values - are recommended: ``'message'`` for any kind of message, - ``'error'`` for errors, ``'info'`` for information - messages and ``'warning'`` for warnings. However any - kind of string can be used as category. - """ - # Original implementation: - # - # session.setdefault('_flashes', []).append((category, message)) - # - # This assumed that changes made to mutable structures in the session are - # are always in sync with the sess on object, which is not true for session - # implementations that use external storage for keeping their keys/values. 
- flashes = session.get('_flashes', []) - flashes.append((category, message)) - session['_flashes'] = flashes - message_flashed.send(current_app._get_current_object(), - message=message, category=category) - - -def get_flashed_messages(with_categories=False, category_filter=[]): - """Pulls all flashed messages from the session and returns them. - Further calls in the same request to the function will return - the same messages. By default just the messages are returned, - but when `with_categories` is set to `True`, the return value will - be a list of tuples in the form ``(category, message)`` instead. - - Filter the flashed messages to one or more categories by providing those - categories in `category_filter`. This allows rendering categories in - separate html blocks. The `with_categories` and `category_filter` - arguments are distinct: - - * `with_categories` controls whether categories are returned with message - text (`True` gives a tuple, where `False` gives just the message text). - * `category_filter` filters the messages down to only those matching the - provided categories. - - See :ref:`message-flashing-pattern` for examples. - - .. versionchanged:: 0.3 - `with_categories` parameter added. - - .. versionchanged:: 0.9 - `category_filter` parameter added. - - :param with_categories: set to `True` to also receive categories. - :param category_filter: whitelist of categories to limit return values - """ - flashes = _request_ctx_stack.top.flashes - if flashes is None: - _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \ - if '_flashes' in session else [] - if category_filter: - flashes = list(filter(lambda f: f[0] in category_filter, flashes)) - if not with_categories: - return [x[1] for x in flashes] - return flashes - - -def send_file(filename_or_fp, mimetype=None, as_attachment=False, - attachment_filename=None, add_etags=True, - cache_timeout=None, conditional=False): - """Sends the contents of a file to the client. This will use the - most efficient method available and configured. By default it will - try to use the WSGI server's file_wrapper support. Alternatively - you can set the application's :attr:`~Flask.use_x_sendfile` attribute - to ``True`` to directly emit an `X-Sendfile` header. This however - requires support of the underlying webserver for `X-Sendfile`. - - By default it will try to guess the mimetype for you, but you can - also explicitly provide one. For extra security you probably want - to send certain files as attachment (HTML for instance). The mimetype - guessing requires a `filename` or an `attachment_filename` to be - provided. - - Please never pass filenames to this function from user sources without - checking them first. Something like this is usually sufficient to - avoid security problems:: - - if '..' in filename or filename.startswith('/'): - abort(404) - - .. versionadded:: 0.2 - - .. versionadded:: 0.5 - The `add_etags`, `cache_timeout` and `conditional` parameters were - added. The default behavior is now to attach etags. - - .. versionchanged:: 0.7 - mimetype guessing and etag support for file objects was - deprecated because it was unreliable. Pass a filename if you are - able to, otherwise attach an etag yourself. This functionality - will be removed in Flask 1.0 - - .. versionchanged:: 0.9 - cache_timeout pulls its default from application config, when None. - - :param filename_or_fp: the filename of the file to send. This is - relative to the :attr:`~Flask.root_path` if a - relative path is specified. 
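
Note how `flash()` above re-assigns the whole `_flashes` list into the session rather than mutating it in place, so session backends that use external storage see the change. The template-side counterpart is `get_flashed_messages`; a sketch:

.. code:: python

    from flask import flash, get_flashed_messages

    def save_view():
        flash('Saved successfully.')          # category defaults to 'message'
        flash('Disk is almost full.', 'warning')

    # Usually called from the template rendering the *next* request:
    #   get_flashed_messages(with_categories=True)
    #   -> [('message', 'Saved successfully.'),
    #       ('warning', 'Disk is almost full.')]
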
- Alternatively a file object might be provided - in which case `X-Sendfile` might not work and - fall back to the traditional method. Make sure - that the file pointer is positioned at the start - of data to send before calling :func:`send_file`. - :param mimetype: the mimetype of the file if provided, otherwise - auto detection happens. - :param as_attachment: set to `True` if you want to send this file with - a ``Content-Disposition: attachment`` header. - :param attachment_filename: the filename for the attachment if it - differs from the file's filename. - :param add_etags: set to `False` to disable attaching of etags. - :param conditional: set to `True` to enable conditional responses. - - :param cache_timeout: the timeout in seconds for the headers. When `None` - (default), this value is set by - :meth:`~Flask.get_send_file_max_age` of - :data:`~flask.current_app`. - """ - mtime = None - if isinstance(filename_or_fp, string_types): - filename = filename_or_fp - file = None - else: - from warnings import warn - file = filename_or_fp - filename = getattr(file, 'name', None) - - # XXX: this behavior is now deprecated because it was unreliable. - # removed in Flask 1.0 - if not attachment_filename and not mimetype \ - and isinstance(filename, string_types): - warn(DeprecationWarning('The filename support for file objects ' - 'passed to send_file is now deprecated. Pass an ' - 'attach_filename if you want mimetypes to be guessed.'), - stacklevel=2) - if add_etags: - warn(DeprecationWarning('In future flask releases etags will no ' - 'longer be generated for file objects passed to the send_file ' - 'function because this behavior was unreliable. Pass ' - 'filenames instead if possible, otherwise attach an etag ' - 'yourself based on another value'), stacklevel=2) - - if filename is not None: - if not os.path.isabs(filename): - filename = os.path.join(current_app.root_path, filename) - if mimetype is None and (filename or attachment_filename): - mimetype = mimetypes.guess_type(filename or attachment_filename)[0] - if mimetype is None: - mimetype = 'application/octet-stream' - - headers = Headers() - if as_attachment: - if attachment_filename is None: - if filename is None: - raise TypeError('filename unavailable, required for ' - 'sending as attachment') - attachment_filename = os.path.basename(filename) - headers.add('Content-Disposition', 'attachment', - filename=attachment_filename) - - if current_app.use_x_sendfile and filename: - if file is not None: - file.close() - headers['X-Sendfile'] = filename - headers['Content-Length'] = os.path.getsize(filename) - data = None - else: - if file is None: - file = open(filename, 'rb') - mtime = os.path.getmtime(filename) - headers['Content-Length'] = os.path.getsize(filename) - data = wrap_file(request.environ, file) - - rv = current_app.response_class(data, mimetype=mimetype, headers=headers, - direct_passthrough=True) - - # if we know the file modification date, we can store it as the - # the time of the last modification. 
- if mtime is not None: - rv.last_modified = int(mtime) - - rv.cache_control.public = True - if cache_timeout is None: - cache_timeout = current_app.get_send_file_max_age(filename) - if cache_timeout is not None: - rv.cache_control.max_age = cache_timeout - rv.expires = int(time() + cache_timeout) - - if add_etags and filename is not None: - rv.set_etag('flask-%s-%s-%s' % ( - os.path.getmtime(filename), - os.path.getsize(filename), - adler32( - filename.encode('utf-8') if isinstance(filename, text_type) - else filename - ) & 0xffffffff - )) - if conditional: - rv = rv.make_conditional(request) - # make sure we don't send x-sendfile for servers that - # ignore the 304 status code for x-sendfile. - if rv.status_code == 304: - rv.headers.pop('x-sendfile', None) - return rv - - -def safe_join(directory, filename): - """Safely join `directory` and `filename`. - - Example usage:: - - @app.route('/wiki/') - def wiki_page(filename): - filename = safe_join(app.config['WIKI_FOLDER'], filename) - with open(filename, 'rb') as fd: - content = fd.read() # Read and process the file content... - - :param directory: the base directory. - :param filename: the untrusted filename relative to that directory. - :raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path - would fall out of `directory`. - """ - filename = posixpath.normpath(filename) - for sep in _os_alt_seps: - if sep in filename: - raise NotFound() - if os.path.isabs(filename) or \ - filename == '..' or \ - filename.startswith('../'): - raise NotFound() - return os.path.join(directory, filename) - - -def send_from_directory(directory, filename, **options): - """Send a file from a given directory with :func:`send_file`. This - is a secure way to quickly expose static files from an upload folder - or something similar. - - Example usage:: - - @app.route('/uploads/') - def download_file(filename): - return send_from_directory(app.config['UPLOAD_FOLDER'], - filename, as_attachment=True) - - .. admonition:: Sending files and Performance - - It is strongly recommended to activate either `X-Sendfile` support in - your webserver or (if no authentication happens) to tell the webserver - to serve files for the given path on its own without calling into the - web application for improved performance. - - .. versionadded:: 0.5 - - :param directory: the directory where all the files are stored. - :param filename: the filename relative to that directory to - download. - :param options: optional keyword arguments that are directly - forwarded to :func:`send_file`. - """ - filename = safe_join(directory, filename) - if not os.path.isfile(filename): - raise NotFound() - options.setdefault('conditional', True) - return send_file(filename, **options) - - -def get_root_path(import_name): - """Returns the path to a package or cwd if that cannot be found. This - returns the path of a package or the folder that contains a module. - - Not to be confused with the package path returned by :func:`find_package`. - """ - # Module already imported and has a file attribute. Use that first. - mod = sys.modules.get(import_name) - if mod is not None and hasattr(mod, '__file__'): - return os.path.dirname(os.path.abspath(mod.__file__)) - - # Next attempt: check the loader. - loader = pkgutil.get_loader(import_name) - - # Loader does not exist or we're referring to an unloaded main module - # or a main module without path (interactive sessions), go with the - # current working directory. 
- if loader is None or import_name == '__main__': - return os.getcwd() - - # For .egg, zipimporter does not have get_filename until Python 2.7. - # Some other loaders might exhibit the same behavior. - if hasattr(loader, 'get_filename'): - filepath = loader.get_filename(import_name) - else: - # Fall back to imports. - __import__(import_name) - filepath = sys.modules[import_name].__file__ - - # filepath is import_name.py for a module, or __init__.py for a package. - return os.path.dirname(os.path.abspath(filepath)) - - -def find_package(import_name): - """Finds a package and returns the prefix (or None if the package is - not installed) as well as the folder that contains the package or - module as a tuple. The package path returned is the module that would - have to be added to the pythonpath in order to make it possible to - import the module. The prefix is the path below which a UNIX like - folder structure exists (lib, share etc.). - """ - root_mod_name = import_name.split('.')[0] - loader = pkgutil.get_loader(root_mod_name) - if loader is None or import_name == '__main__': - # import name is not found, or interactive/main module - package_path = os.getcwd() - else: - # For .egg, zipimporter does not have get_filename until Python 2.7. - if hasattr(loader, 'get_filename'): - filename = loader.get_filename(root_mod_name) - elif hasattr(loader, 'archive'): - # zipimporter's loader.archive points to the .egg or .zip - # archive filename is dropped in call to dirname below. - filename = loader.archive - else: - # At least one loader is missing both get_filename and archive: - # Google App Engine's HardenedModulesHook - # - # Fall back to imports. - __import__(import_name) - filename = sys.modules[import_name].__file__ - package_path = os.path.abspath(os.path.dirname(filename)) - # package_path ends with __init__.py for a package - if loader.is_package(root_mod_name): - package_path = os.path.dirname(package_path) - - site_parent, site_folder = os.path.split(package_path) - py_prefix = os.path.abspath(sys.prefix) - if package_path.startswith(py_prefix): - return py_prefix, package_path - elif site_folder.lower() == 'site-packages': - parent, folder = os.path.split(site_parent) - # Windows like installations - if folder.lower() == 'lib': - base_dir = parent - # UNIX like installations - elif os.path.basename(parent).lower() == 'lib': - base_dir = os.path.dirname(parent) - else: - base_dir = site_parent - return base_dir, package_path - return None, package_path - - -class locked_cached_property(object): - """A decorator that converts a function into a lazy property. The - function wrapped is called the first time to retrieve the result - and then that calculated result is used the next time you access - the value. Works like the one in Werkzeug but has a lock for - thread safety. - """ - - def __init__(self, func, name=None, doc=None): - self.__name__ = name or func.__name__ - self.__module__ = func.__module__ - self.__doc__ = doc or func.__doc__ - self.func = func - self.lock = RLock() - - def __get__(self, obj, type=None): - if obj is None: - return self - with self.lock: - value = obj.__dict__.get(self.__name__, _missing) - if value is _missing: - value = self.func(obj) - obj.__dict__[self.__name__] = value - return value - - -class _PackageBoundObject(object): - - def __init__(self, import_name, template_folder=None): - #: The name of the package or module. Do not change this once - #: it was set by the constructor. - self.import_name = import_name - - #: location of the templates. 
`None` if templates should not be - #: exposed. - self.template_folder = template_folder - - #: Where is the app root located? - self.root_path = get_root_path(self.import_name) - - self._static_folder = None - self._static_url_path = None - - def _get_static_folder(self): - if self._static_folder is not None: - return os.path.join(self.root_path, self._static_folder) - def _set_static_folder(self, value): - self._static_folder = value - static_folder = property(_get_static_folder, _set_static_folder) - del _get_static_folder, _set_static_folder - - def _get_static_url_path(self): - if self._static_url_path is None: - if self.static_folder is None: - return None - return '/' + os.path.basename(self.static_folder) - return self._static_url_path - def _set_static_url_path(self, value): - self._static_url_path = value - static_url_path = property(_get_static_url_path, _set_static_url_path) - del _get_static_url_path, _set_static_url_path - - @property - def has_static_folder(self): - """This is `True` if the package bound object's container has a - folder named ``'static'``. - - .. versionadded:: 0.5 - """ - return self.static_folder is not None - - @locked_cached_property - def jinja_loader(self): - """The Jinja loader for this package bound object. - - .. versionadded:: 0.5 - """ - if self.template_folder is not None: - return FileSystemLoader(os.path.join(self.root_path, - self.template_folder)) - - def get_send_file_max_age(self, filename): - """Provides default cache_timeout for the :func:`send_file` functions. - - By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from - the configuration of :data:`~flask.current_app`. - - Static file functions such as :func:`send_from_directory` use this - function, and :func:`send_file` calls this function on - :data:`~flask.current_app` when the given cache_timeout is `None`. If a - cache_timeout is given in :func:`send_file`, that timeout is used; - otherwise, this method is called. - - This allows subclasses to change the behavior when sending files based - on the filename. For example, to set the cache timeout for .js files - to 60 seconds:: - - class MyFlask(flask.Flask): - def get_send_file_max_age(self, name): - if name.lower().endswith('.js'): - return 60 - return flask.Flask.get_send_file_max_age(self, name) - - .. versionadded:: 0.9 - """ - return current_app.config['SEND_FILE_MAX_AGE_DEFAULT'] - - def send_static_file(self, filename): - """Function used internally to send static files from the static - folder to the browser. - - .. versionadded:: 0.5 - """ - if not self.has_static_folder: - raise RuntimeError('No static folder for this object') - # Ensure get_send_file_max_age is called in all cases. - # Here, we ensure get_send_file_max_age is called for Blueprints. - cache_timeout = self.get_send_file_max_age(filename) - return send_from_directory(self.static_folder, filename, - cache_timeout=cache_timeout) - - def open_resource(self, resource, mode='rb'): - """Opens a resource from the application's resource folder. To see - how this works, consider the following folder structure:: - - /myapplication.py - /schema.sql - /static - /style.css - /templates - /layout.html - /index.html - - If you want to open the `schema.sql` file you would do the - following:: - - with app.open_resource('schema.sql') as f: - contents = f.read() - do_something_with(contents) - - :param resource: the name of the resource. To access resources within - subfolders use forward slashes as separator. 
- :param mode: resource file opening mode, default is 'rb'. - """ - if mode not in ('r', 'rb'): - raise ValueError('Resources can only be opened for reading') - return open(os.path.join(self.root_path, resource), mode) diff --git a/src/lib/flask/json.py b/src/lib/flask/json.py deleted file mode 100644 index d1cda5a..0000000 --- a/src/lib/flask/json.py +++ /dev/null @@ -1,228 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.jsonimpl - ~~~~~~~~~~~~~~ - - Implementation helpers for the JSON support in Flask. - - :copyright: (c) 2012 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" -import io -import uuid -from datetime import datetime -from .globals import current_app, request -from ._compat import text_type, PY2 - -from werkzeug.http import http_date -from jinja2 import Markup - -# Use the same json implementation as itsdangerous on which we -# depend anyways. -try: - from itsdangerous import simplejson as _json -except ImportError: - from itsdangerous import json as _json - - -# figure out if simplejson escapes slashes. This behavior was changed -# from one version to another without reason. -_slash_escape = '\\/' not in _json.dumps('/') - - -__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump', - 'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder', - 'jsonify'] - - -def _wrap_reader_for_text(fp, encoding): - if isinstance(fp.read(0), bytes): - fp = io.TextIOWrapper(io.BufferedReader(fp), encoding) - return fp - - -def _wrap_writer_for_text(fp, encoding): - try: - fp.write('') - except TypeError: - fp = io.TextIOWrapper(fp, encoding) - return fp - - -class JSONEncoder(_json.JSONEncoder): - """The default Flask JSON encoder. This one extends the default simplejson - encoder by also supporting ``datetime`` objects, ``UUID`` as well as - ``Markup`` objects which are serialized as RFC 822 datetime strings (same - as the HTTP date format). In order to support more data types override the - :meth:`default` method. - """ - - def default(self, o): - """Implement this method in a subclass such that it returns a - serializable object for ``o``, or calls the base implementation (to - raise a ``TypeError``). - - For example, to support arbitrary iterators, you could implement - default like this:: - - def default(self, o): - try: - iterable = iter(o) - except TypeError: - pass - else: - return list(iterable) - return JSONEncoder.default(self, o) - """ - if isinstance(o, datetime): - return http_date(o) - if isinstance(o, uuid.UUID): - return str(o) - if hasattr(o, '__html__'): - return text_type(o.__html__()) - return _json.JSONEncoder.default(self, o) - - -class JSONDecoder(_json.JSONDecoder): - """The default JSON decoder. This one does not change the behavior from - the default simplejson encoder. Consult the :mod:`json` documentation - for more information. This decoder is not only used for the load - functions of this module but also :attr:`~flask.Request`. 
- """ - - -def _dump_arg_defaults(kwargs): - """Inject default arguments for dump functions.""" - if current_app: - kwargs.setdefault('cls', current_app.json_encoder) - if not current_app.config['JSON_AS_ASCII']: - kwargs.setdefault('ensure_ascii', False) - kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS']) - else: - kwargs.setdefault('sort_keys', True) - kwargs.setdefault('cls', JSONEncoder) - - -def _load_arg_defaults(kwargs): - """Inject default arguments for load functions.""" - if current_app: - kwargs.setdefault('cls', current_app.json_decoder) - else: - kwargs.setdefault('cls', JSONDecoder) - - -def dumps(obj, **kwargs): - """Serialize ``obj`` to a JSON formatted ``str`` by using the application's - configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an - application on the stack. - - This function can return ``unicode`` strings or ascii-only bytestrings by - default which coerce into unicode strings automatically. That behavior by - default is controlled by the ``JSON_AS_ASCII`` configuration variable - and can be overriden by the simplejson ``ensure_ascii`` parameter. - """ - _dump_arg_defaults(kwargs) - encoding = kwargs.pop('encoding', None) - rv = _json.dumps(obj, **kwargs) - if encoding is not None and isinstance(rv, text_type): - rv = rv.encode(encoding) - return rv - - -def dump(obj, fp, **kwargs): - """Like :func:`dumps` but writes into a file object.""" - _dump_arg_defaults(kwargs) - encoding = kwargs.pop('encoding', None) - if encoding is not None: - fp = _wrap_writer_for_text(fp, encoding) - _json.dump(obj, fp, **kwargs) - - -def loads(s, **kwargs): - """Unserialize a JSON object from a string ``s`` by using the application's - configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an - application on the stack. - """ - _load_arg_defaults(kwargs) - if isinstance(s, bytes): - s = s.decode(kwargs.pop('encoding', None) or 'utf-8') - return _json.loads(s, **kwargs) - - -def load(fp, **kwargs): - """Like :func:`loads` but reads from a file object. - """ - _load_arg_defaults(kwargs) - if not PY2: - fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8') - return _json.load(fp, **kwargs) - - -def htmlsafe_dumps(obj, **kwargs): - """Works exactly like :func:`dumps` but is safe for use in ``') - self.assert_equal(rv, u'"\\u003c/script\\u003e"') - self.assert_equal(type(rv), text_type) - rv = render('{{ ""|tojson }}') - self.assert_equal(rv, '"\\u003c/script\\u003e"') - rv = render('{{ "<\0/script>"|tojson }}') - self.assert_equal(rv, '"\\u003c\\u0000/script\\u003e"') - rv = render('{{ " -
- - - - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/go/go.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/go/go.js deleted file mode 100644 index 00e5d6c..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/go/go.js +++ /dev/null @@ -1,170 +0,0 @@ -CodeMirror.defineMode("go", function(config, parserConfig) { - var indentUnit = config.indentUnit; - - var keywords = { - "break":true, "case":true, "chan":true, "const":true, "continue":true, - "default":true, "defer":true, "else":true, "fallthrough":true, "for":true, - "func":true, "go":true, "goto":true, "if":true, "import":true, - "interface":true, "map":true, "package":true, "range":true, "return":true, - "select":true, "struct":true, "switch":true, "type":true, "var":true, - "bool":true, "byte":true, "complex64":true, "complex128":true, - "float32":true, "float64":true, "int8":true, "int16":true, "int32":true, - "int64":true, "string":true, "uint8":true, "uint16":true, "uint32":true, - "uint64":true, "int":true, "uint":true, "uintptr":true - }; - - var atoms = { - "true":true, "false":true, "iota":true, "nil":true, "append":true, - "cap":true, "close":true, "complex":true, "copy":true, "imag":true, - "len":true, "make":true, "new":true, "panic":true, "print":true, - "println":true, "real":true, "recover":true - }; - - var blockKeywords = { - "else":true, "for":true, "func":true, "if":true, "interface":true, - "select":true, "struct":true, "switch":true - }; - - var isOperatorChar = /[+\-*&^%:=<>!|\/]/; - - var curPunc; - - function tokenBase(stream, state) { - var ch = stream.next(); - if (ch == '"' || ch == "'" || ch == "`") { - state.tokenize = tokenString(ch); - return state.tokenize(stream, state); - } - if (/[\d\.]/.test(ch)) { - if (ch == ".") { - stream.match(/^[0-9]+([eE][\-+]?[0-9]+)?/); - } else if (ch == "0") { - stream.match(/^[xX][0-9a-fA-F]+/) || stream.match(/^0[0-7]+/); - } else { - stream.match(/^[0-9]*\.?[0-9]*([eE][\-+]?[0-9]+)?/); - } - return "number"; - } - if (/[\[\]{}\(\),;\:\.]/.test(ch)) { - curPunc = ch; - return null - } - if (ch == "/") { - if (stream.eat("*")) { - state.tokenize = tokenComment; - return tokenComment(stream, state); - } - if (stream.eat("/")) { - stream.skipToEnd(); - return "comment"; - } - } - if (isOperatorChar.test(ch)) { - stream.eatWhile(isOperatorChar); - return "operator"; - } - stream.eatWhile(/[\w\$_]/); - var cur = stream.current(); - if (keywords.propertyIsEnumerable(cur)) { - if (cur == "case" || cur == "default") curPunc = "case"; - return "keyword"; - } - if (atoms.propertyIsEnumerable(cur)) return "atom"; - return "word"; - } - - function tokenString(quote) { - return function(stream, state) { - var escaped = false, next, end = false; - while ((next = stream.next()) != null) { - if (next == quote && !escaped) {end = true; break;} - escaped = !escaped && next == "\\"; - } - if (end || !(escaped || quote == "`")) - state.tokenize = tokenBase; - return "string"; - }; - } - - function tokenComment(stream, state) { - var maybeEnd = false, ch; - while (ch = stream.next()) { - if (ch == "/" && maybeEnd) { - state.tokenize = tokenBase; - break; - } - maybeEnd = (ch == "*"); - } - return "comment"; - } - - function Context(indented, column, type, align, prev) { - this.indented = indented; - this.column = column; - this.type = type; - this.align = align; - this.prev = prev; - } - function pushContext(state, col, type) { - return state.context = new Context(state.indented, col, type, null, state.context); - } - function popContext(state) { - var t = 
state.context.type; - if (t == ")" || t == "]" || t == "}") - state.indented = state.context.indented; - return state.context = state.context.prev; - } - - // Interface - - return { - startState: function(basecolumn) { - return { - tokenize: null, - context: new Context((basecolumn || 0) - indentUnit, 0, "top", false), - indented: 0, - startOfLine: true - }; - }, - - token: function(stream, state) { - var ctx = state.context; - if (stream.sol()) { - if (ctx.align == null) ctx.align = false; - state.indented = stream.indentation(); - state.startOfLine = true; - if (ctx.type == "case") ctx.type = "}"; - } - if (stream.eatSpace()) return null; - curPunc = null; - var style = (state.tokenize || tokenBase)(stream, state); - if (style == "comment") return style; - if (ctx.align == null) ctx.align = true; - - if (curPunc == "{") pushContext(state, stream.column(), "}"); - else if (curPunc == "[") pushContext(state, stream.column(), "]"); - else if (curPunc == "(") pushContext(state, stream.column(), ")"); - else if (curPunc == "case") ctx.type = "case" - else if (curPunc == "}" && ctx.type == "}") ctx = popContext(state); - else if (curPunc == ctx.type) popContext(state); - state.startOfLine = false; - return style; - }, - - indent: function(state, textAfter) { - if (state.tokenize != tokenBase && state.tokenize != null) return 0; - var ctx = state.context, firstChar = textAfter && textAfter.charAt(0); - if (ctx.type == "case" && /^(?:case|default)\b/.test(textAfter)) { - state.context.type = "}"; - return ctx.indented; - } - var closing = firstChar == ctx.type; - if (ctx.align) return ctx.column + (closing ? 0 : 1); - else return ctx.indented + (closing ? 0 : indentUnit); - }, - - electricChars: "{}:" - }; -}); - -CodeMirror.defineMIME("text/x-go", "go"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/go/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/go/index.html deleted file mode 100644 index 9cdad1a..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/go/index.html +++ /dev/null @@ -1,72 +0,0 @@ - - - - CodeMirror: Go mode - - - - - - - - -

CodeMirror: Go mode
MIME type: text/x-go
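As a usage note for the mode above: a minimal sketch of attaching it to an editor, assuming codemirror.js and go.js are loaded on the page and a textarea with id "code" exists (the id is illustrative):

    // Attach an editor using the MIME type registered at the bottom of go.js.
    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "text/x-go",  // resolved through CodeMirror.defineMIME("text/x-go", "go")
      indentUnit: 4,      // the mode reads this back as config.indentUnit
      lineNumbers: true
    });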

- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/groovy/groovy.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/groovy/groovy.js deleted file mode 100644 index 029e0c9..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/groovy/groovy.js +++ /dev/null @@ -1,210 +0,0 @@ -CodeMirror.defineMode("groovy", function(config, parserConfig) { - function words(str) { - var obj = {}, words = str.split(" "); - for (var i = 0; i < words.length; ++i) obj[words[i]] = true; - return obj; - } - var keywords = words( - "abstract as assert boolean break byte case catch char class const continue def default " + - "do double else enum extends final finally float for goto if implements import in " + - "instanceof int interface long native new package private protected public return " + - "short static strictfp super switch synchronized threadsafe throw throws transient " + - "try void volatile while"); - var blockKeywords = words("catch class do else finally for if switch try while enum interface def"); - var atoms = words("null true false this"); - - var curPunc; - function tokenBase(stream, state) { - var ch = stream.next(); - if (ch == '"' || ch == "'") { - return startString(ch, stream, state); - } - if (/[\[\]{}\(\),;\:\.]/.test(ch)) { - curPunc = ch; - return null - } - if (/\d/.test(ch)) { - stream.eatWhile(/[\w\.]/); - if (stream.eat(/eE/)) { stream.eat(/\+\-/); stream.eatWhile(/\d/); } - return "number"; - } - if (ch == "/") { - if (stream.eat("*")) { - state.tokenize.push(tokenComment); - return tokenComment(stream, state); - } - if (stream.eat("/")) { - stream.skipToEnd(); - return "comment"; - } - if (expectExpression(state.lastToken)) { - return startString(ch, stream, state); - } - } - if (ch == "-" && stream.eat(">")) { - curPunc = "->"; - return null; - } - if (/[+\-*&%=<>!?|\/~]/.test(ch)) { - stream.eatWhile(/[+\-*&%=<>|~]/); - return "operator"; - } - stream.eatWhile(/[\w\$_]/); - if (ch == "@") { stream.eatWhile(/[\w\$_\.]/); return "meta"; } - if (state.lastToken == ".") return "property"; - if (stream.eat(":")) { curPunc = "proplabel"; return "property"; } - var cur = stream.current(); - if (atoms.propertyIsEnumerable(cur)) { return "atom"; } - if (keywords.propertyIsEnumerable(cur)) { - if (blockKeywords.propertyIsEnumerable(cur)) curPunc = "newstatement"; - return "keyword"; - } - return "word"; - } - tokenBase.isBase = true; - - function startString(quote, stream, state) { - var tripleQuoted = false; - if (quote != "/" && stream.eat(quote)) { - if (stream.eat(quote)) tripleQuoted = true; - else return "string"; - } - function t(stream, state) { - var escaped = false, next, end = !tripleQuoted; - while ((next = stream.next()) != null) { - if (next == quote && !escaped) { - if (!tripleQuoted) { break; } - if (stream.match(quote + quote)) { end = true; break; } - } - if (quote == '"' && next == "$" && !escaped && stream.eat("{")) { - state.tokenize.push(tokenBaseUntilBrace()); - return "string"; - } - escaped = !escaped && next == "\\"; - } - if (end) state.tokenize.pop(); - return "string"; - } - state.tokenize.push(t); - return t(stream, state); - } - - function tokenBaseUntilBrace() { - var depth = 1; - function t(stream, state) { - if (stream.peek() == "}") { - depth--; - if (depth == 0) { - state.tokenize.pop(); - return state.tokenize[state.tokenize.length-1](stream, state); - } - } else if (stream.peek() == "{") { - depth++; - } - return tokenBase(stream, state); - } - t.isBase = true; - return t; - } - - function tokenComment(stream, state) { - 
var maybeEnd = false, ch; - while (ch = stream.next()) { - if (ch == "/" && maybeEnd) { - state.tokenize.pop(); - break; - } - maybeEnd = (ch == "*"); - } - return "comment"; - } - - function expectExpression(last) { - return !last || last == "operator" || last == "->" || /[\.\[\{\(,;:]/.test(last) || - last == "newstatement" || last == "keyword" || last == "proplabel"; - } - - function Context(indented, column, type, align, prev) { - this.indented = indented; - this.column = column; - this.type = type; - this.align = align; - this.prev = prev; - } - function pushContext(state, col, type) { - return state.context = new Context(state.indented, col, type, null, state.context); - } - function popContext(state) { - var t = state.context.type; - if (t == ")" || t == "]" || t == "}") - state.indented = state.context.indented; - return state.context = state.context.prev; - } - - // Interface - - return { - startState: function(basecolumn) { - return { - tokenize: [tokenBase], - context: new Context((basecolumn || 0) - config.indentUnit, 0, "top", false), - indented: 0, - startOfLine: true, - lastToken: null - }; - }, - - token: function(stream, state) { - var ctx = state.context; - if (stream.sol()) { - if (ctx.align == null) ctx.align = false; - state.indented = stream.indentation(); - state.startOfLine = true; - // Automatic semicolon insertion - if (ctx.type == "statement" && !expectExpression(state.lastToken)) { - popContext(state); ctx = state.context; - } - } - if (stream.eatSpace()) return null; - curPunc = null; - var style = state.tokenize[state.tokenize.length-1](stream, state); - if (style == "comment") return style; - if (ctx.align == null) ctx.align = true; - - if ((curPunc == ";" || curPunc == ":") && ctx.type == "statement") popContext(state); - // Handle indentation for {x -> \n ... } - else if (curPunc == "->" && ctx.type == "statement" && ctx.prev.type == "}") { - popContext(state); - state.context.align = false; - } - else if (curPunc == "{") pushContext(state, stream.column(), "}"); - else if (curPunc == "[") pushContext(state, stream.column(), "]"); - else if (curPunc == "(") pushContext(state, stream.column(), ")"); - else if (curPunc == "}") { - while (ctx.type == "statement") ctx = popContext(state); - if (ctx.type == "}") ctx = popContext(state); - while (ctx.type == "statement") ctx = popContext(state); - } - else if (curPunc == ctx.type) popContext(state); - else if (ctx.type == "}" || ctx.type == "top" || (ctx.type == "statement" && curPunc == "newstatement")) - pushContext(state, stream.column(), "statement"); - state.startOfLine = false; - state.lastToken = curPunc || style; - return style; - }, - - indent: function(state, textAfter) { - if (!state.tokenize[state.tokenize.length-1].isBase) return 0; - var firstChar = textAfter && textAfter.charAt(0), ctx = state.context; - if (ctx.type == "statement" && !expectExpression(state.lastToken)) ctx = ctx.prev; - var closing = firstChar == ctx.type; - if (ctx.type == "statement") return ctx.indented + (firstChar == "{" ? 0 : config.indentUnit); - else if (ctx.align) return ctx.column + (closing ? 0 : 1); - else return ctx.indented + (closing ? 
0 : config.indentUnit); - }, - - electricChars: "{}" - }; -}); - -CodeMirror.defineMIME("text/x-groovy", "groovy"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/groovy/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/groovy/index.html deleted file mode 100644 index 226475c..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/groovy/index.html +++ /dev/null @@ -1,71 +0,0 @@ - - - - CodeMirror: Groovy mode - - - - - - - -

CodeMirror: Groovy mode
MIME types defined: text/x-groovy
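Since the mode is registered both under the name "groovy" and the MIME type above, further MIME aliases can point at the same implementation; a small sketch (text/x-gradle is a hypothetical alias, not one this bundle defines):

    // Alias an extra MIME type to the groovy mode, then select it by that alias.
    CodeMirror.defineMIME("text/x-gradle", "groovy");
    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "text/x-gradle",  // equivalent to "text/x-groovy"
      lineNumbers: true
    });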

- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/haskell/haskell.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/haskell/haskell.js deleted file mode 100644 index aac5041..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/haskell/haskell.js +++ /dev/null @@ -1,242 +0,0 @@ -CodeMirror.defineMode("haskell", function(cmCfg, modeCfg) { - - function switchState(source, setState, f) { - setState(f); - return f(source, setState); - } - - // These should all be Unicode extended, as per the Haskell 2010 report - var smallRE = /[a-z_]/; - var largeRE = /[A-Z]/; - var digitRE = /[0-9]/; - var hexitRE = /[0-9A-Fa-f]/; - var octitRE = /[0-7]/; - var idRE = /[a-z_A-Z0-9']/; - var symbolRE = /[-!#$%&*+.\/<=>?@\\^|~:]/; - var specialRE = /[(),;[\]`{}]/; - var whiteCharRE = /[ \t\v\f]/; // newlines are handled in tokenizer - - function normal(source, setState) { - if (source.eatWhile(whiteCharRE)) { - return null; - } - - var ch = source.next(); - if (specialRE.test(ch)) { - if (ch == '{' && source.eat('-')) { - var t = "comment"; - if (source.eat('#')) { - t = "meta"; - } - return switchState(source, setState, ncomment(t, 1)); - } - return null; - } - - if (ch == '\'') { - if (source.eat('\\')) { - source.next(); // should handle other escapes here - } - else { - source.next(); - } - if (source.eat('\'')) { - return "string"; - } - return "error"; - } - - if (ch == '"') { - return switchState(source, setState, stringLiteral); - } - - if (largeRE.test(ch)) { - source.eatWhile(idRE); - if (source.eat('.')) { - return "qualifier"; - } - return "variable-2"; - } - - if (smallRE.test(ch)) { - source.eatWhile(idRE); - return "variable"; - } - - if (digitRE.test(ch)) { - if (ch == '0') { - if (source.eat(/[xX]/)) { - source.eatWhile(hexitRE); // should require at least 1 - return "integer"; - } - if (source.eat(/[oO]/)) { - source.eatWhile(octitRE); // should require at least 1 - return "number"; - } - } - source.eatWhile(digitRE); - var t = "number"; - if (source.eat('.')) { - t = "number"; - source.eatWhile(digitRE); // should require at least 1 - } - if (source.eat(/[eE]/)) { - t = "number"; - source.eat(/[-+]/); - source.eatWhile(digitRE); // should require at least 1 - } - return t; - } - - if (symbolRE.test(ch)) { - if (ch == '-' && source.eat(/-/)) { - source.eatWhile(/-/); - if (!source.eat(symbolRE)) { - source.skipToEnd(); - return "comment"; - } - } - var t = "variable"; - if (ch == ':') { - t = "variable-2"; - } - source.eatWhile(symbolRE); - return t; - } - - return "error"; - } - - function ncomment(type, nest) { - if (nest == 0) { - return normal; - } - return function(source, setState) { - var currNest = nest; - while (!source.eol()) { - var ch = source.next(); - if (ch == '{' && source.eat('-')) { - ++currNest; - } - else if (ch == '-' && source.eat('}')) { - --currNest; - if (currNest == 0) { - setState(normal); - return type; - } - } - } - setState(ncomment(type, currNest)); - return type; - } - } - - function stringLiteral(source, setState) { - while (!source.eol()) { - var ch = source.next(); - if (ch == '"') { - setState(normal); - return "string"; - } - if (ch == '\\') { - if (source.eol() || source.eat(whiteCharRE)) { - setState(stringGap); - return "string"; - } - if (source.eat('&')) { - } - else { - source.next(); // should handle other escapes here - } - } - } - setState(normal); - return "error"; - } - - function stringGap(source, setState) { - if (source.eat('\\')) { - return switchState(source, setState, stringLiteral); - } - source.next(); - 
setState(normal); - return "error"; - } - - - var wellKnownWords = (function() { - var wkw = {}; - function setType(t) { - return function () { - for (var i = 0; i < arguments.length; i++) - wkw[arguments[i]] = t; - } - } - - setType("keyword")( - "case", "class", "data", "default", "deriving", "do", "else", "foreign", - "if", "import", "in", "infix", "infixl", "infixr", "instance", "let", - "module", "newtype", "of", "then", "type", "where", "_"); - - setType("keyword")( - "\.\.", ":", "::", "=", "\\", "\"", "<-", "->", "@", "~", "=>"); - - setType("builtin")( - "!!", "$!", "$", "&&", "+", "++", "-", ".", "/", "/=", "<", "<=", "=<<", - "==", ">", ">=", ">>", ">>=", "^", "^^", "||", "*", "**"); - - setType("builtin")( - "Bool", "Bounded", "Char", "Double", "EQ", "Either", "Enum", "Eq", - "False", "FilePath", "Float", "Floating", "Fractional", "Functor", "GT", - "IO", "IOError", "Int", "Integer", "Integral", "Just", "LT", "Left", - "Maybe", "Monad", "Nothing", "Num", "Ord", "Ordering", "Rational", "Read", - "ReadS", "Real", "RealFloat", "RealFrac", "Right", "Show", "ShowS", - "String", "True"); - - setType("builtin")( - "abs", "acos", "acosh", "all", "and", "any", "appendFile", "asTypeOf", - "asin", "asinh", "atan", "atan2", "atanh", "break", "catch", "ceiling", - "compare", "concat", "concatMap", "const", "cos", "cosh", "curry", - "cycle", "decodeFloat", "div", "divMod", "drop", "dropWhile", "either", - "elem", "encodeFloat", "enumFrom", "enumFromThen", "enumFromThenTo", - "enumFromTo", "error", "even", "exp", "exponent", "fail", "filter", - "flip", "floatDigits", "floatRadix", "floatRange", "floor", "fmap", - "foldl", "foldl1", "foldr", "foldr1", "fromEnum", "fromInteger", - "fromIntegral", "fromRational", "fst", "gcd", "getChar", "getContents", - "getLine", "head", "id", "init", "interact", "ioError", "isDenormalized", - "isIEEE", "isInfinite", "isNaN", "isNegativeZero", "iterate", "last", - "lcm", "length", "lex", "lines", "log", "logBase", "lookup", "map", - "mapM", "mapM_", "max", "maxBound", "maximum", "maybe", "min", "minBound", - "minimum", "mod", "negate", "not", "notElem", "null", "odd", "or", - "otherwise", "pi", "pred", "print", "product", "properFraction", - "putChar", "putStr", "putStrLn", "quot", "quotRem", "read", "readFile", - "readIO", "readList", "readLn", "readParen", "reads", "readsPrec", - "realToFrac", "recip", "rem", "repeat", "replicate", "return", "reverse", - "round", "scaleFloat", "scanl", "scanl1", "scanr", "scanr1", "seq", - "sequence", "sequence_", "show", "showChar", "showList", "showParen", - "showString", "shows", "showsPrec", "significand", "signum", "sin", - "sinh", "snd", "span", "splitAt", "sqrt", "subtract", "succ", "sum", - "tail", "take", "takeWhile", "tan", "tanh", "toEnum", "toInteger", - "toRational", "truncate", "uncurry", "undefined", "unlines", "until", - "unwords", "unzip", "unzip3", "userError", "words", "writeFile", "zip", - "zip3", "zipWith", "zipWith3"); - - return wkw; - })(); - - - - return { - startState: function () { return { f: normal }; }, - copyState: function (s) { return { f: s.f }; }, - - token: function(stream, state) { - var t = state.f(stream, function(s) { state.f = s; }); - var w = stream.current(); - return (w in wellKnownWords) ? 
wellKnownWords[w] : t; - } - }; - -}); - -CodeMirror.defineMIME("text/x-haskell", "haskell"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/haskell/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/haskell/index.html deleted file mode 100644 index 15706e7..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/haskell/index.html +++ /dev/null @@ -1,60 +0,0 @@ - - - - CodeMirror: Haskell mode - - - - - - - - -

CodeMirror: Haskell mode
MIME types defined: text/x-haskell.
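Usage follows the same pattern as the other modes; a minimal sketch, assuming haskell.js is loaded and a textarea with id "code" exists (the id is illustrative):

    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "text/x-haskell",
      matchBrackets: true,  // helpful with Haskell's nested parentheses
      lineNumbers: true
    });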

- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/htmlembedded/htmlembedded.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/htmlembedded/htmlembedded.js deleted file mode 100644 index 08e170e..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/htmlembedded/htmlembedded.js +++ /dev/null @@ -1,68 +0,0 @@ -CodeMirror.defineMode("htmlembedded", function(config, parserConfig) { - - //config settings - var scriptStartRegex = parserConfig.scriptStartRegex || /^<%/i, - scriptEndRegex = parserConfig.scriptEndRegex || /^%>/i; - - //inner modes - var scriptingMode, htmlMixedMode; - - //tokenizer when in html mode - function htmlDispatch(stream, state) { - if (stream.match(scriptStartRegex, false)) { - state.token=scriptingDispatch; - return scriptingMode.token(stream, state.scriptState); - } - else - return htmlMixedMode.token(stream, state.htmlState); - } - - //tokenizer when in scripting mode - function scriptingDispatch(stream, state) { - if (stream.match(scriptEndRegex, false)) { - state.token=htmlDispatch; - return htmlMixedMode.token(stream, state.htmlState); - } - else - return scriptingMode.token(stream, state.scriptState); - } - - - return { - startState: function() { - scriptingMode = scriptingMode || CodeMirror.getMode(config, parserConfig.scriptingModeSpec); - htmlMixedMode = htmlMixedMode || CodeMirror.getMode(config, "htmlmixed"); - return { - token : parserConfig.startOpen ? scriptingDispatch : htmlDispatch, - htmlState : htmlMixedMode.startState(), - scriptState : scriptingMode.startState() - } - }, - - token: function(stream, state) { - return state.token(stream, state); - }, - - indent: function(state, textAfter) { - if (state.token == htmlDispatch) - return htmlMixedMode.indent(state.htmlState, textAfter); - else - return scriptingMode.indent(state.scriptState, textAfter); - }, - - copyState: function(state) { - return { - token : state.token, - htmlState : CodeMirror.copyState(htmlMixedMode, state.htmlState), - scriptState : CodeMirror.copyState(scriptingMode, state.scriptState) - } - }, - - - electricChars: "/{}:" - } -}); - -CodeMirror.defineMIME("application/x-ejs", { name: "htmlembedded", scriptingModeSpec:"javascript"}); -CodeMirror.defineMIME("application/x-aspx", { name: "htmlembedded", scriptingModeSpec:"text/x-csharp"}); -CodeMirror.defineMIME("application/x-jsp", { name: "htmlembedded", scriptingModeSpec:"text/x-java"}); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/htmlembedded/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/htmlembedded/index.html deleted file mode 100644 index c1374e5..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/htmlembedded/index.html +++ /dev/null @@ -1,49 +0,0 @@ - - - - CodeMirror: Html Embedded Scripts mode - - - - - - - - - - - -

CodeMirror: Html Embedded Scripts mode
Mode for HTML embedded scripts such as JSP and ASP.NET. Depends on HtmlMixed, which in turn depends on the JavaScript, CSS, and XML modes. Other dependencies include those of the scripting language chosen.
MIME types defined: application/x-aspx (ASP.NET), application/x-ejs (Embedded JavaScript), application/x-jsp (JavaServer Pages)
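The three MIME types above are all produced by parameterizing one mode through parserConfig.scriptingModeSpec, and a new flavour can be registered the same way; a sketch (application/x-ssp and the "ruby" mode spec are illustrative assumptions, not part of this bundle):

    // Register a custom server-page flavour: HTML with <% ... %> regions
    // handed to a different scripting mode.
    CodeMirror.defineMIME("application/x-ssp", {
      name: "htmlembedded",
      scriptingModeSpec: "ruby"  // any mode spec that has been loaded
    });
    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "application/x-jsp",  // one of the flavours the mode itself defines
      lineNumbers: true
    });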

- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/htmlmixed/htmlmixed.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/htmlmixed/htmlmixed.js deleted file mode 100644 index c66a658..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/htmlmixed/htmlmixed.js +++ /dev/null @@ -1,83 +0,0 @@ -CodeMirror.defineMode("htmlmixed", function(config, parserConfig) { - var htmlMode = CodeMirror.getMode(config, {name: "xml", htmlMode: true}); - var jsMode = CodeMirror.getMode(config, "javascript"); - var cssMode = CodeMirror.getMode(config, "css"); - - function html(stream, state) { - var style = htmlMode.token(stream, state.htmlState); - if (style == "tag" && stream.current() == ">" && state.htmlState.context) { - if (/^script$/i.test(state.htmlState.context.tagName)) { - state.token = javascript; - state.localState = jsMode.startState(htmlMode.indent(state.htmlState, "")); - state.mode = "javascript"; - } - else if (/^style$/i.test(state.htmlState.context.tagName)) { - state.token = css; - state.localState = cssMode.startState(htmlMode.indent(state.htmlState, "")); - state.mode = "css"; - } - } - return style; - } - function maybeBackup(stream, pat, style) { - var cur = stream.current(); - var close = cur.search(pat); - if (close > -1) stream.backUp(cur.length - close); - return style; - } - function javascript(stream, state) { - if (stream.match(/^<\/\s*script\s*>/i, false)) { - state.token = html; - state.localState = null; - state.mode = "html"; - return html(stream, state); - } - return maybeBackup(stream, /<\/\s*script\s*>/, - jsMode.token(stream, state.localState)); - } - function css(stream, state) { - if (stream.match(/^<\/\s*style\s*>/i, false)) { - state.token = html; - state.localState = null; - state.mode = "html"; - return html(stream, state); - } - return maybeBackup(stream, /<\/\s*style\s*>/, - cssMode.token(stream, state.localState)); - } - - return { - startState: function() { - var state = htmlMode.startState(); - return {token: html, localState: null, mode: "html", htmlState: state}; - }, - - copyState: function(state) { - if (state.localState) - var local = CodeMirror.copyState(state.token == css ? cssMode : jsMode, state.localState); - return {token: state.token, localState: local, mode: state.mode, - htmlState: CodeMirror.copyState(htmlMode, state.htmlState)}; - }, - - token: function(stream, state) { - return state.token(stream, state); - }, - - indent: function(state, textAfter) { - if (state.token == html || /^\s*<\//.test(textAfter)) - return htmlMode.indent(state.htmlState, textAfter); - else if (state.token == javascript) - return jsMode.indent(state.localState, textAfter); - else - return cssMode.indent(state.localState, textAfter); - }, - - compareStates: function(a, b) { - return htmlMode.compareStates(a.htmlState, b.htmlState); - }, - - electricChars: "/{}:" - } -}); - -CodeMirror.defineMIME("text/html", "htmlmixed"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/htmlmixed/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/htmlmixed/index.html deleted file mode 100644 index 63fc412..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/htmlmixed/index.html +++ /dev/null @@ -1,51 +0,0 @@ - - - - CodeMirror: HTML mixed mode - - - - - - - - - - -

CodeMirror: HTML mixed mode
The HTML mixed mode depends on the XML, JavaScript, and CSS modes.
MIME types defined: text/html (redefined, only takes effect if you load this parser after the XML parser).
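Because htmlmixed looks its sub-modes up by name when an editor starts, the dependency scripts have to be loaded first; a minimal sketch, assuming xml.js, javascript.js, and css.js were included before htmlmixed.js:

    // The mixed mode calls CodeMirror.getMode() for "xml", "javascript" and
    // "css" internally, so those scripts must already be present.
    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "text/html",  // enters the JS/CSS sub-modes inside <script>/<style> tags
      lineNumbers: true
    });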

- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/javascript/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/javascript/index.html deleted file mode 100644 index c3ab91d..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/javascript/index.html +++ /dev/null @@ -1,77 +0,0 @@ - - - - CodeMirror: JavaScript mode - - - - - - - -

CodeMirror: JavaScript mode
JavaScript mode supports a single configuration option, json, which will set the mode to expect JSON data rather than a JavaScript program.
MIME types defined: text/javascript, application/json.
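The json option is passed as part of a mode spec object rather than a bare MIME string; a minimal sketch of both spellings (the element ids are illustrative):

    // Equivalent ways to get a JSON-expecting editor: the MIME type that
    // javascript.js registers, or an explicit spec with json set.
    var viaMime = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "application/json"
    });
    var viaSpec = CodeMirror.fromTextArea(document.getElementById("code2"), {
      mode: {name: "javascript", json: true}
    });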

- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/javascript/javascript.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/javascript/javascript.js deleted file mode 100644 index 462f486..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/javascript/javascript.js +++ /dev/null @@ -1,360 +0,0 @@ -CodeMirror.defineMode("javascript", function(config, parserConfig) { - var indentUnit = config.indentUnit; - var jsonMode = parserConfig.json; - - // Tokenizer - - var keywords = function(){ - function kw(type) {return {type: type, style: "keyword"};} - var A = kw("keyword a"), B = kw("keyword b"), C = kw("keyword c"); - var operator = kw("operator"), atom = {type: "atom", style: "atom"}; - return { - "if": A, "while": A, "with": A, "else": B, "do": B, "try": B, "finally": B, - "return": C, "break": C, "continue": C, "new": C, "delete": C, "throw": C, - "var": kw("var"), "const": kw("var"), "let": kw("var"), - "function": kw("function"), "catch": kw("catch"), - "for": kw("for"), "switch": kw("switch"), "case": kw("case"), "default": kw("default"), - "in": operator, "typeof": operator, "instanceof": operator, - "true": atom, "false": atom, "null": atom, "undefined": atom, "NaN": atom, "Infinity": atom - }; - }(); - - var isOperatorChar = /[+\-*&%=<>!?|]/; - - function chain(stream, state, f) { - state.tokenize = f; - return f(stream, state); - } - - function nextUntilUnescaped(stream, end) { - var escaped = false, next; - while ((next = stream.next()) != null) { - if (next == end && !escaped) - return false; - escaped = !escaped && next == "\\"; - } - return escaped; - } - - // Used as scratch variables to communicate multiple values without - // consing up tons of objects. - var type, content; - function ret(tp, style, cont) { - type = tp; content = cont; - return style; - } - - function jsTokenBase(stream, state) { - var ch = stream.next(); - if (ch == '"' || ch == "'") - return chain(stream, state, jsTokenString(ch)); - else if (/[\[\]{}\(\),;\:\.]/.test(ch)) - return ret(ch); - else if (ch == "0" && stream.eat(/x/i)) { - stream.eatWhile(/[\da-f]/i); - return ret("number", "number"); - } - else if (/\d/.test(ch)) { - stream.match(/^\d*(?:\.\d*)?(?:[eE][+\-]?\d+)?/); - return ret("number", "number"); - } - else if (ch == "/") { - if (stream.eat("*")) { - return chain(stream, state, jsTokenComment); - } - else if (stream.eat("/")) { - stream.skipToEnd(); - return ret("comment", "comment"); - } - else if (state.reAllowed) { - nextUntilUnescaped(stream, "/"); - stream.eatWhile(/[gimy]/); // 'y' is "sticky" option in Mozilla - return ret("regexp", "string-2"); - } - else { - stream.eatWhile(isOperatorChar); - return ret("operator", null, stream.current()); - } - } - else if (ch == "#") { - stream.skipToEnd(); - return ret("error", "error"); - } - else if (isOperatorChar.test(ch)) { - stream.eatWhile(isOperatorChar); - return ret("operator", null, stream.current()); - } - else { - stream.eatWhile(/[\w\$_]/); - var word = stream.current(), known = keywords.propertyIsEnumerable(word) && keywords[word]; - return (known && state.kwAllowed) ? 
ret(known.type, known.style, word) : - ret("variable", "variable", word); - } - } - - function jsTokenString(quote) { - return function(stream, state) { - if (!nextUntilUnescaped(stream, quote)) - state.tokenize = jsTokenBase; - return ret("string", "string"); - }; - } - - function jsTokenComment(stream, state) { - var maybeEnd = false, ch; - while (ch = stream.next()) { - if (ch == "/" && maybeEnd) { - state.tokenize = jsTokenBase; - break; - } - maybeEnd = (ch == "*"); - } - return ret("comment", "comment"); - } - - // Parser - - var atomicTypes = {"atom": true, "number": true, "variable": true, "string": true, "regexp": true}; - - function JSLexical(indented, column, type, align, prev, info) { - this.indented = indented; - this.column = column; - this.type = type; - this.prev = prev; - this.info = info; - if (align != null) this.align = align; - } - - function inScope(state, varname) { - for (var v = state.localVars; v; v = v.next) - if (v.name == varname) return true; - } - - function parseJS(state, style, type, content, stream) { - var cc = state.cc; - // Communicate our context to the combinators. - // (Less wasteful than consing up a hundred closures on every call.) - cx.state = state; cx.stream = stream; cx.marked = null, cx.cc = cc; - - if (!state.lexical.hasOwnProperty("align")) - state.lexical.align = true; - - while(true) { - var combinator = cc.length ? cc.pop() : jsonMode ? expression : statement; - if (combinator(type, content)) { - while(cc.length && cc[cc.length - 1].lex) - cc.pop()(); - if (cx.marked) return cx.marked; - if (type == "variable" && inScope(state, content)) return "variable-2"; - return style; - } - } - } - - // Combinator utils - - var cx = {state: null, column: null, marked: null, cc: null}; - function pass() { - for (var i = arguments.length - 1; i >= 0; i--) cx.cc.push(arguments[i]); - } - function cont() { - pass.apply(null, arguments); - return true; - } - function register(varname) { - var state = cx.state; - if (state.context) { - cx.marked = "def"; - for (var v = state.localVars; v; v = v.next) - if (v.name == varname) return; - state.localVars = {name: varname, next: state.localVars}; - } - } - - // Combinators - - var defaultVars = {name: "this", next: {name: "arguments"}}; - function pushcontext() { - if (!cx.state.context) cx.state.localVars = defaultVars; - cx.state.context = {prev: cx.state.context, vars: cx.state.localVars}; - } - function popcontext() { - cx.state.localVars = cx.state.context.vars; - cx.state.context = cx.state.context.prev; - } - function pushlex(type, info) { - var result = function() { - var state = cx.state; - state.lexical = new JSLexical(state.indented, cx.stream.column(), type, null, state.lexical, info) - }; - result.lex = true; - return result; - } - function poplex() { - var state = cx.state; - if (state.lexical.prev) { - if (state.lexical.type == ")") - state.indented = state.lexical.indented; - state.lexical = state.lexical.prev; - } - } - poplex.lex = true; - - function expect(wanted) { - return function expecting(type) { - if (type == wanted) return cont(); - else if (wanted == ";") return pass(); - else return cont(arguments.callee); - }; - } - - function statement(type) { - if (type == "var") return cont(pushlex("vardef"), vardef1, expect(";"), poplex); - if (type == "keyword a") return cont(pushlex("form"), expression, statement, poplex); - if (type == "keyword b") return cont(pushlex("form"), statement, poplex); - if (type == "{") return cont(pushlex("}"), block, poplex); - if (type == ";") return cont(); - 
if (type == "function") return cont(functiondef); - if (type == "for") return cont(pushlex("form"), expect("("), pushlex(")"), forspec1, expect(")"), - poplex, statement, poplex); - if (type == "variable") return cont(pushlex("stat"), maybelabel); - if (type == "switch") return cont(pushlex("form"), expression, pushlex("}", "switch"), expect("{"), - block, poplex, poplex); - if (type == "case") return cont(expression, expect(":")); - if (type == "default") return cont(expect(":")); - if (type == "catch") return cont(pushlex("form"), pushcontext, expect("("), funarg, expect(")"), - statement, poplex, popcontext); - return pass(pushlex("stat"), expression, expect(";"), poplex); - } - function expression(type) { - if (atomicTypes.hasOwnProperty(type)) return cont(maybeoperator); - if (type == "function") return cont(functiondef); - if (type == "keyword c") return cont(maybeexpression); - if (type == "(") return cont(pushlex(")"), maybeexpression, expect(")"), poplex, maybeoperator); - if (type == "operator") return cont(expression); - if (type == "[") return cont(pushlex("]"), commasep(expression, "]"), poplex, maybeoperator); - if (type == "{") return cont(pushlex("}"), commasep(objprop, "}"), poplex, maybeoperator); - return cont(); - } - function maybeexpression(type) { - if (type.match(/[;\}\)\],]/)) return pass(); - return pass(expression); - } - - function maybeoperator(type, value) { - if (type == "operator" && /\+\+|--/.test(value)) return cont(maybeoperator); - if (type == "operator") return cont(expression); - if (type == ";") return; - if (type == "(") return cont(pushlex(")"), commasep(expression, ")"), poplex, maybeoperator); - if (type == ".") return cont(property, maybeoperator); - if (type == "[") return cont(pushlex("]"), expression, expect("]"), poplex, maybeoperator); - } - function maybelabel(type) { - if (type == ":") return cont(poplex, statement); - return pass(maybeoperator, expect(";"), poplex); - } - function property(type) { - if (type == "variable") {cx.marked = "property"; return cont();} - } - function objprop(type) { - if (type == "variable") cx.marked = "property"; - if (atomicTypes.hasOwnProperty(type)) return cont(expect(":"), expression); - } - function commasep(what, end) { - function proceed(type) { - if (type == ",") return cont(what, proceed); - if (type == end) return cont(); - return cont(expect(end)); - } - return function commaSeparated(type) { - if (type == end) return cont(); - else return pass(what, proceed); - }; - } - function block(type) { - if (type == "}") return cont(); - return pass(statement, block); - } - function vardef1(type, value) { - if (type == "variable"){register(value); return cont(vardef2);} - return cont(); - } - function vardef2(type, value) { - if (value == "=") return cont(expression, vardef2); - if (type == ",") return cont(vardef1); - } - function forspec1(type) { - if (type == "var") return cont(vardef1, forspec2); - if (type == ";") return pass(forspec2); - if (type == "variable") return cont(formaybein); - return pass(forspec2); - } - function formaybein(type, value) { - if (value == "in") return cont(expression); - return cont(maybeoperator, forspec2); - } - function forspec2(type, value) { - if (type == ";") return cont(forspec3); - if (value == "in") return cont(expression); - return cont(expression, expect(";"), forspec3); - } - function forspec3(type) { - if (type != ")") cont(expression); - } - function functiondef(type, value) { - if (type == "variable") {register(value); return cont(functiondef);} - if (type == 
"(") return cont(pushlex(")"), pushcontext, commasep(funarg, ")"), poplex, statement, popcontext); - } - function funarg(type, value) { - if (type == "variable") {register(value); return cont();} - } - - // Interface - - return { - startState: function(basecolumn) { - return { - tokenize: jsTokenBase, - reAllowed: true, - kwAllowed: true, - cc: [], - lexical: new JSLexical((basecolumn || 0) - indentUnit, 0, "block", false), - localVars: parserConfig.localVars, - context: parserConfig.localVars && {vars: parserConfig.localVars}, - indented: 0 - }; - }, - - token: function(stream, state) { - if (stream.sol()) { - if (!state.lexical.hasOwnProperty("align")) - state.lexical.align = false; - state.indented = stream.indentation(); - } - if (stream.eatSpace()) return null; - var style = state.tokenize(stream, state); - if (type == "comment") return style; - state.reAllowed = !!(type == "operator" || type == "keyword c" || type.match(/^[\[{}\(,;:]$/)); - state.kwAllowed = type != '.'; - return parseJS(state, style, type, content, stream); - }, - - indent: function(state, textAfter) { - if (state.tokenize != jsTokenBase) return 0; - var firstChar = textAfter && textAfter.charAt(0), lexical = state.lexical, - type = lexical.type, closing = firstChar == type; - if (type == "vardef") return lexical.indented + 4; - else if (type == "form" && firstChar == "{") return lexical.indented; - else if (type == "stat" || type == "form") return lexical.indented + indentUnit; - else if (lexical.info == "switch" && !closing) - return lexical.indented + (/^(?:case|default)\b/.test(textAfter) ? indentUnit : 2 * indentUnit); - else if (lexical.align) return lexical.column + (closing ? 0 : 1); - else return lexical.indented + (closing ? 0 : indentUnit); - }, - - electricChars: ":{}" - }; -}); - -CodeMirror.defineMIME("text/javascript", "javascript"); -CodeMirror.defineMIME("application/json", {name: "javascript", json: true}); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/jinja2/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/jinja2/index.html deleted file mode 100644 index 021a282..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/jinja2/index.html +++ /dev/null @@ -1,37 +0,0 @@ - - - - CodeMirror: Jinja2 mode - - - - - - - -

CodeMirror: Jinja2 mode
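Unlike the other modes in this bundle, jinja2.js calls defineMode() but never defineMIME(), so an editor has to select it by the bare mode name; a minimal sketch (the element id is illustrative):

    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "jinja2",  // no MIME type is registered for this mode
      lineNumbers: true
    });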
- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/jinja2/jinja2.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/jinja2/jinja2.js deleted file mode 100644 index 7c14dc9..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/jinja2/jinja2.js +++ /dev/null @@ -1,51 +0,0 @@ -CodeMirror.defineMode("jinja2", function(config, parserConf) { - var keywords = ["block", "endblock", "for", "endfor", "in", "true", "false", - "loop", "none", "self", "super", "if", "as", "not", "and", - "else", "import", "with", "without", "context", 'extends', 'macro', 'endmacro']; - - var blocks = ['block', 'for', 'if', 'import', 'with', 'macro', 'raw', 'call', 'filter']; - // TODO raw should stop highlighting until the endraw - var standalone = ['extends', 'import', 'else', 'elif', 'from', 'include', 'set']; - var special = ['in', 'is', 'true', 'false', 'loop', 'none', 'self', 'super', - 'as', 'not', 'and', 'if', 'else', 'without', 'with', - 'context', 'ignore', 'missing', 'import']; - // TODO list builtin filters and tests - - keywords = new RegExp("^((" + keywords.join(")|(") + "))\\b"); - - function tokenBase (stream, state) { - var ch = stream.next(); - if (ch == "{") { - if (ch = stream.eat(/\{|%|#/)) { - stream.eat("-"); - state.tokenize = inTag(ch); - return "tag"; - } - } - } - function inTag (close) { - if (close == "{") { - close = "}"; - } - return function (stream, state) { - var ch = stream.next(); - if ((ch == close || (ch == "-" && stream.eat(close))) - && stream.eat("}")) { - state.tokenize = tokenBase; - return "tag"; - } - if (stream.match(keywords)) { - return "keyword"; - } - return close == "#" ? "comment" : "string"; - }; - } - return { - startState: function () { - return {tokenize: tokenBase}; - }, - token: function (stream, state) { - return state.tokenize(stream, state); - } - }; -}); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/less/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/less/index.html deleted file mode 100644 index f770520..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/less/index.html +++ /dev/null @@ -1,603 +0,0 @@ - - - - CodeMirror: LESS mode - - - - - - - - - -
-CodeMirror: LESS mode
-MIME types defined: text/less.
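One mismatch is worth flagging: the demo page advertises text/less, but the less.js removed below registers its mode with CodeMirror.defineMode("css", ...) and only maps "text/css" via defineMIME, so "text/less" would not resolve as shipped. A hedged sketch using the names the file actually registers:

    // Sketch based on the registrations visible in the deleted less.js:
    // defineMode("css", ...) and defineMIME("text/css", "css");
    // "text/less" itself is never defined there.
    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "text/css",  // resolves to the LESS-flavoured "css" mode below
      lineNumbers: true
    });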
- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/less/less.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/less/less.js deleted file mode 100644 index 1c20bd8..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/less/less.js +++ /dev/null @@ -1,218 +0,0 @@ -/* -LESS mode - http://www.lesscss.org/ -Ported to CodeMirror by Peter Kroon -*/ - -CodeMirror.defineMode("css", function(config) { - var indentUnit = config.indentUnit, type; - function ret(style, tp) {type = tp; return style;} - //html5 tags - var tags = ["a","abbr","acronym","address","applet","area","article","aside","audio","b","base","basefont","bdi","bdo","big","blockquote","body","br","button","canvas","caption","cite","code","col","colgroup","command","datalist","dd","del","details","dfn","dir","div","dl","dt","em","embed","fieldset","figcaption","figure","font","footer","form","frame","frameset","h1","h2","h3","h4","h5","h6","head","header","hgroup","hr","html","i","iframe","img","input","ins","keygen","kbd","label","legend","li","link","map","mark","menu","meta","meter","nav","noframes","noscript","object","ol","optgroup","option","output","p","param","pre","progress","q","rp","rt","ruby","s","samp","script","section","select","small","source","span","strike","strong","style","sub","summary","sup","table","tbody","td","textarea","tfoot","th","thead","time","title","tr","track","tt","u","ul","var","video","wbr"]; - - function inTagsArray(val){ - for(var i=0; i*\/]/.test(ch)) {//removed . dot character original was [,.+>*\/] - return ret(null, "select-op"); - } - else if (/[;{}:\[\]()]/.test(ch)) { //added () char for lesscss original was [;{}:\[\]] - if(ch == ":"){ - stream.eatWhile(/[active|hover|link|visited]/); - if( stream.current().match(/active|hover|link|visited/)){ - return ret("tag", "tag"); - }else{ - return ret(null, ch); - } - }else{ - return ret(null, ch); - } - } - else if (ch == ".") { // lesscss - stream.eatWhile(/[\a-zA-Z0-9\-_]/); - return ret("tag", "tag"); - } - else if (ch == "#") { // lesscss - //we don't eat white-space, we want the hex color and or id only - stream.eatWhile(/[A-Za-z0-9]/); - //check if there is a proper hex color length e.g. #eee || #eeeEEE - if(stream.current().length ===4 || stream.current().length ===7){ - if(stream.current().match(/[A-Fa-f0-9]{6}|[A-Fa-f0-9]{3}/,false) != null){//is there a valid hex color value present in the current stream - //when not a valid hex value, parse as id - if(stream.current().substring(1) != stream.current().match(/[A-Fa-f0-9]{6}|[A-Fa-f0-9]{3}/,false))return ret("atom", "tag"); - //eat white-space - stream.eatSpace(); - //when hex value declaration doesn't end with [;,] but is does with a slash/cc comment treat it as an id, just like the other hex values that don't end with[;,] - if( /[\/<>.(){!$%^&*_\-\\?=+\|#'~`]/.test(stream.peek()) )return ret("atom", "tag"); - //#time { color: #aaa } - else if(stream.peek() == "}" )return ret("number", "unit"); - //we have a valid hex color value, parse as id whenever an element/class is defined after the hex(id) value e.g. #eee aaa || #eee .aaa - else if( /[a-zA-Z\\]/.test(stream.peek()) )return ret("atom", "tag"); - //when a hex value is on the end of a line, parse as id - else if(stream.eol())return ret("atom", "tag"); - //default - else return ret("number", "unit"); - }else{//when not a valid hexvalue in the current stream e.g. 
#footer - stream.eatWhile(/[\w\\\-]/); - return ret("atom", "tag"); - } - }else{ - stream.eatWhile(/[\w\\\-]/); - return ret("atom", "tag"); - } - } - else if (ch == "&") { - stream.eatWhile(/[\w\-]/); - return ret(null, ch); - } - else if (ch == "&") { - stream.eatWhile(/[\w\-]/); - return ret(null, ch); - } - else { - stream.eatWhile(/[\w\\\-_.%]/); - if( stream.peek().match(/\(/) != null ){// lesscss - stream.eatWhile(/[a-zA-Z\s]/); - if(stream.peek() == "(")return ret(null, ch); - }else if( stream.current().match(/\-\d|\-.\d/) ){ // lesscss match e.g.: -5px -0.4 etc... - return ret("number", "unit"); - }else if( inTagsArray(stream.current()) ){ // lesscss match html tags - return ret("tag", "tag"); - }else if( /\/|\)/.test(stream.peek() || stream.eol() || (stream.eatSpace() && stream.peek() == ")")) && stream.current().indexOf(".") !== -1){ - return ret("string", "string");//let url(/images/logo.png) without quotes return as string - }else{ - return ret("variable", "variable"); - } - } - - } - - function tokenSComment(stream, state) {// SComment = Slash comment - stream.skipToEnd(); - state.tokenize = tokenBase; - return ret("comment", "comment"); - } - - function tokenCComment(stream, state) { - var maybeEnd = false, ch; - while ((ch = stream.next()) != null) { - if (maybeEnd && ch == "/") { - state.tokenize = tokenBase; - break; - } - maybeEnd = (ch == "*"); - } - return ret("comment", "comment"); - } - - function tokenSGMLComment(stream, state) { - var dashes = 0, ch; - while ((ch = stream.next()) != null) { - if (dashes >= 2 && ch == ">") { - state.tokenize = tokenBase; - break; - } - dashes = (ch == "-") ? dashes + 1 : 0; - } - return ret("comment", "comment"); - } - - function tokenString(quote) { - return function(stream, state) { - var escaped = false, ch; - while ((ch = stream.next()) != null) { - if (ch == quote && !escaped) - break; - escaped = !escaped && ch == "\\"; - } - if (!escaped) state.tokenize = tokenBase; - return ret("string", "string"); - }; - } - - return { - startState: function(base) { - return {tokenize: tokenBase, - baseIndent: base || 0, - stack: []}; - }, - - token: function(stream, state) { - if (stream.eatSpace()) return null; - var style = state.tokenize(stream, state); - - var context = state.stack[state.stack.length-1]; - if (type == "hash" && context == "rule") style = "atom"; - else if (style == "variable") { - if (context == "rule") style = null; //"tag" - else if (!context || context == "@media{"){ - style = stream.current() == "when" ? "variable" : - stream.string.match(/#/g) != undefined ? null : - /[\s,|\s\)]/.test(stream.peek()) ? "tag" : null; - } - } - - if (context == "rule" && /^[\{\};]$/.test(type)) - state.stack.pop(); - if (type == "{") { - if (context == "@media") state.stack[state.stack.length-1] = "@media{"; - else state.stack.push("{"); - } - else if (type == "}") state.stack.pop(); - else if (type == "@media") state.stack.push("@media"); - else if (context == "{" && type != "comment") state.stack.push("rule"); - return style; - }, - - indent: function(state, textAfter) { - var n = state.stack.length; - if (/^\}/.test(textAfter)) - n -= state.stack[state.stack.length-1] == "rule" ? 
2 : 1; - return state.baseIndent + n * indentUnit; - }, - - electricChars: "}" - }; -}); - -CodeMirror.defineMIME("text/css", "css"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/lua/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/lua/index.html deleted file mode 100644 index 600ddb0..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/lua/index.html +++ /dev/null @@ -1,72 +0,0 @@ - - - - CodeMirror: Lua mode - - - - - - - - -
-CodeMirror: Lua mode
-Loosely based on Franciszek Wawrzak's CodeMirror 1 mode. One
-configuration parameter is supported, specials, to which you can
-provide an array of strings to have those identifiers highlighted
-with the lua-special style.
-MIME types defined: text/x-lua.
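The specials parameter described above is passed as part of a mode object rather than a plain mode name; a minimal sketch (the identifier list is illustrative, not from the original page):

    // Identifiers listed in `specials` get the lua-special style instead
    // of the default `variable` style ("self" and "love" are examples).
    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: {name: "lua", specials: ["self", "love"]},
      lineNumbers: true
    });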
- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/lua/lua.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/lua/lua.js deleted file mode 100644 index 60e84a9..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/lua/lua.js +++ /dev/null @@ -1,140 +0,0 @@ -// LUA mode. Ported to CodeMirror 2 from Franciszek Wawrzak's -// CodeMirror 1 mode. -// highlights keywords, strings, comments (no leveling supported! ("[==[")), tokens, basic indenting - -CodeMirror.defineMode("lua", function(config, parserConfig) { - var indentUnit = config.indentUnit; - - function prefixRE(words) { - return new RegExp("^(?:" + words.join("|") + ")", "i"); - } - function wordRE(words) { - return new RegExp("^(?:" + words.join("|") + ")$", "i"); - } - var specials = wordRE(parserConfig.specials || []); - - // long list of standard functions from lua manual - var builtins = wordRE([ - "_G","_VERSION","assert","collectgarbage","dofile","error","getfenv","getmetatable","ipairs","load", - "loadfile","loadstring","module","next","pairs","pcall","print","rawequal","rawget","rawset","require", - "select","setfenv","setmetatable","tonumber","tostring","type","unpack","xpcall", - - "coroutine.create","coroutine.resume","coroutine.running","coroutine.status","coroutine.wrap","coroutine.yield", - - "debug.debug","debug.getfenv","debug.gethook","debug.getinfo","debug.getlocal","debug.getmetatable", - "debug.getregistry","debug.getupvalue","debug.setfenv","debug.sethook","debug.setlocal","debug.setmetatable", - "debug.setupvalue","debug.traceback", - - "close","flush","lines","read","seek","setvbuf","write", - - "io.close","io.flush","io.input","io.lines","io.open","io.output","io.popen","io.read","io.stderr","io.stdin", - "io.stdout","io.tmpfile","io.type","io.write", - - "math.abs","math.acos","math.asin","math.atan","math.atan2","math.ceil","math.cos","math.cosh","math.deg", - "math.exp","math.floor","math.fmod","math.frexp","math.huge","math.ldexp","math.log","math.log10","math.max", - "math.min","math.modf","math.pi","math.pow","math.rad","math.random","math.randomseed","math.sin","math.sinh", - "math.sqrt","math.tan","math.tanh", - - "os.clock","os.date","os.difftime","os.execute","os.exit","os.getenv","os.remove","os.rename","os.setlocale", - "os.time","os.tmpname", - - "package.cpath","package.loaded","package.loaders","package.loadlib","package.path","package.preload", - "package.seeall", - - "string.byte","string.char","string.dump","string.find","string.format","string.gmatch","string.gsub", - "string.len","string.lower","string.match","string.rep","string.reverse","string.sub","string.upper", - - "table.concat","table.insert","table.maxn","table.remove","table.sort" - ]); - var keywords = wordRE(["and","break","elseif","false","nil","not","or","return", - "true","function", "end", "if", "then", "else", "do", - "while", "repeat", "until", "for", "in", "local" ]); - - var indentTokens = wordRE(["function", "if","repeat","do", "\\(", "{"]); - var dedentTokens = wordRE(["end", "until", "\\)", "}"]); - var dedentPartial = prefixRE(["end", "until", "\\)", "}", "else", "elseif"]); - - function readBracket(stream) { - var level = 0; - while (stream.eat("=")) ++level; - stream.eat("["); - return level; - } - - function normal(stream, state) { - var ch = stream.next(); - if (ch == "-" && stream.eat("-")) { - if (stream.eat("[")) - return (state.cur = bracketed(readBracket(stream), "comment"))(stream, state); - stream.skipToEnd(); - return "comment"; - } - if (ch == "\"" || ch == "'") - return 
(state.cur = string(ch))(stream, state); - if (ch == "[" && /[\[=]/.test(stream.peek())) - return (state.cur = bracketed(readBracket(stream), "string"))(stream, state); - if (/\d/.test(ch)) { - stream.eatWhile(/[\w.%]/); - return "number"; - } - if (/[\w_]/.test(ch)) { - stream.eatWhile(/[\w\\\-_.]/); - return "variable"; - } - return null; - } - - function bracketed(level, style) { - return function(stream, state) { - var curlev = null, ch; - while ((ch = stream.next()) != null) { - if (curlev == null) {if (ch == "]") curlev = 0;} - else if (ch == "=") ++curlev; - else if (ch == "]" && curlev == level) { state.cur = normal; break; } - else curlev = null; - } - return style; - }; - } - - function string(quote) { - return function(stream, state) { - var escaped = false, ch; - while ((ch = stream.next()) != null) { - if (ch == quote && !escaped) break; - escaped = !escaped && ch == "\\"; - } - if (!escaped) state.cur = normal; - return "string"; - }; - } - - return { - startState: function(basecol) { - return {basecol: basecol || 0, indentDepth: 0, cur: normal}; - }, - - token: function(stream, state) { - if (stream.eatSpace()) return null; - var style = state.cur(stream, state); - var word = stream.current(); - if (style == "variable") { - if (keywords.test(word)) style = "keyword"; - else if (builtins.test(word)) style = "builtin"; - else if (specials.test(word)) style = "variable-2"; - } - if ((style != "comment") && (style != "string")){ - if (indentTokens.test(word)) ++state.indentDepth; - else if (dedentTokens.test(word)) --state.indentDepth; - } - return style; - }, - - indent: function(state, textAfter) { - var closing = dedentPartial.test(textAfter); - return state.basecol + indentUnit * (state.indentDepth - (closing ? 1 : 0)); - } - }; -}); - -CodeMirror.defineMIME("text/x-lua", "lua"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/markdown/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/markdown/index.html deleted file mode 100644 index cae8df4..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/markdown/index.html +++ /dev/null @@ -1,338 +0,0 @@ - - - - CodeMirror: Markdown mode - - - - - - - - -
-CodeMirror: Markdown mode
-MIME types defined: text/x-markdown.
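One dependency is worth noting: the markdown.js removed below obtains an HTML sub-mode via CodeMirror.getMode(cmCfg, {name: 'xml', htmlMode: true}), so the xml mode script must be loaded for inline HTML to highlight. A hedged sketch:

    // Assumes xml.js is loaded first; markdown.js delegates inline HTML
    // to the xml mode (see the getMode call in the deleted source).
    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "text/x-markdown"
    });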
- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/markdown/markdown.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/markdown/markdown.js deleted file mode 100644 index af95753..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/markdown/markdown.js +++ /dev/null @@ -1,245 +0,0 @@ -CodeMirror.defineMode("markdown", function(cmCfg, modeCfg) { - - var htmlMode = CodeMirror.getMode(cmCfg, { name: 'xml', htmlMode: true }); - - var header = 'header' - , code = 'comment' - , quote = 'quote' - , list = 'string' - , hr = 'hr' - , linktext = 'link' - , linkhref = 'string' - , em = 'em' - , strong = 'strong' - , emstrong = 'emstrong'; - - var hrRE = /^([*\-=_])(?:\s*\1){2,}\s*$/ - , ulRE = /^[*\-+]\s+/ - , olRE = /^[0-9]+\.\s+/ - , headerRE = /^(?:\={3,}|-{3,})$/ - , textRE = /^[^\[*_\\<>`]+/; - - function switchInline(stream, state, f) { - state.f = state.inline = f; - return f(stream, state); - } - - function switchBlock(stream, state, f) { - state.f = state.block = f; - return f(stream, state); - } - - - // Blocks - - function blankLine(state) { - // Reset EM state - state.em = false; - // Reset STRONG state - state.strong = false; - return null; - } - - function blockNormal(stream, state) { - var match; - if (state.indentationDiff >= 4) { - state.indentation -= state.indentationDiff; - stream.skipToEnd(); - return code; - } else if (stream.eatSpace()) { - return null; - } else if (stream.peek() === '#' || stream.match(headerRE)) { - state.header = true; - } else if (stream.eat('>')) { - state.indentation++; - state.quote = true; - } else if (stream.peek() === '[') { - return switchInline(stream, state, footnoteLink); - } else if (stream.match(hrRE, true)) { - return hr; - } else if (match = stream.match(ulRE, true) || stream.match(olRE, true)) { - state.indentation += match[0].length; - return list; - } - - return switchInline(stream, state, state.inline); - } - - function htmlBlock(stream, state) { - var style = htmlMode.token(stream, state.htmlState); - if (style === 'tag' && state.htmlState.type !== 'openTag' && !state.htmlState.context) { - state.f = inlineNormal; - state.block = blockNormal; - } - return style; - } - - - // Inline - function getType(state) { - var styles = []; - - if (state.strong) { styles.push(state.em ? emstrong : strong); } - else if (state.em) { styles.push(em); } - - if (state.header) { styles.push(header); } - if (state.quote) { styles.push(quote); } - - return styles.length ? styles.join(' ') : null; - } - - function handleText(stream, state) { - if (stream.match(textRE, true)) { - return getType(state); - } - return undefined; - } - - function inlineNormal(stream, state) { - var style = state.text(stream, state) - if (typeof style !== 'undefined') - return style; - - var ch = stream.next(); - - if (ch === '\\') { - stream.next(); - return getType(state); - } - if (ch === '`') { - return switchInline(stream, state, inlineElement(code, '`')); - } - if (ch === '[') { - return switchInline(stream, state, linkText); - } - if (ch === '<' && stream.match(/^\w/, false)) { - stream.backUp(1); - return switchBlock(stream, state, htmlBlock); - } - - var t = getType(state); - if (ch === '*' || ch === '_') { - if (stream.eat(ch)) { - return (state.strong = !state.strong) ? getType(state) : t; - } - return (state.em = !state.em) ? 
getType(state) : t; - } - - return getType(state); - } - - function linkText(stream, state) { - while (!stream.eol()) { - var ch = stream.next(); - if (ch === '\\') stream.next(); - if (ch === ']') { - state.inline = state.f = linkHref; - return linktext; - } - } - return linktext; - } - - function linkHref(stream, state) { - stream.eatSpace(); - var ch = stream.next(); - if (ch === '(' || ch === '[') { - return switchInline(stream, state, inlineElement(linkhref, ch === '(' ? ')' : ']')); - } - return 'error'; - } - - function footnoteLink(stream, state) { - if (stream.match(/^[^\]]*\]:/, true)) { - state.f = footnoteUrl; - return linktext; - } - return switchInline(stream, state, inlineNormal); - } - - function footnoteUrl(stream, state) { - stream.eatSpace(); - stream.match(/^[^\s]+/, true); - state.f = state.inline = inlineNormal; - return linkhref; - } - - function inlineRE(endChar) { - if (!inlineRE[endChar]) { - // match any not-escaped-non-endChar and any escaped char - // then match endChar or eol - inlineRE[endChar] = new RegExp('^(?:[^\\\\\\' + endChar + ']|\\\\.)*(?:\\' + endChar + '|$)'); - } - return inlineRE[endChar]; - } - - function inlineElement(type, endChar, next) { - next = next || inlineNormal; - return function(stream, state) { - stream.match(inlineRE(endChar)); - state.inline = state.f = next; - return type; - }; - } - - return { - startState: function() { - return { - f: blockNormal, - - block: blockNormal, - htmlState: htmlMode.startState(), - indentation: 0, - - inline: inlineNormal, - text: handleText, - em: false, - strong: false, - header: false, - quote: false - }; - }, - - copyState: function(s) { - return { - f: s.f, - - block: s.block, - htmlState: CodeMirror.copyState(htmlMode, s.htmlState), - indentation: s.indentation, - - inline: s.inline, - text: s.text, - em: s.em, - strong: s.strong, - header: s.header, - quote: s.quote - }; - }, - - token: function(stream, state) { - if (stream.sol()) { - if (stream.match(/^\s*$/, true)) { return blankLine(state); } - - // Reset state.header - state.header = false; - // Reset state.quote - state.quote = false; - - state.f = state.block; - var indentation = stream.match(/^\s*/, true)[0].replace(/\t/g, ' ').length; - state.indentationDiff = indentation - state.indentation; - state.indentation = indentation; - if (indentation > 0) { return null; } - } - return state.f(stream, state); - }, - - blankLine: blankLine, - - getType: getType - }; - -}); - -CodeMirror.defineMIME("text/x-markdown", "markdown"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/mysql/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/mysql/index.html deleted file mode 100644 index 006918c..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/mysql/index.html +++ /dev/null @@ -1,41 +0,0 @@ - - - - CodeMirror: MySQL mode - - - - - - - -
-CodeMirror: MySQL mode
-MIME types defined: text/x-mysql.
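A short sketch of the tokenizer behavior in the mysql.js removed below: backtick-quoted identifiers go through tokenOpLiteral and are styled variable-2, and $- or ?-prefixed placeholders also return variable-2. The sample statement is illustrative, not from the deleted demo page:

    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "text/x-mysql"
    });
    // `user_id` and `accounts` -> variable-2 (via tokenOpLiteral);
    // ? -> variable-2; SELECT/FROM/WHERE -> keyword.
    editor.setValue("SELECT `user_id` FROM `accounts` WHERE id = ?;");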
- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/mysql/mysql.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/mysql/mysql.js deleted file mode 100644 index dca5b0f..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/mysql/mysql.js +++ /dev/null @@ -1,188 +0,0 @@ -/* - * MySQL Mode for CodeMirror 2 by MySQL-Tools - * @author James Thorne (partydroid) - * @link http://github.com/partydroid/MySQL-Tools - * @link http://mysqltools.org - * @version 02/Jan/2012 -*/ -CodeMirror.defineMode("mysql", function(config) { - var indentUnit = config.indentUnit; - var curPunc; - - function wordRegexp(words) { - return new RegExp("^(?:" + words.join("|") + ")$", "i"); - } - var ops = wordRegexp(["str", "lang", "langmatches", "datatype", "bound", "sameterm", "isiri", "isuri", - "isblank", "isliteral", "union", "a"]); - var keywords = wordRegexp([ - ('ACCESSIBLE'),('ALTER'),('AS'),('BEFORE'),('BINARY'),('BY'),('CASE'),('CHARACTER'),('COLUMN'),('CONTINUE'),('CROSS'),('CURRENT_TIMESTAMP'),('DATABASE'),('DAY_MICROSECOND'),('DEC'),('DEFAULT'), - ('DESC'),('DISTINCT'),('DOUBLE'),('EACH'),('ENCLOSED'),('EXIT'),('FETCH'),('FLOAT8'),('FOREIGN'),('GRANT'),('HIGH_PRIORITY'),('HOUR_SECOND'),('IN'),('INNER'),('INSERT'),('INT2'),('INT8'), - ('INTO'),('JOIN'),('KILL'),('LEFT'),('LINEAR'),('LOCALTIME'),('LONG'),('LOOP'),('MATCH'),('MEDIUMTEXT'),('MINUTE_SECOND'),('NATURAL'),('NULL'),('OPTIMIZE'),('OR'),('OUTER'),('PRIMARY'), - ('RANGE'),('READ_WRITE'),('REGEXP'),('REPEAT'),('RESTRICT'),('RIGHT'),('SCHEMAS'),('SENSITIVE'),('SHOW'),('SPECIFIC'),('SQLSTATE'),('SQL_CALC_FOUND_ROWS'),('STARTING'),('TERMINATED'), - ('TINYINT'),('TRAILING'),('UNDO'),('UNLOCK'),('USAGE'),('UTC_DATE'),('VALUES'),('VARCHARACTER'),('WHERE'),('WRITE'),('ZEROFILL'),('ALL'),('AND'),('ASENSITIVE'),('BIGINT'),('BOTH'),('CASCADE'), - ('CHAR'),('COLLATE'),('CONSTRAINT'),('CREATE'),('CURRENT_TIME'),('CURSOR'),('DAY_HOUR'),('DAY_SECOND'),('DECLARE'),('DELETE'),('DETERMINISTIC'),('DIV'),('DUAL'),('ELSEIF'),('EXISTS'),('FALSE'), - ('FLOAT4'),('FORCE'),('FULLTEXT'),('HAVING'),('HOUR_MINUTE'),('IGNORE'),('INFILE'),('INSENSITIVE'),('INT1'),('INT4'),('INTERVAL'),('ITERATE'),('KEYS'),('LEAVE'),('LIMIT'),('LOAD'),('LOCK'), - ('LONGTEXT'),('MASTER_SSL_VERIFY_SERVER_CERT'),('MEDIUMINT'),('MINUTE_MICROSECOND'),('MODIFIES'),('NO_WRITE_TO_BINLOG'),('ON'),('OPTIONALLY'),('OUT'),('PRECISION'),('PURGE'),('READS'), - ('REFERENCES'),('RENAME'),('REQUIRE'),('REVOKE'),('SCHEMA'),('SELECT'),('SET'),('SPATIAL'),('SQLEXCEPTION'),('SQL_BIG_RESULT'),('SSL'),('TABLE'),('TINYBLOB'),('TO'),('TRUE'),('UNIQUE'), - ('UPDATE'),('USING'),('UTC_TIMESTAMP'),('VARCHAR'),('WHEN'),('WITH'),('YEAR_MONTH'),('ADD'),('ANALYZE'),('ASC'),('BETWEEN'),('BLOB'),('CALL'),('CHANGE'),('CHECK'),('CONDITION'),('CONVERT'), - ('CURRENT_DATE'),('CURRENT_USER'),('DATABASES'),('DAY_MINUTE'),('DECIMAL'),('DELAYED'),('DESCRIBE'),('DISTINCTROW'),('DROP'),('ELSE'),('ESCAPED'),('EXPLAIN'),('FLOAT'),('FOR'),('FROM'), - ('GROUP'),('HOUR_MICROSECOND'),('IF'),('INDEX'),('INOUT'),('INT'),('INT3'),('INTEGER'),('IS'),('KEY'),('LEADING'),('LIKE'),('LINES'),('LOCALTIMESTAMP'),('LONGBLOB'),('LOW_PRIORITY'), - ('MEDIUMBLOB'),('MIDDLEINT'),('MOD'),('NOT'),('NUMERIC'),('OPTION'),('ORDER'),('OUTFILE'),('PROCEDURE'),('READ'),('REAL'),('RELEASE'),('REPLACE'),('RETURN'),('RLIKE'),('SECOND_MICROSECOND'), - ('SEPARATOR'),('SMALLINT'),('SQL'),('SQLWARNING'),('SQL_SMALL_RESULT'),('STRAIGHT_JOIN'),('THEN'),('TINYTEXT'),('TRIGGER'),('UNION'),('UNSIGNED'),('USE'),('UTC_TIME'),('VARBINARY'),('VARYING'), - 
('WHILE'),('XOR'),('FULL'),('COLUMNS'),('MIN'),('MAX'),('STDEV'),('COUNT') - ]); - var operatorChars = /[*+\-<>=&|]/; - - function tokenBase(stream, state) { - var ch = stream.next(); - curPunc = null; - if (ch == "$" || ch == "?") { - stream.match(/^[\w\d]*/); - return "variable-2"; - } - else if (ch == "<" && !stream.match(/^[\s\u00a0=]/, false)) { - stream.match(/^[^\s\u00a0>]*>?/); - return "atom"; - } - else if (ch == "\"" || ch == "'") { - state.tokenize = tokenLiteral(ch); - return state.tokenize(stream, state); - } - else if (ch == "`") { - state.tokenize = tokenOpLiteral(ch); - return state.tokenize(stream, state); - } - else if (/[{}\(\),\.;\[\]]/.test(ch)) { - curPunc = ch; - return null; - } - else if (ch == "-") { - ch2 = stream.next(); - if(ch2=="-") - { - stream.skipToEnd(); - return "comment"; - } - - } - else if (operatorChars.test(ch)) { - stream.eatWhile(operatorChars); - return null; - } - else if (ch == ":") { - stream.eatWhile(/[\w\d\._\-]/); - return "atom"; - } - else { - stream.eatWhile(/[_\w\d]/); - if (stream.eat(":")) { - stream.eatWhile(/[\w\d_\-]/); - return "atom"; - } - var word = stream.current(), type; - if (ops.test(word)) - return null; - else if (keywords.test(word)) - return "keyword"; - else - return "variable"; - } - } - - function tokenLiteral(quote) { - return function(stream, state) { - var escaped = false, ch; - while ((ch = stream.next()) != null) { - if (ch == quote && !escaped) { - state.tokenize = tokenBase; - break; - } - escaped = !escaped && ch == "\\"; - } - return "string"; - }; - } - - function tokenOpLiteral(quote) { - return function(stream, state) { - var escaped = false, ch; - while ((ch = stream.next()) != null) { - if (ch == quote && !escaped) { - state.tokenize = tokenBase; - break; - } - escaped = !escaped && ch == "\\"; - } - return "variable-2"; - }; - } - - - function pushContext(state, type, col) { - state.context = {prev: state.context, indent: state.indent, col: col, type: type}; - } - function popContext(state) { - state.indent = state.context.indent; - state.context = state.context.prev; - } - - return { - startState: function(base) { - return {tokenize: tokenBase, - context: null, - indent: 0, - col: 0}; - }, - - token: function(stream, state) { - if (stream.sol()) { - if (state.context && state.context.align == null) state.context.align = false; - state.indent = stream.indentation(); - } - if (stream.eatSpace()) return null; - var style = state.tokenize(stream, state); - - if (style != "comment" && state.context && state.context.align == null && state.context.type != "pattern") { - state.context.align = true; - } - - if (curPunc == "(") pushContext(state, ")", stream.column()); - else if (curPunc == "[") pushContext(state, "]", stream.column()); - else if (curPunc == "{") pushContext(state, "}", stream.column()); - else if (/[\]\}\)]/.test(curPunc)) { - while (state.context && state.context.type == "pattern") popContext(state); - if (state.context && curPunc == state.context.type) popContext(state); - } - else if (curPunc == "." 
&& state.context && state.context.type == "pattern") popContext(state); - else if (/atom|string|variable/.test(style) && state.context) { - if (/[\}\]]/.test(state.context.type)) - pushContext(state, "pattern", stream.column()); - else if (state.context.type == "pattern" && !state.context.align) { - state.context.align = true; - state.context.col = stream.column(); - } - } - - return style; - }, - - indent: function(state, textAfter) { - var firstChar = textAfter && textAfter.charAt(0); - var context = state.context; - if (/[\]\}]/.test(firstChar)) - while (context && context.type == "pattern") context = context.prev; - - var closing = context && firstChar == context.type; - if (!context) - return 0; - else if (context.type == "pattern") - return context.col; - else if (context.align) - return context.col + (closing ? 0 : 1); - else - return context.indent + (closing ? 0 : indentUnit); - } - }; -}); - -CodeMirror.defineMIME("text/x-mysql", "mysql"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/ntriples/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/ntriples/index.html deleted file mode 100644 index 08d33ba..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/ntriples/index.html +++ /dev/null @@ -1,32 +0,0 @@ - - - - CodeMirror: NTriples mode - - - - - - - -
-CodeMirror: NTriples mode
-MIME types defined: text/n-triples.
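The comment block in the ntriples.js removed below describes a per-line state machine (subject -> predicate -> object, with literal language/type suffixes). A minimal sketch feeding it one well-formed triple; the sample data is illustrative:

    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "text/n-triples"
    });
    // URIs -> variable, the quoted literal -> string, the @en tag -> string-2.
    editor.setValue('<http://example.org/a> <http://example.org/b> "text"@en .');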
- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/ntriples/ntriples.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/ntriples/ntriples.js deleted file mode 100644 index 3b6cb41..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/ntriples/ntriples.js +++ /dev/null @@ -1,172 +0,0 @@ -/********************************************************** -* This script provides syntax highlighting support for -* the Ntriples format. -* Ntriples format specification: -* http://www.w3.org/TR/rdf-testcases/#ntriples -***********************************************************/ - -/* - The following expression defines the defined ASF grammar transitions. - - pre_subject -> - { - ( writing_subject_uri | writing_bnode_uri ) - -> pre_predicate - -> writing_predicate_uri - -> pre_object - -> writing_object_uri | writing_object_bnode | - ( - writing_object_literal - -> writing_literal_lang | writing_literal_type - ) - -> post_object - -> BEGIN - } otherwise { - -> ERROR - } -*/ -CodeMirror.defineMode("ntriples", function() { - - Location = { - PRE_SUBJECT : 0, - WRITING_SUB_URI : 1, - WRITING_BNODE_URI : 2, - PRE_PRED : 3, - WRITING_PRED_URI : 4, - PRE_OBJ : 5, - WRITING_OBJ_URI : 6, - WRITING_OBJ_BNODE : 7, - WRITING_OBJ_LITERAL : 8, - WRITING_LIT_LANG : 9, - WRITING_LIT_TYPE : 10, - POST_OBJ : 11, - ERROR : 12 - }; - function transitState(currState, c) { - var currLocation = currState.location; - var ret; - - // Opening. - if (currLocation == Location.PRE_SUBJECT && c == '<') ret = Location.WRITING_SUB_URI; - else if(currLocation == Location.PRE_SUBJECT && c == '_') ret = Location.WRITING_BNODE_URI; - else if(currLocation == Location.PRE_PRED && c == '<') ret = Location.WRITING_PRED_URI; - else if(currLocation == Location.PRE_OBJ && c == '<') ret = Location.WRITING_OBJ_URI; - else if(currLocation == Location.PRE_OBJ && c == '_') ret = Location.WRITING_OBJ_BNODE; - else if(currLocation == Location.PRE_OBJ && c == '"') ret = Location.WRITING_OBJ_LITERAL; - - // Closing. - else if(currLocation == Location.WRITING_SUB_URI && c == '>') ret = Location.PRE_PRED; - else if(currLocation == Location.WRITING_BNODE_URI && c == ' ') ret = Location.PRE_PRED; - else if(currLocation == Location.WRITING_PRED_URI && c == '>') ret = Location.PRE_OBJ; - else if(currLocation == Location.WRITING_OBJ_URI && c == '>') ret = Location.POST_OBJ; - else if(currLocation == Location.WRITING_OBJ_BNODE && c == ' ') ret = Location.POST_OBJ; - else if(currLocation == Location.WRITING_OBJ_LITERAL && c == '"') ret = Location.POST_OBJ; - else if(currLocation == Location.WRITING_LIT_LANG && c == ' ') ret = Location.POST_OBJ; - else if(currLocation == Location.WRITING_LIT_TYPE && c == '>') ret = Location.POST_OBJ; - - // Closing typed and language literal. - else if(currLocation == Location.WRITING_OBJ_LITERAL && c == '@') ret = Location.WRITING_LIT_LANG; - else if(currLocation == Location.WRITING_OBJ_LITERAL && c == '^') ret = Location.WRITING_LIT_TYPE; - - // Spaces. - else if( c == ' ' && - ( - currLocation == Location.PRE_SUBJECT || - currLocation == Location.PRE_PRED || - currLocation == Location.PRE_OBJ || - currLocation == Location.POST_OBJ - ) - ) ret = currLocation; - - // Reset. 
- else if(currLocation == Location.POST_OBJ && c == '.') ret = Location.PRE_SUBJECT; - - // Error - else ret = Location.ERROR; - - currState.location=ret; - } - - untilSpace = function(c) { return c != ' '; }; - untilEndURI = function(c) { return c != '>'; }; - return { - startState: function() { - return { - location : Location.PRE_SUBJECT, - uris : [], - anchors : [], - bnodes : [], - langs : [], - types : [] - }; - }, - token: function(stream, state) { - var ch = stream.next(); - if(ch == '<') { - transitState(state, ch); - var parsedURI = ''; - stream.eatWhile( function(c) { if( c != '#' && c != '>' ) { parsedURI += c; return true; } return false;} ); - state.uris.push(parsedURI); - if( stream.match('#', false) ) return 'variable'; - stream.next(); - transitState(state, '>'); - return 'variable'; - } - if(ch == '#') { - var parsedAnchor = ''; - stream.eatWhile(function(c) { if(c != '>' && c != ' ') { parsedAnchor+= c; return true; } return false}); - state.anchors.push(parsedAnchor); - return 'variable-2'; - } - if(ch == '>') { - transitState(state, '>'); - return 'variable'; - } - if(ch == '_') { - transitState(state, ch); - var parsedBNode = ''; - stream.eatWhile(function(c) { if( c != ' ' ) { parsedBNode += c; return true; } return false;}); - state.bnodes.push(parsedBNode); - stream.next(); - transitState(state, ' '); - return 'builtin'; - } - if(ch == '"') { - transitState(state, ch); - stream.eatWhile( function(c) { return c != '"'; } ); - stream.next(); - if( stream.peek() != '@' && stream.peek() != '^' ) { - transitState(state, '"'); - } - return 'string'; - } - if( ch == '@' ) { - transitState(state, '@'); - var parsedLang = ''; - stream.eatWhile(function(c) { if( c != ' ' ) { parsedLang += c; return true; } return false;}); - state.langs.push(parsedLang); - stream.next(); - transitState(state, ' '); - return 'string-2'; - } - if( ch == '^' ) { - stream.next(); - transitState(state, '^'); - var parsedType = ''; - stream.eatWhile(function(c) { if( c != '>' ) { parsedType += c; return true; } return false;} ); - state.types.push(parsedType); - stream.next(); - transitState(state, '>'); - return 'variable'; - } - if( ch == ' ' ) { - transitState(state, ch); - } - if( ch == '.' ) { - transitState(state, ch); - } - } - }; -}); - -CodeMirror.defineMIME("text/n-triples", "ntriples"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/pascal/LICENSE b/src/lib/flask_debugtoolbar/static/codemirror/mode/pascal/LICENSE deleted file mode 100644 index 8e3747e..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/pascal/LICENSE +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2011 souceLair - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/pascal/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/pascal/index.html deleted file mode 100644 index 6af6b46..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/pascal/index.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - CodeMirror: Pascal mode - - - - - - - -
-CodeMirror: Pascal mode
-MIME types defined: text/x-pascal.
- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/pascal/pascal.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/pascal/pascal.js deleted file mode 100644 index 9ac522f..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/pascal/pascal.js +++ /dev/null @@ -1,94 +0,0 @@ -CodeMirror.defineMode("pascal", function(config) { - function words(str) { - var obj = {}, words = str.split(" "); - for (var i = 0; i < words.length; ++i) obj[words[i]] = true; - return obj; - } - var keywords = words("and array begin case const div do downto else end file for forward integer " + - "boolean char function goto if in label mod nil not of or packed procedure " + - "program record repeat set string then to type until var while with"); - var atoms = {"null": true}; - - var isOperatorChar = /[+\-*&%=<>!?|\/]/; - - function tokenBase(stream, state) { - var ch = stream.next(); - if (ch == "#" && state.startOfLine) { - stream.skipToEnd(); - return "meta"; - } - if (ch == '"' || ch == "'") { - state.tokenize = tokenString(ch); - return state.tokenize(stream, state); - } - if (ch == "(" && stream.eat("*")) { - state.tokenize = tokenComment; - return tokenComment(stream, state); - } - if (/[\[\]{}\(\),;\:\.]/.test(ch)) { - return null - } - if (/\d/.test(ch)) { - stream.eatWhile(/[\w\.]/); - return "number"; - } - if (ch == "/") { - if (stream.eat("/")) { - stream.skipToEnd(); - return "comment"; - } - } - if (isOperatorChar.test(ch)) { - stream.eatWhile(isOperatorChar); - return "operator"; - } - stream.eatWhile(/[\w\$_]/); - var cur = stream.current(); - if (keywords.propertyIsEnumerable(cur)) return "keyword"; - if (atoms.propertyIsEnumerable(cur)) return "atom"; - return "word"; - } - - function tokenString(quote) { - return function(stream, state) { - var escaped = false, next, end = false; - while ((next = stream.next()) != null) { - if (next == quote && !escaped) {end = true; break;} - escaped = !escaped && next == "\\"; - } - if (end || !escaped) state.tokenize = null; - return "string"; - }; - } - - function tokenComment(stream, state) { - var maybeEnd = false, ch; - while (ch = stream.next()) { - if (ch == ")" && maybeEnd) { - state.tokenize = null; - break; - } - maybeEnd = (ch == "*"); - } - return "comment"; - } - - // Interface - - return { - startState: function(basecolumn) { - return {tokenize: null}; - }, - - token: function(stream, state) { - if (stream.eatSpace()) return null; - var style = (state.tokenize || tokenBase)(stream, state); - if (style == "comment" || style == "meta") return style; - return style; - }, - - electricChars: "{}" - }; -}); - -CodeMirror.defineMIME("text/x-pascal", "pascal"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/perl/LICENSE b/src/lib/flask_debugtoolbar/static/codemirror/mode/perl/LICENSE deleted file mode 100644 index 96f4115..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/perl/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2011 by Sabaca under the MIT license. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/perl/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/perl/index.html deleted file mode 100644 index 5ef55d3..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/perl/index.html +++ /dev/null @@ -1,62 +0,0 @@ - - - - CodeMirror: Perl mode - - - - - - - -
-CodeMirror: Perl mode
-MIME types defined: text/x-perl.
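Beyond defining the mode, the perl.js removed below patches CodeMirror.StringStream.prototype with look, prefix, suffix, and eatSuffix helpers for look-ahead/look-behind, so loading it mutates shared editor state. A minimal usage sketch:

    // Loading perl.js adds look()/prefix()/suffix()/eatSuffix() to every
    // StringStream instance (see the prototype assignments in the source).
    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "text/x-perl",
      lineNumbers: true
    });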
- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/perl/perl.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/perl/perl.js deleted file mode 100644 index 7fa129e..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/perl/perl.js +++ /dev/null @@ -1,816 +0,0 @@ -// CodeMirror2 mode/perl/perl.js (text/x-perl) beta 0.10 (2011-11-08) -// This is a part of CodeMirror from https://github.com/sabaca/CodeMirror_mode_perl (mail@sabaca.com) -CodeMirror.defineMode("perl",function(config,parserConfig){ - // http://perldoc.perl.org - var PERL={ // null - magic touch - // 1 - keyword - // 2 - def - // 3 - atom - // 4 - operator - // 5 - variable-2 (predefined) - // [x,y] - x=1,2,3; y=must be defined if x{...} - // PERL operators - '->' : 4, - '++' : 4, - '--' : 4, - '**' : 4, - // ! ~ \ and unary + and - - '=~' : 4, - '!~' : 4, - '*' : 4, - '/' : 4, - '%' : 4, - 'x' : 4, - '+' : 4, - '-' : 4, - '.' : 4, - '<<' : 4, - '>>' : 4, - // named unary operators - '<' : 4, - '>' : 4, - '<=' : 4, - '>=' : 4, - 'lt' : 4, - 'gt' : 4, - 'le' : 4, - 'ge' : 4, - '==' : 4, - '!=' : 4, - '<=>' : 4, - 'eq' : 4, - 'ne' : 4, - 'cmp' : 4, - '~~' : 4, - '&' : 4, - '|' : 4, - '^' : 4, - '&&' : 4, - '||' : 4, - '//' : 4, - '..' : 4, - '...' : 4, - '?' : 4, - ':' : 4, - '=' : 4, - '+=' : 4, - '-=' : 4, - '*=' : 4, // etc. ??? - ',' : 4, - '=>' : 4, - '::' : 4, - // list operators (rightward) - 'not' : 4, - 'and' : 4, - 'or' : 4, - 'xor' : 4, - // PERL predefined variables (I know, what this is a paranoid idea, but may be needed for people, who learn PERL, and for me as well, ...and may be for you?;) - 'BEGIN' : [5,1], - 'END' : [5,1], - 'PRINT' : [5,1], - 'PRINTF' : [5,1], - 'GETC' : [5,1], - 'READ' : [5,1], - 'READLINE' : [5,1], - 'DESTROY' : [5,1], - 'TIE' : [5,1], - 'TIEHANDLE' : [5,1], - 'UNTIE' : [5,1], - 'STDIN' : 5, - 'STDIN_TOP' : 5, - 'STDOUT' : 5, - 'STDOUT_TOP' : 5, - 'STDERR' : 5, - 'STDERR_TOP' : 5, - '$ARG' : 5, - '$_' : 5, - '@ARG' : 5, - '@_' : 5, - '$LIST_SEPARATOR' : 5, - '$"' : 5, - '$PROCESS_ID' : 5, - '$PID' : 5, - '$$' : 5, - '$REAL_GROUP_ID' : 5, - '$GID' : 5, - '$(' : 5, - '$EFFECTIVE_GROUP_ID' : 5, - '$EGID' : 5, - '$)' : 5, - '$PROGRAM_NAME' : 5, - '$0' : 5, - '$SUBSCRIPT_SEPARATOR' : 5, - '$SUBSEP' : 5, - '$;' : 5, - '$REAL_USER_ID' : 5, - '$UID' : 5, - '$<' : 5, - '$EFFECTIVE_USER_ID' : 5, - '$EUID' : 5, - '$>' : 5, - '$a' : 5, - '$b' : 5, - '$COMPILING' : 5, - '$^C' : 5, - '$DEBUGGING' : 5, - '$^D' : 5, - '${^ENCODING}' : 5, - '$ENV' : 5, - '%ENV' : 5, - '$SYSTEM_FD_MAX' : 5, - '$^F' : 5, - '@F' : 5, - '${^GLOBAL_PHASE}' : 5, - '$^H' : 5, - '%^H' : 5, - '@INC' : 5, - '%INC' : 5, - '$INPLACE_EDIT' : 5, - '$^I' : 5, - '$^M' : 5, - '$OSNAME' : 5, - '$^O' : 5, - '${^OPEN}' : 5, - '$PERLDB' : 5, - '$^P' : 5, - '$SIG' : 5, - '%SIG' : 5, - '$BASETIME' : 5, - '$^T' : 5, - '${^TAINT}' : 5, - '${^UNICODE}' : 5, - '${^UTF8CACHE}' : 5, - '${^UTF8LOCALE}' : 5, - '$PERL_VERSION' : 5, - '$^V' : 5, - '${^WIN32_SLOPPY_STAT}' : 5, - '$EXECUTABLE_NAME' : 5, - '$^X' : 5, - '$1' : 5, // - regexp $1, $2... 
- '$MATCH' : 5, - '$&' : 5, - '${^MATCH}' : 5, - '$PREMATCH' : 5, - '$`' : 5, - '${^PREMATCH}' : 5, - '$POSTMATCH' : 5, - "$'" : 5, - '${^POSTMATCH}' : 5, - '$LAST_PAREN_MATCH' : 5, - '$+' : 5, - '$LAST_SUBMATCH_RESULT' : 5, - '$^N' : 5, - '@LAST_MATCH_END' : 5, - '@+' : 5, - '%LAST_PAREN_MATCH' : 5, - '%+' : 5, - '@LAST_MATCH_START' : 5, - '@-' : 5, - '%LAST_MATCH_START' : 5, - '%-' : 5, - '$LAST_REGEXP_CODE_RESULT' : 5, - '$^R' : 5, - '${^RE_DEBUG_FLAGS}' : 5, - '${^RE_TRIE_MAXBUF}' : 5, - '$ARGV' : 5, - '@ARGV' : 5, - 'ARGV' : 5, - 'ARGVOUT' : 5, - '$OUTPUT_FIELD_SEPARATOR' : 5, - '$OFS' : 5, - '$,' : 5, - '$INPUT_LINE_NUMBER' : 5, - '$NR' : 5, - '$.' : 5, - '$INPUT_RECORD_SEPARATOR' : 5, - '$RS' : 5, - '$/' : 5, - '$OUTPUT_RECORD_SEPARATOR' : 5, - '$ORS' : 5, - '$\\' : 5, - '$OUTPUT_AUTOFLUSH' : 5, - '$|' : 5, - '$ACCUMULATOR' : 5, - '$^A' : 5, - '$FORMAT_FORMFEED' : 5, - '$^L' : 5, - '$FORMAT_PAGE_NUMBER' : 5, - '$%' : 5, - '$FORMAT_LINES_LEFT' : 5, - '$-' : 5, - '$FORMAT_LINE_BREAK_CHARACTERS' : 5, - '$:' : 5, - '$FORMAT_LINES_PER_PAGE' : 5, - '$=' : 5, - '$FORMAT_TOP_NAME' : 5, - '$^' : 5, - '$FORMAT_NAME' : 5, - '$~' : 5, - '${^CHILD_ERROR_NATIVE}' : 5, - '$EXTENDED_OS_ERROR' : 5, - '$^E' : 5, - '$EXCEPTIONS_BEING_CAUGHT' : 5, - '$^S' : 5, - '$WARNING' : 5, - '$^W' : 5, - '${^WARNING_BITS}' : 5, - '$OS_ERROR' : 5, - '$ERRNO' : 5, - '$!' : 5, - '%OS_ERROR' : 5, - '%ERRNO' : 5, - '%!' : 5, - '$CHILD_ERROR' : 5, - '$?' : 5, - '$EVAL_ERROR' : 5, - '$@' : 5, - '$OFMT' : 5, - '$#' : 5, - '$*' : 5, - '$ARRAY_BASE' : 5, - '$[' : 5, - '$OLD_PERL_VERSION' : 5, - '$]' : 5, - // PERL blocks - 'if' :[1,1], - elsif :[1,1], - 'else' :[1,1], - 'while' :[1,1], - unless :[1,1], - 'for' :[1,1], - foreach :[1,1], - // PERL functions - 'abs' :1, // - absolute value function - accept :1, // - accept an incoming socket connect - alarm :1, // - schedule a SIGALRM - 'atan2' :1, // - arctangent of Y/X in the range -PI to PI - bind :1, // - binds an address to a socket - binmode :1, // - prepare binary files for I/O - bless :1, // - create an object - bootstrap :1, // - 'break' :1, // - break out of a "given" block - caller :1, // - get context of the current subroutine call - chdir :1, // - change your current working directory - chmod :1, // - changes the permissions on a list of files - chomp :1, // - remove a trailing record separator from a string - chop :1, // - remove the last character from a string - chown :1, // - change the owership on a list of files - chr :1, // - get character this number represents - chroot :1, // - make directory new root for path lookups - close :1, // - close file (or pipe or socket) handle - closedir :1, // - close directory handle - connect :1, // - connect to a remote socket - 'continue' :[1,1], // - optional trailing block in a while or foreach - 'cos' :1, // - cosine function - crypt :1, // - one-way passwd-style encryption - dbmclose :1, // - breaks binding on a tied dbm file - dbmopen :1, // - create binding on a tied dbm file - 'default' :1, // - defined :1, // - test whether a value, variable, or function is defined - 'delete' :1, // - deletes a value from a hash - die :1, // - raise an exception or bail out - 'do' :1, // - turn a BLOCK into a TERM - dump :1, // - create an immediate core dump - each :1, // - retrieve the next key/value pair from a hash - endgrent :1, // - be done using group file - endhostent :1, // - be done using hosts file - endnetent :1, // - be done using networks file - endprotoent :1, // - be done using protocols file - endpwent :1, // - be 
done using passwd file - endservent :1, // - be done using services file - eof :1, // - test a filehandle for its end - 'eval' :1, // - catch exceptions or compile and run code - 'exec' :1, // - abandon this program to run another - exists :1, // - test whether a hash key is present - exit :1, // - terminate this program - 'exp' :1, // - raise I to a power - fcntl :1, // - file control system call - fileno :1, // - return file descriptor from filehandle - flock :1, // - lock an entire file with an advisory lock - fork :1, // - create a new process just like this one - format :1, // - declare a picture format with use by the write() function - formline :1, // - internal function used for formats - getc :1, // - get the next character from the filehandle - getgrent :1, // - get next group record - getgrgid :1, // - get group record given group user ID - getgrnam :1, // - get group record given group name - gethostbyaddr :1, // - get host record given its address - gethostbyname :1, // - get host record given name - gethostent :1, // - get next hosts record - getlogin :1, // - return who logged in at this tty - getnetbyaddr :1, // - get network record given its address - getnetbyname :1, // - get networks record given name - getnetent :1, // - get next networks record - getpeername :1, // - find the other end of a socket connection - getpgrp :1, // - get process group - getppid :1, // - get parent process ID - getpriority :1, // - get current nice value - getprotobyname :1, // - get protocol record given name - getprotobynumber :1, // - get protocol record numeric protocol - getprotoent :1, // - get next protocols record - getpwent :1, // - get next passwd record - getpwnam :1, // - get passwd record given user login name - getpwuid :1, // - get passwd record given user ID - getservbyname :1, // - get services record given its name - getservbyport :1, // - get services record given numeric port - getservent :1, // - get next services record - getsockname :1, // - retrieve the sockaddr for a given socket - getsockopt :1, // - get socket options on a given socket - given :1, // - glob :1, // - expand filenames using wildcards - gmtime :1, // - convert UNIX time into record or string using Greenwich time - 'goto' :1, // - create spaghetti code - grep :1, // - locate elements in a list test true against a given criterion - hex :1, // - convert a string to a hexadecimal number - 'import' :1, // - patch a module's namespace into your own - index :1, // - find a substring within a string - 'int' :1, // - get the integer portion of a number - ioctl :1, // - system-dependent device control system call - 'join' :1, // - join a list into a string using a separator - keys :1, // - retrieve list of indices from a hash - kill :1, // - send a signal to a process or process group - last :1, // - exit a block prematurely - lc :1, // - return lower-case version of a string - lcfirst :1, // - return a string with just the next letter in lower case - length :1, // - return the number of bytes in a string - 'link' :1, // - create a hard link in the filesytem - listen :1, // - register your socket as a server - local : 2, // - create a temporary value for a global variable (dynamic scoping) - localtime :1, // - convert UNIX time into record or string using local time - lock :1, // - get a thread lock on a variable, subroutine, or method - 'log' :1, // - retrieve the natural logarithm for a number - lstat :1, // - stat a symbolic link - m :null, // - match a string with a regular expression pattern - map :1, // - 
apply a change to a list to get back a new list with the changes - mkdir :1, // - create a directory - msgctl :1, // - SysV IPC message control operations - msgget :1, // - get SysV IPC message queue - msgrcv :1, // - receive a SysV IPC message from a message queue - msgsnd :1, // - send a SysV IPC message to a message queue - my : 2, // - declare and assign a local variable (lexical scoping) - 'new' :1, // - next :1, // - iterate a block prematurely - no :1, // - unimport some module symbols or semantics at compile time - oct :1, // - convert a string to an octal number - open :1, // - open a file, pipe, or descriptor - opendir :1, // - open a directory - ord :1, // - find a character's numeric representation - our : 2, // - declare and assign a package variable (lexical scoping) - pack :1, // - convert a list into a binary representation - 'package' :1, // - declare a separate global namespace - pipe :1, // - open a pair of connected filehandles - pop :1, // - remove the last element from an array and return it - pos :1, // - find or set the offset for the last/next m//g search - print :1, // - output a list to a filehandle - printf :1, // - output a formatted list to a filehandle - prototype :1, // - get the prototype (if any) of a subroutine - push :1, // - append one or more elements to an array - q :null, // - singly quote a string - qq :null, // - doubly quote a string - qr :null, // - Compile pattern - quotemeta :null, // - quote regular expression magic characters - qw :null, // - quote a list of words - qx :null, // - backquote quote a string - rand :1, // - retrieve the next pseudorandom number - read :1, // - fixed-length buffered input from a filehandle - readdir :1, // - get a directory from a directory handle - readline :1, // - fetch a record from a file - readlink :1, // - determine where a symbolic link is pointing - readpipe :1, // - execute a system command and collect standard output - recv :1, // - receive a message over a Socket - redo :1, // - start this loop iteration over again - ref :1, // - find out the type of thing being referenced - rename :1, // - change a filename - require :1, // - load in external functions from a library at runtime - reset :1, // - clear all variables of a given name - 'return' :1, // - get out of a function early - reverse :1, // - flip a string or a list - rewinddir :1, // - reset directory handle - rindex :1, // - right-to-left substring search - rmdir :1, // - remove a directory - s :null, // - replace a pattern with a string - say :1, // - print with newline - scalar :1, // - force a scalar context - seek :1, // - reposition file pointer for random-access I/O - seekdir :1, // - reposition directory pointer - select :1, // - reset default output or do I/O multiplexing - semctl :1, // - SysV semaphore control operations - semget :1, // - get set of SysV semaphores - semop :1, // - SysV semaphore operations - send :1, // - send a message over a socket - setgrent :1, // - prepare group file for use - sethostent :1, // - prepare hosts file for use - setnetent :1, // - prepare networks file for use - setpgrp :1, // - set the process group of a process - setpriority :1, // - set a process's nice value - setprotoent :1, // - prepare protocols file for use - setpwent :1, // - prepare passwd file for use - setservent :1, // - prepare services file for use - setsockopt :1, // - set some socket options - shift :1, // - remove the first element of an array, and return it - shmctl :1, // - SysV shared memory operations - shmget :1, // - get SysV 
shared memory segment identifier - shmread :1, // - read SysV shared memory - shmwrite :1, // - write SysV shared memory - shutdown :1, // - close down just half of a socket connection - 'sin' :1, // - return the sine of a number - sleep :1, // - block for some number of seconds - socket :1, // - create a socket - socketpair :1, // - create a pair of sockets - 'sort' :1, // - sort a list of values - splice :1, // - add or remove elements anywhere in an array - 'split' :1, // - split up a string using a regexp delimiter - sprintf :1, // - formatted print into a string - 'sqrt' :1, // - square root function - srand :1, // - seed the random number generator - stat :1, // - get a file's status information - state :1, // - declare and assign a state variable (persistent lexical scoping) - study :1, // - optimize input data for repeated searches - 'sub' :1, // - declare a subroutine, possibly anonymously - 'substr' :1, // - get or alter a portion of a stirng - symlink :1, // - create a symbolic link to a file - syscall :1, // - execute an arbitrary system call - sysopen :1, // - open a file, pipe, or descriptor - sysread :1, // - fixed-length unbuffered input from a filehandle - sysseek :1, // - position I/O pointer on handle used with sysread and syswrite - system :1, // - run a separate program - syswrite :1, // - fixed-length unbuffered output to a filehandle - tell :1, // - get current seekpointer on a filehandle - telldir :1, // - get current seekpointer on a directory handle - tie :1, // - bind a variable to an object class - tied :1, // - get a reference to the object underlying a tied variable - time :1, // - return number of seconds since 1970 - times :1, // - return elapsed time for self and child processes - tr :null, // - transliterate a string - truncate :1, // - shorten a file - uc :1, // - return upper-case version of a string - ucfirst :1, // - return a string with just the next letter in upper case - umask :1, // - set file creation mode mask - undef :1, // - remove a variable or function definition - unlink :1, // - remove one link to a file - unpack :1, // - convert binary structure into normal perl variables - unshift :1, // - prepend more elements to the beginning of a list - untie :1, // - break a tie binding to a variable - use :1, // - load in a module at compile time - utime :1, // - set a file's last access and modify times - values :1, // - return a list of the values in a hash - vec :1, // - test or set particular bits in a string - wait :1, // - wait for any child process to die - waitpid :1, // - wait for a particular child process to die - wantarray :1, // - get void vs scalar vs list context of current subroutine call - warn :1, // - print debugging info - when :1, // - write :1, // - print a picture record - y :null}; // - transliterate a string - - var RXstyle="string-2"; - var RXmodifiers=/[goseximacplud]/; // NOTE: "m", "s", "y" and "tr" need to correct real modifiers for each regexp type - - function tokenChain(stream,state,chain,style,tail){ // NOTE: chain.length > 2 is not working now (it's for s[...][...]geos;) - state.chain=null; // 12 3tail - state.style=null; - state.tail=null; - state.tokenize=function(stream,state){ - var e=false,c,i=0; - while(c=stream.next()){ - if(c===chain[i]&&!e){ - if(chain[++i]!==undefined){ - state.chain=chain[i]; - state.style=style; - state.tail=tail} - else if(tail) - stream.eatWhile(tail); - state.tokenize=tokenPerl; - return style} - e=!e&&c=="\\"} - return style}; - return state.tokenize(stream,state)} - - function 
tokenSOMETHING(stream,state,string){ - state.tokenize=function(stream,state){ - if(stream.string==string) - state.tokenize=tokenPerl; - stream.skipToEnd(); - return "string"}; - return state.tokenize(stream,state)} - - function tokenPerl(stream,state){ - if(stream.eatSpace()) - return null; - if(state.chain) - return tokenChain(stream,state,state.chain,state.style,state.tail); - if(stream.match(/^\-?[\d\.]/,false)) - if(stream.match(/^(\-?(\d*\.\d+(e[+-]?\d+)?|\d+\.\d*)|0x[\da-fA-F]+|0b[01]+|\d+(e[+-]?\d+)?)/)) - return 'number'; - if(stream.match(/^<<(?=\w)/)){ // NOTE: <"],RXstyle,RXmodifiers)} - if(/[\^'"!~\/]/.test(c)){ - stream.eatSuffix(1); - return tokenChain(stream,state,[stream.eat(c)],RXstyle,RXmodifiers)}} - else if(c=="q"){ - c=stream.look(1); - if(c=="("){ - stream.eatSuffix(2); - return tokenChain(stream,state,[")"],"string")} - if(c=="["){ - stream.eatSuffix(2); - return tokenChain(stream,state,["]"],"string")} - if(c=="{"){ - stream.eatSuffix(2); - return tokenChain(stream,state,["}"],"string")} - if(c=="<"){ - stream.eatSuffix(2); - return tokenChain(stream,state,[">"],"string")} - if(/[\^'"!~\/]/.test(c)){ - stream.eatSuffix(1); - return tokenChain(stream,state,[stream.eat(c)],"string")}} - else if(c=="w"){ - c=stream.look(1); - if(c=="("){ - stream.eatSuffix(2); - return tokenChain(stream,state,[")"],"bracket")} - if(c=="["){ - stream.eatSuffix(2); - return tokenChain(stream,state,["]"],"bracket")} - if(c=="{"){ - stream.eatSuffix(2); - return tokenChain(stream,state,["}"],"bracket")} - if(c=="<"){ - stream.eatSuffix(2); - return tokenChain(stream,state,[">"],"bracket")} - if(/[\^'"!~\/]/.test(c)){ - stream.eatSuffix(1); - return tokenChain(stream,state,[stream.eat(c)],"bracket")}} - else if(c=="r"){ - c=stream.look(1); - if(c=="("){ - stream.eatSuffix(2); - return tokenChain(stream,state,[")"],RXstyle,RXmodifiers)} - if(c=="["){ - stream.eatSuffix(2); - return tokenChain(stream,state,["]"],RXstyle,RXmodifiers)} - if(c=="{"){ - stream.eatSuffix(2); - return tokenChain(stream,state,["}"],RXstyle,RXmodifiers)} - if(c=="<"){ - stream.eatSuffix(2); - return tokenChain(stream,state,[">"],RXstyle,RXmodifiers)} - if(/[\^'"!~\/]/.test(c)){ - stream.eatSuffix(1); - return tokenChain(stream,state,[stream.eat(c)],RXstyle,RXmodifiers)}} - else if(/[\^'"!~\/(\[{<]/.test(c)){ - if(c=="("){ - stream.eatSuffix(1); - return tokenChain(stream,state,[")"],"string")} - if(c=="["){ - stream.eatSuffix(1); - return tokenChain(stream,state,["]"],"string")} - if(c=="{"){ - stream.eatSuffix(1); - return tokenChain(stream,state,["}"],"string")} - if(c=="<"){ - stream.eatSuffix(1); - return tokenChain(stream,state,[">"],"string")} - if(/[\^'"!~\/]/.test(c)){ - return tokenChain(stream,state,[stream.eat(c)],"string")}}}} - if(ch=="m"){ - var c=stream.look(-2); - if(!(c&&/\w/.test(c))){ - c=stream.eat(/[(\[{<\^'"!~\/]/); - if(c){ - if(/[\^'"!~\/]/.test(c)){ - return tokenChain(stream,state,[c],RXstyle,RXmodifiers)} - if(c=="("){ - return tokenChain(stream,state,[")"],RXstyle,RXmodifiers)} - if(c=="["){ - return tokenChain(stream,state,["]"],RXstyle,RXmodifiers)} - if(c=="{"){ - return tokenChain(stream,state,["}"],RXstyle,RXmodifiers)} - if(c=="<"){ - return tokenChain(stream,state,[">"],RXstyle,RXmodifiers)}}}} - if(ch=="s"){ - var c=/[\/>\]})\w]/.test(stream.look(-2)); - if(!c){ - c=stream.eat(/[(\[{<\^'"!~\/]/); - if(c){ - if(c=="[") - return tokenChain(stream,state,["]","]"],RXstyle,RXmodifiers); - if(c=="{") - return tokenChain(stream,state,["}","}"],RXstyle,RXmodifiers); - if(c=="<") - return 
tokenChain(stream,state,[">",">"],RXstyle,RXmodifiers); - if(c=="(") - return tokenChain(stream,state,[")",")"],RXstyle,RXmodifiers); - return tokenChain(stream,state,[c,c],RXstyle,RXmodifiers)}}} - if(ch=="y"){ - var c=/[\/>\]})\w]/.test(stream.look(-2)); - if(!c){ - c=stream.eat(/[(\[{<\^'"!~\/]/); - if(c){ - if(c=="[") - return tokenChain(stream,state,["]","]"],RXstyle,RXmodifiers); - if(c=="{") - return tokenChain(stream,state,["}","}"],RXstyle,RXmodifiers); - if(c=="<") - return tokenChain(stream,state,[">",">"],RXstyle,RXmodifiers); - if(c=="(") - return tokenChain(stream,state,[")",")"],RXstyle,RXmodifiers); - return tokenChain(stream,state,[c,c],RXstyle,RXmodifiers)}}} - if(ch=="t"){ - var c=/[\/>\]})\w]/.test(stream.look(-2)); - if(!c){ - c=stream.eat("r");if(c){ - c=stream.eat(/[(\[{<\^'"!~\/]/); - if(c){ - if(c=="[") - return tokenChain(stream,state,["]","]"],RXstyle,RXmodifiers); - if(c=="{") - return tokenChain(stream,state,["}","}"],RXstyle,RXmodifiers); - if(c=="<") - return tokenChain(stream,state,[">",">"],RXstyle,RXmodifiers); - if(c=="(") - return tokenChain(stream,state,[")",")"],RXstyle,RXmodifiers); - return tokenChain(stream,state,[c,c],RXstyle,RXmodifiers)}}}} - if(ch=="`"){ - return tokenChain(stream,state,[ch],"variable-2")} - if(ch=="/"){ - if(!/~\s*$/.test(stream.prefix())) - return "operator"; - else - return tokenChain(stream,state,[ch],RXstyle,RXmodifiers)} - if(ch=="$"){ - var p=stream.pos; - if(stream.eatWhile(/\d/)||stream.eat("{")&&stream.eatWhile(/\d/)&&stream.eat("}")) - return "variable-2"; - else - stream.pos=p} - if(/[$@%]/.test(ch)){ - var p=stream.pos; - if(stream.eat("^")&&stream.eat(/[A-Z]/)||!/[@$%&]/.test(stream.look(-2))&&stream.eat(/[=|\\\-#?@;:&`~\^!\[\]*'"$+.,\/<>()]/)){ - var c=stream.current(); - if(PERL[c]) - return "variable-2"} - stream.pos=p} - if(/[$@%&]/.test(ch)){ - if(stream.eatWhile(/[\w$\[\]]/)||stream.eat("{")&&stream.eatWhile(/[\w$\[\]]/)&&stream.eat("}")){ - var c=stream.current(); - if(PERL[c]) - return "variable-2"; - else - return "variable"}} - if(ch=="#"){ - if(stream.look(-2)!="$"){ - stream.skipToEnd(); - return "comment"}} - if(/[:+\-\^*$&%@=<>!?|\/~\.]/.test(ch)){ - var p=stream.pos; - stream.eatWhile(/[:+\-\^*$&%@=<>!?|\/~\.]/); - if(PERL[stream.current()]) - return "operator"; - else - stream.pos=p} - if(ch=="_"){ - if(stream.pos==1){ - if(stream.suffix(6)=="_END__"){ - return tokenChain(stream,state,['\0'],"comment")} - else if(stream.suffix(7)=="_DATA__"){ - return tokenChain(stream,state,['\0'],"variable-2")} - else if(stream.suffix(7)=="_C__"){ - return tokenChain(stream,state,['\0'],"string")}}} - if(/\w/.test(ch)){ - var p=stream.pos; - if(stream.look(-2)=="{"&&(stream.look(0)=="}"||stream.eatWhile(/\w/)&&stream.look(0)=="}")) - return "string"; - else - stream.pos=p} - if(/[A-Z]/.test(ch)){ - var l=stream.look(-2); - var p=stream.pos; - stream.eatWhile(/[A-Z_]/); - if(/[\da-z]/.test(stream.look(0))){ - stream.pos=p} - else{ - var c=PERL[stream.current()]; - if(!c) - return "meta"; - if(c[1]) - c=c[0]; - if(l!=":"){ - if(c==1) - return "keyword"; - else if(c==2) - return "def"; - else if(c==3) - return "atom"; - else if(c==4) - return "operator"; - else if(c==5) - return "variable-2"; - else - return "meta"} - else - return "meta"}} - if(/[a-zA-Z_]/.test(ch)){ - var l=stream.look(-2); - stream.eatWhile(/\w/); - var c=PERL[stream.current()]; - if(!c) - return "meta"; - if(c[1]) - c=c[0]; - if(l!=":"){ - if(c==1) - return "keyword"; - else if(c==2) - return "def"; - else if(c==3) - return "atom"; - else 
if(c==4) - return "operator"; - else if(c==5) - return "variable-2"; - else - return "meta"} - else - return "meta"} - return null} - - return{ - startState:function(){ - return{ - tokenize:tokenPerl, - chain:null, - style:null, - tail:null}}, - token:function(stream,state){ - return (state.tokenize||tokenPerl)(stream,state)}, - electricChars:"{}"}}); - -CodeMirror.defineMIME("text/x-perl", "perl"); - -// it's like "peek", but need for look-ahead or look-behind if index < 0 -CodeMirror.StringStream.prototype.look=function(c){ - return this.string.charAt(this.pos+(c||0))}; - -// return a part of prefix of current stream from current position -CodeMirror.StringStream.prototype.prefix=function(c){ - if(c){ - var x=this.pos-c; - return this.string.substr((x>=0?x:0),c)} - else{ - return this.string.substr(0,this.pos-1)}}; - -// return a part of suffix of current stream from current position -CodeMirror.StringStream.prototype.suffix=function(c){ - var y=this.string.length; - var x=y-this.pos+1; - return this.string.substr(this.pos,(c&&c=(y=this.string.length-1)) - this.pos=y; - else - this.pos=x}; diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/php/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/php/index.html deleted file mode 100644 index 7949044..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/php/index.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - CodeMirror: PHP mode - - - - - - - - - - - -

CodeMirror: PHP mode

- Simple HTML/PHP mode based on the C-like mode. Depends on XML, JavaScript, CSS, and C-like modes.
- MIME types defined: application/x-httpd-php (HTML with PHP code), text/x-php (plain, non-wrapped PHP code).

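For reference, instantiating the deleted mode looked roughly like this (a minimal sketch against the CodeMirror 2.x API that this vendored copy targets; the "code" textarea id is a hypothetical placeholder):

    // Assumes php.js and its xml, javascript, css, and clike dependencies are loaded.
    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "application/x-httpd-php", // HTML with embedded <?php ... ?> blocks
      lineNumbers: true
    });
    // For plain PHP snippets without surrounding HTML, use mode: "text/x-php".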
- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/php/php.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/php/php.js deleted file mode 100644 index 5406a5c..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/php/php.js +++ /dev/null @@ -1,150 +0,0 @@ -(function() { - function keywords(str) { - var obj = {}, words = str.split(" "); - for (var i = 0; i < words.length; ++i) obj[words[i]] = true; - return obj; - } - function heredoc(delim) { - return function(stream, state) { - if (stream.match(delim)) state.tokenize = null; - else stream.skipToEnd(); - return "string"; - } - } - var phpConfig = { - name: "clike", - keywords: keywords("abstract and array as break case catch class clone const continue declare default " + - "do else elseif enddeclare endfor endforeach endif endswitch endwhile extends final " + - "for foreach function global goto if implements interface instanceof namespace " + - "new or private protected public static switch throw trait try use var while xor " + - "die echo empty exit eval include include_once isset list require require_once return " + - "print unset __halt_compiler self static parent"), - blockKeywords: keywords("catch do else elseif for foreach if switch try while"), - atoms: keywords("true false null TRUE FALSE NULL"), - multiLineStrings: true, - hooks: { - "$": function(stream, state) { - stream.eatWhile(/[\w\$_]/); - return "variable-2"; - }, - "<": function(stream, state) { - if (stream.match(/<", false)) stream.next(); - return "comment"; - }, - "/": function(stream, state) { - if (stream.eat("/")) { - while (!stream.eol() && !stream.match("?>", false)) stream.next(); - return "comment"; - } - return false; - } - } - }; - - CodeMirror.defineMode("php", function(config, parserConfig) { - var htmlMode = CodeMirror.getMode(config, {name: "xml", htmlMode: true}); - var jsMode = CodeMirror.getMode(config, "javascript"); - var cssMode = CodeMirror.getMode(config, "css"); - var phpMode = CodeMirror.getMode(config, phpConfig); - - function dispatch(stream, state) { // TODO open PHP inside text/css - var isPHP = state.mode == "php"; - if (stream.sol() && state.pending != '"') state.pending = null; - if (state.curMode == htmlMode) { - if (stream.match(/^<\?\w*/)) { - state.curMode = phpMode; - state.curState = state.php; - state.curClose = "?>"; - state.mode = "php"; - return "meta"; - } - if (state.pending == '"') { - while (!stream.eol() && stream.next() != '"') {} - var style = "string"; - } else if (state.pending && stream.pos < state.pending.end) { - stream.pos = state.pending.end; - var style = state.pending.style; - } else { - var style = htmlMode.token(stream, state.curState); - } - state.pending = null; - var cur = stream.current(), openPHP = cur.search(/<\?/); - if (openPHP != -1) { - if (style == "string" && /\"$/.test(cur) && !/\?>/.test(cur)) state.pending = '"'; - else state.pending = {end: stream.pos, style: style}; - stream.backUp(cur.length - openPHP); - } else if (style == "tag" && stream.current() == ">" && state.curState.context) { - if (/^script$/i.test(state.curState.context.tagName)) { - state.curMode = jsMode; - state.curState = jsMode.startState(htmlMode.indent(state.curState, "")); - state.curClose = /^<\/\s*script\s*>/i; - state.mode = "javascript"; - } - else if (/^style$/i.test(state.curState.context.tagName)) { - state.curMode = cssMode; - state.curState = cssMode.startState(htmlMode.indent(state.curState, "")); - state.curClose = /^<\/\s*style\s*>/i; - state.mode = "css"; - } - } - return style; - 
} else if ((!isPHP || state.php.tokenize == null) && - stream.match(state.curClose, isPHP)) { - state.curMode = htmlMode; - state.curState = state.html; - state.curClose = null; - state.mode = "html"; - if (isPHP) return "meta"; - else return dispatch(stream, state); - } else { - return state.curMode.token(stream, state.curState); - } - } - - return { - startState: function() { - var html = htmlMode.startState(); - return {html: html, - php: phpMode.startState(), - curMode: parserConfig.startOpen ? phpMode : htmlMode, - curState: parserConfig.startOpen ? phpMode.startState() : html, - curClose: parserConfig.startOpen ? /^\?>/ : null, - mode: parserConfig.startOpen ? "php" : "html", - pending: null} - }, - - copyState: function(state) { - var html = state.html, htmlNew = CodeMirror.copyState(htmlMode, html), - php = state.php, phpNew = CodeMirror.copyState(phpMode, php), cur; - if (state.curState == html) cur = htmlNew; - else if (state.curState == php) cur = phpNew; - else cur = CodeMirror.copyState(state.curMode, state.curState); - return {html: htmlNew, php: phpNew, curMode: state.curMode, curState: cur, - curClose: state.curClose, mode: state.mode, - pending: state.pending}; - }, - - token: dispatch, - - indent: function(state, textAfter) { - if ((state.curMode != phpMode && /^\s*<\//.test(textAfter)) || - (state.curMode == phpMode && /^\?>/.test(textAfter))) - return htmlMode.indent(state.html, textAfter); - return state.curMode.indent(state.curState, textAfter); - }, - - electricChars: "/{}:" - } - }); - CodeMirror.defineMIME("application/x-httpd-php", "php"); - CodeMirror.defineMIME("application/x-httpd-php-open", {name: "php", startOpen: true}); - CodeMirror.defineMIME("text/x-php", phpConfig); -})(); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/plsql/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/plsql/index.html deleted file mode 100644 index be603d9..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/plsql/index.html +++ /dev/null @@ -1,62 +0,0 @@ - - - - CodeMirror: Oracle PL/SQL mode - - - - - - - -

CodeMirror: Oracle PL/SQL mode

- Simple mode that handles Oracle PL/SQL language (and Oracle SQL, of course).

MIME type defined: text/x-plsql - (PLSQL code) - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/plsql/plsql.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/plsql/plsql.js deleted file mode 100644 index a2ac2e8..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/plsql/plsql.js +++ /dev/null @@ -1,217 +0,0 @@ -CodeMirror.defineMode("plsql", function(config, parserConfig) { - var indentUnit = config.indentUnit, - keywords = parserConfig.keywords, - functions = parserConfig.functions, - types = parserConfig.types, - sqlplus = parserConfig.sqlplus, - multiLineStrings = parserConfig.multiLineStrings; - var isOperatorChar = /[+\-*&%=<>!?:\/|]/; - function chain(stream, state, f) { - state.tokenize = f; - return f(stream, state); - } - - var type; - function ret(tp, style) { - type = tp; - return style; - } - - function tokenBase(stream, state) { - var ch = stream.next(); - // start of string? - if (ch == '"' || ch == "'") - return chain(stream, state, tokenString(ch)); - // is it one of the special signs []{}().,;? Seperator? - else if (/[\[\]{}\(\),;\.]/.test(ch)) - return ret(ch); - // start of a number value? - else if (/\d/.test(ch)) { - stream.eatWhile(/[\w\.]/); - return ret("number", "number"); - } - // multi line comment or simple operator? - else if (ch == "/") { - if (stream.eat("*")) { - return chain(stream, state, tokenComment); - } - else { - stream.eatWhile(isOperatorChar); - return ret("operator", "operator"); - } - } - // single line comment or simple operator? - else if (ch == "-") { - if (stream.eat("-")) { - stream.skipToEnd(); - return ret("comment", "comment"); - } - else { - stream.eatWhile(isOperatorChar); - return ret("operator", "operator"); - } - } - // pl/sql variable? - else if (ch == "@" || ch == "$") { - stream.eatWhile(/[\w\d\$_]/); - return ret("word", "variable"); - } - // is it a operator? - else if (isOperatorChar.test(ch)) { - stream.eatWhile(isOperatorChar); - return ret("operator", "operator"); - } - else { - // get the whole word - stream.eatWhile(/[\w\$_]/); - // is it one of the listed keywords? - if (keywords && keywords.propertyIsEnumerable(stream.current().toLowerCase())) return ret("keyword", "keyword"); - // is it one of the listed functions? - if (functions && functions.propertyIsEnumerable(stream.current().toLowerCase())) return ret("keyword", "builtin"); - // is it one of the listed types? - if (types && types.propertyIsEnumerable(stream.current().toLowerCase())) return ret("keyword", "variable-2"); - // is it one of the listed sqlplus keywords? 
- if (sqlplus && sqlplus.propertyIsEnumerable(stream.current().toLowerCase())) return ret("keyword", "variable-3"); - // default: just a "word" - return ret("word", "plsql-word"); - } - } - - function tokenString(quote) { - return function(stream, state) { - var escaped = false, next, end = false; - while ((next = stream.next()) != null) { - if (next == quote && !escaped) {end = true; break;} - escaped = !escaped && next == "\\"; - } - if (end || !(escaped || multiLineStrings)) - state.tokenize = tokenBase; - return ret("string", "plsql-string"); - }; - } - - function tokenComment(stream, state) { - var maybeEnd = false, ch; - while (ch = stream.next()) { - if (ch == "/" && maybeEnd) { - state.tokenize = tokenBase; - break; - } - maybeEnd = (ch == "*"); - } - return ret("comment", "plsql-comment"); - } - - // Interface - - return { - startState: function(basecolumn) { - return { - tokenize: tokenBase, - startOfLine: true - }; - }, - - token: function(stream, state) { - if (stream.eatSpace()) return null; - var style = state.tokenize(stream, state); - return style; - } - }; -}); - -(function() { - function keywords(str) { - var obj = {}, words = str.split(" "); - for (var i = 0; i < words.length; ++i) obj[words[i]] = true; - return obj; - } - var cKeywords = "abort accept access add all alter and any array arraylen as asc assert assign at attributes audit " + - "authorization avg " + - "base_table begin between binary_integer body boolean by " + - "case cast char char_base check close cluster clusters colauth column comment commit compress connect " + - "connected constant constraint crash create current currval cursor " + - "data_base database date dba deallocate debugoff debugon decimal declare default definition delay delete " + - "desc digits dispose distinct do drop " + - "else elsif enable end entry escape exception exception_init exchange exclusive exists exit external " + - "fast fetch file for force form from function " + - "generic goto grant group " + - "having " + - "identified if immediate in increment index indexes indicator initial initrans insert interface intersect " + - "into is " + - "key " + - "level library like limited local lock log logging long loop " + - "master maxextents maxtrans member minextents minus mislabel mode modify multiset " + - "new next no noaudit nocompress nologging noparallel not nowait number_base " + - "object of off offline on online only open option or order out " + - "package parallel partition pctfree pctincrease pctused pls_integer positive positiven pragma primary prior " + - "private privileges procedure public " + - "raise range raw read rebuild record ref references refresh release rename replace resource restrict return " + - "returning reverse revoke rollback row rowid rowlabel rownum rows run " + - "savepoint schema segment select separate session set share snapshot some space split sql start statement " + - "storage subtype successful synonym " + - "tabauth table tables tablespace task terminate then to trigger truncate type " + - "union unique unlimited unrecoverable unusable update use using " + - "validate value values variable view views " + - "when whenever where while with work"; - - var cFunctions = "abs acos add_months ascii asin atan atan2 average " + - "bfilename " + - "ceil chartorowid chr concat convert cos cosh count " + - "decode deref dual dump dup_val_on_index " + - "empty error exp " + - "false floor found " + - "glb greatest " + - "hextoraw " + - "initcap instr instrb isopen " + - "last_day least lenght lenghtb ln 
lower lpad ltrim lub " + - "make_ref max min mod months_between " + - "new_time next_day nextval nls_charset_decl_len nls_charset_id nls_charset_name nls_initcap nls_lower " + - "nls_sort nls_upper nlssort no_data_found notfound null nvl " + - "others " + - "power " + - "rawtohex reftohex round rowcount rowidtochar rpad rtrim " + - "sign sin sinh soundex sqlcode sqlerrm sqrt stddev substr substrb sum sysdate " + - "tan tanh to_char to_date to_label to_multi_byte to_number to_single_byte translate true trunc " + - "uid upper user userenv " + - "variance vsize"; - - var cTypes = "bfile blob " + - "character clob " + - "dec " + - "float " + - "int integer " + - "mlslabel " + - "natural naturaln nchar nclob number numeric nvarchar2 " + - "real rowtype " + - "signtype smallint string " + - "varchar varchar2"; - - var cSqlplus = "appinfo arraysize autocommit autoprint autorecovery autotrace " + - "blockterminator break btitle " + - "cmdsep colsep compatibility compute concat copycommit copytypecheck " + - "define describe " + - "echo editfile embedded escape exec execute " + - "feedback flagger flush " + - "heading headsep " + - "instance " + - "linesize lno loboffset logsource long longchunksize " + - "markup " + - "native newpage numformat numwidth " + - "pagesize pause pno " + - "recsep recsepchar release repfooter repheader " + - "serveroutput shiftinout show showmode size spool sqlblanklines sqlcase sqlcode sqlcontinue sqlnumber " + - "sqlpluscompatibility sqlprefix sqlprompt sqlterminator suffix " + - "tab term termout time timing trimout trimspool ttitle " + - "underline " + - "verify version " + - "wrap"; - - CodeMirror.defineMIME("text/x-plsql", { - name: "plsql", - keywords: keywords(cKeywords), - functions: keywords(cFunctions), - types: keywords(cTypes), - sqlplus: keywords(cSqlplus) - }); -}()); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/properties/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/properties/index.html deleted file mode 100755 index 4f0c269..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/properties/index.html +++ /dev/null @@ -1,40 +0,0 @@ - - - - CodeMirror: Properties files mode - - - - - - - -

CodeMirror: Properties files mode

- MIME types defined: text/x-properties, text/x-ini.

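Both MIME types resolve to the same mode; CodeMirror 2.x treats defineMIME as a simple alias table, so a further alias is a one-liner (a sketch; the text/x-dotenv type is a hypothetical example):

    // properties.js already registers "text/x-properties" and "text/x-ini";
    // an application could map an additional MIME type onto the same mode:
    CodeMirror.defineMIME("text/x-dotenv", "properties");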
- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/properties/properties.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/properties/properties.js deleted file mode 100755 index d3a13c7..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/properties/properties.js +++ /dev/null @@ -1,63 +0,0 @@ -CodeMirror.defineMode("properties", function() { - return { - token: function(stream, state) { - var sol = stream.sol() || state.afterSection; - var eol = stream.eol(); - - state.afterSection = false; - - if (sol) { - if (state.nextMultiline) { - state.inMultiline = true; - state.nextMultiline = false; - } else { - state.position = "def"; - } - } - - if (eol && ! state.nextMultiline) { - state.inMultiline = false; - state.position = "def"; - } - - if (sol) { - while(stream.eatSpace()); - } - - var ch = stream.next(); - - if (sol && (ch === "#" || ch === "!" || ch === ";")) { - state.position = "comment"; - stream.skipToEnd(); - return "comment"; - } else if (sol && ch === "[") { - state.afterSection = true; - stream.skipTo("]"); stream.eat("]"); - return "header"; - } else if (ch === "=" || ch === ":") { - state.position = "quote"; - return null; - } else if (ch === "\\" && state.position === "quote") { - if (stream.next() !== "u") { // u = Unicode sequence \u1234 - // Multiline value - state.nextMultiline = true; - } - } - - return state.position; - }, - - startState: function() { - return { - position : "def", // Current position, "def", "quote" or "comment" - nextMultiline : false, // Is the next line multiline value - inMultiline : false, // Is the current line a multiline value - afterSection : false // Did we just open a section - }; - } - - }; -}); - -CodeMirror.defineMIME("text/x-properties", "properties"); -CodeMirror.defineMIME("text/x-ini", "properties"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/python/LICENSE.txt b/src/lib/flask_debugtoolbar/static/codemirror/mode/python/LICENSE.txt deleted file mode 100644 index 918866b..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/python/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License - -Copyright (c) 2010 Timothy Farrell - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
\ No newline at end of file diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/python/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/python/index.html deleted file mode 100644 index 47e0e9d..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/python/index.html +++ /dev/null @@ -1,122 +0,0 @@ - - - - CodeMirror: Python mode - - - - - - - -

CodeMirror: Python mode

- Configuration options:
-   version - 2/3 - The version of Python to recognize. Default is 2.
-   singleLineStringErrors - true/false - If true, a single-line string that is not
-     terminated at the end of the line marks the following lines as errors; if false,
-     the newline is treated as the end of the string. Default is false.
- MIME types defined: text/x-python.

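A minimal sketch of passing those options through the CodeMirror 2.x mode-object form (the "code" textarea id is a hypothetical placeholder):

    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: {
        name: "python",
        version: 3,                    // recognize Python 3 keywords and builtins (default: 2)
        singleLineStringErrors: false  // unterminated single-line strings end at the newline
      },
      indentUnit: 4,
      lineNumbers: true
    });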
- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/python/python.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/python/python.js deleted file mode 100644 index cfe8a77..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/python/python.js +++ /dev/null @@ -1,340 +0,0 @@ -CodeMirror.defineMode("python", function(conf, parserConf) { - var ERRORCLASS = 'error'; - - function wordRegexp(words) { - return new RegExp("^((" + words.join(")|(") + "))\\b"); - } - - var singleOperators = new RegExp("^[\\+\\-\\*/%&|\\^~<>!]"); - var singleDelimiters = new RegExp('^[\\(\\)\\[\\]\\{\\}@,:`=;\\.]'); - var doubleOperators = new RegExp("^((==)|(!=)|(<=)|(>=)|(<>)|(<<)|(>>)|(//)|(\\*\\*))"); - var doubleDelimiters = new RegExp("^((\\+=)|(\\-=)|(\\*=)|(%=)|(/=)|(&=)|(\\|=)|(\\^=))"); - var tripleDelimiters = new RegExp("^((//=)|(>>=)|(<<=)|(\\*\\*=))"); - var identifiers = new RegExp("^[_A-Za-z][_A-Za-z0-9]*"); - - var wordOperators = wordRegexp(['and', 'or', 'not', 'is', 'in']); - var commonkeywords = ['as', 'assert', 'break', 'class', 'continue', - 'def', 'del', 'elif', 'else', 'except', 'finally', - 'for', 'from', 'global', 'if', 'import', - 'lambda', 'pass', 'raise', 'return', - 'try', 'while', 'with', 'yield']; - var commonBuiltins = ['abs', 'all', 'any', 'bin', 'bool', 'bytearray', 'callable', 'chr', - 'classmethod', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod', - 'enumerate', 'eval', 'filter', 'float', 'format', 'frozenset', - 'getattr', 'globals', 'hasattr', 'hash', 'help', 'hex', 'id', - 'input', 'int', 'isinstance', 'issubclass', 'iter', 'len', - 'list', 'locals', 'map', 'max', 'memoryview', 'min', 'next', - 'object', 'oct', 'open', 'ord', 'pow', 'property', 'range', - 'repr', 'reversed', 'round', 'set', 'setattr', 'slice', - 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', - 'type', 'vars', 'zip', '__import__', 'NotImplemented', - 'Ellipsis', '__debug__']; - var py2 = {'builtins': ['apply', 'basestring', 'buffer', 'cmp', 'coerce', 'execfile', - 'file', 'intern', 'long', 'raw_input', 'reduce', 'reload', - 'unichr', 'unicode', 'xrange', 'False', 'True', 'None'], - 'keywords': ['exec', 'print']}; - var py3 = {'builtins': ['ascii', 'bytes', 'exec', 'print'], - 'keywords': ['nonlocal', 'False', 'True', 'None']}; - - if (!!parserConf.version && parseInt(parserConf.version, 10) === 3) { - commonkeywords = commonkeywords.concat(py3.keywords); - commonBuiltins = commonBuiltins.concat(py3.builtins); - var stringPrefixes = new RegExp("^(([rb]|(br))?('{3}|\"{3}|['\"]))", "i"); - } else { - commonkeywords = commonkeywords.concat(py2.keywords); - commonBuiltins = commonBuiltins.concat(py2.builtins); - var stringPrefixes = new RegExp("^(([rub]|(ur)|(br))?('{3}|\"{3}|['\"]))", "i"); - } - var keywords = wordRegexp(commonkeywords); - var builtins = wordRegexp(commonBuiltins); - - var indentInfo = null; - - // tokenizers - function tokenBase(stream, state) { - // Handle scope changes - if (stream.sol()) { - var scopeOffset = state.scopes[0].offset; - if (stream.eatSpace()) { - var lineOffset = stream.indentation(); - if (lineOffset > scopeOffset) { - indentInfo = 'indent'; - } else if (lineOffset < scopeOffset) { - indentInfo = 'dedent'; - } - return null; - } else { - if (scopeOffset > 0) { - dedent(stream, state); - } - } - } - if (stream.eatSpace()) { - return null; - } - - var ch = stream.peek(); - - // Handle Comments - if (ch === '#') { - stream.skipToEnd(); - return 'comment'; - } - - // Handle Number Literals - if (stream.match(/^[0-9\.]/, false)) { - 
var floatLiteral = false; - // Floats - if (stream.match(/^\d*\.\d+(e[\+\-]?\d+)?/i)) { floatLiteral = true; } - if (stream.match(/^\d+\.\d*/)) { floatLiteral = true; } - if (stream.match(/^\.\d+/)) { floatLiteral = true; } - if (floatLiteral) { - // Float literals may be "imaginary" - stream.eat(/J/i); - return 'number'; - } - // Integers - var intLiteral = false; - // Hex - if (stream.match(/^0x[0-9a-f]+/i)) { intLiteral = true; } - // Binary - if (stream.match(/^0b[01]+/i)) { intLiteral = true; } - // Octal - if (stream.match(/^0o[0-7]+/i)) { intLiteral = true; } - // Decimal - if (stream.match(/^[1-9]\d*(e[\+\-]?\d+)?/)) { - // Decimal literals may be "imaginary" - stream.eat(/J/i); - // TODO - Can you have imaginary longs? - intLiteral = true; - } - // Zero by itself with no other piece of number. - if (stream.match(/^0(?![\dx])/i)) { intLiteral = true; } - if (intLiteral) { - // Integer literals may be "long" - stream.eat(/L/i); - return 'number'; - } - } - - // Handle Strings - if (stream.match(stringPrefixes)) { - state.tokenize = tokenStringFactory(stream.current()); - return state.tokenize(stream, state); - } - - // Handle operators and Delimiters - if (stream.match(tripleDelimiters) || stream.match(doubleDelimiters)) { - return null; - } - if (stream.match(doubleOperators) - || stream.match(singleOperators) - || stream.match(wordOperators)) { - return 'operator'; - } - if (stream.match(singleDelimiters)) { - return null; - } - - if (stream.match(keywords)) { - return 'keyword'; - } - - if (stream.match(builtins)) { - return 'builtin'; - } - - if (stream.match(identifiers)) { - return 'variable'; - } - - // Handle non-detected items - stream.next(); - return ERRORCLASS; - } - - function tokenStringFactory(delimiter) { - while ('rub'.indexOf(delimiter.charAt(0).toLowerCase()) >= 0) { - delimiter = delimiter.substr(1); - } - var singleline = delimiter.length == 1; - var OUTCLASS = 'string'; - - return function tokenString(stream, state) { - while (!stream.eol()) { - stream.eatWhile(/[^'"\\]/); - if (stream.eat('\\')) { - stream.next(); - if (singleline && stream.eol()) { - return OUTCLASS; - } - } else if (stream.match(delimiter)) { - state.tokenize = tokenBase; - return OUTCLASS; - } else { - stream.eat(/['"]/); - } - } - if (singleline) { - if (parserConf.singleLineStringErrors) { - return ERRORCLASS; - } else { - state.tokenize = tokenBase; - } - } - return OUTCLASS; - }; - } - - function indent(stream, state, type) { - type = type || 'py'; - var indentUnit = 0; - if (type === 'py') { - if (state.scopes[0].type !== 'py') { - state.scopes[0].offset = stream.indentation(); - return; - } - for (var i = 0; i < state.scopes.length; ++i) { - if (state.scopes[i].type === 'py') { - indentUnit = state.scopes[i].offset + conf.indentUnit; - break; - } - } - } else { - indentUnit = stream.column() + stream.current().length; - } - state.scopes.unshift({ - offset: indentUnit, - type: type - }); - } - - function dedent(stream, state, type) { - type = type || 'py'; - if (state.scopes.length == 1) return; - if (state.scopes[0].type === 'py') { - var _indent = stream.indentation(); - var _indent_index = -1; - for (var i = 0; i < state.scopes.length; ++i) { - if (_indent === state.scopes[i].offset) { - _indent_index = i; - break; - } - } - if (_indent_index === -1) { - return true; - } - while (state.scopes[0].offset !== _indent) { - state.scopes.shift(); - } - return false - } else { - if (type === 'py') { - state.scopes[0].offset = stream.indentation(); - return false; - } else { - if 
(state.scopes[0].type != type) { - return true; - } - state.scopes.shift(); - return false; - } - } - } - - function tokenLexer(stream, state) { - indentInfo = null; - var style = state.tokenize(stream, state); - var current = stream.current(); - - // Handle '.' connected identifiers - if (current === '.') { - style = state.tokenize(stream, state); - current = stream.current(); - if (style === 'variable' || style === 'builtin') { - return 'variable'; - } else { - return ERRORCLASS; - } - } - - // Handle decorators - if (current === '@') { - style = state.tokenize(stream, state); - current = stream.current(); - if (style === 'variable' - || current === '@staticmethod' - || current === '@classmethod') { - return 'meta'; - } else { - return ERRORCLASS; - } - } - - // Handle scope changes. - if (current === 'pass' || current === 'return') { - state.dedent += 1; - } - if ((current === ':' && !state.lambda && state.scopes[0].type == 'py') - || indentInfo === 'indent') { - indent(stream, state); - } - var delimiter_index = '[({'.indexOf(current); - if (delimiter_index !== -1) { - indent(stream, state, '])}'.slice(delimiter_index, delimiter_index+1)); - } - if (indentInfo === 'dedent') { - if (dedent(stream, state)) { - return ERRORCLASS; - } - } - delimiter_index = '])}'.indexOf(current); - if (delimiter_index !== -1) { - if (dedent(stream, state, current)) { - return ERRORCLASS; - } - } - if (state.dedent > 0 && stream.eol() && state.scopes[0].type == 'py') { - if (state.scopes.length > 1) state.scopes.shift(); - state.dedent -= 1; - } - - return style; - } - - var external = { - startState: function(basecolumn) { - return { - tokenize: tokenBase, - scopes: [{offset:basecolumn || 0, type:'py'}], - lastToken: null, - lambda: false, - dedent: 0 - }; - }, - - token: function(stream, state) { - var style = tokenLexer(stream, state); - - state.lastToken = {style:style, content: stream.current()}; - - if (stream.eol() && stream.lambda) { - state.lambda = false; - } - - return style; - }, - - indent: function(state, textAfter) { - if (state.tokenize != tokenBase) { - return 0; - } - - return state.scopes[0].offset; - } - - }; - return external; -}); - -CodeMirror.defineMIME("text/x-python", "python"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/r/LICENSE b/src/lib/flask_debugtoolbar/static/codemirror/mode/r/LICENSE deleted file mode 100644 index 2510ae1..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/r/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2011, Ubalo, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the Ubalo, Inc nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL UBALO, INC BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/r/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/r/index.html deleted file mode 100644 index 6977505..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/r/index.html +++ /dev/null @@ -1,73 +0,0 @@ - - - - CodeMirror: R mode - - - - - - - -

CodeMirror: R mode

- MIME types defined: text/x-rsrc.
- Development of the CodeMirror R mode was kindly sponsored by Ubalo, who hold the license.

- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/r/r.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/r/r.js deleted file mode 100644 index 53647f2..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/r/r.js +++ /dev/null @@ -1,141 +0,0 @@ -CodeMirror.defineMode("r", function(config) { - function wordObj(str) { - var words = str.split(" "), res = {}; - for (var i = 0; i < words.length; ++i) res[words[i]] = true; - return res; - } - var atoms = wordObj("NULL NA Inf NaN NA_integer_ NA_real_ NA_complex_ NA_character_"); - var builtins = wordObj("list quote bquote eval return call parse deparse"); - var keywords = wordObj("if else repeat while function for in next break"); - var blockkeywords = wordObj("if else repeat while function for"); - var opChars = /[+\-*\/^<>=!&|~$:]/; - var curPunc; - - function tokenBase(stream, state) { - curPunc = null; - var ch = stream.next(); - if (ch == "#") { - stream.skipToEnd(); - return "comment"; - } else if (ch == "0" && stream.eat("x")) { - stream.eatWhile(/[\da-f]/i); - return "number"; - } else if (ch == "." && stream.eat(/\d/)) { - stream.match(/\d*(?:e[+\-]?\d+)?/); - return "number"; - } else if (/\d/.test(ch)) { - stream.match(/\d*(?:\.\d+)?(?:e[+\-]\d+)?L?/); - return "number"; - } else if (ch == "'" || ch == '"') { - state.tokenize = tokenString(ch); - return "string"; - } else if (ch == "." && stream.match(/.[.\d]+/)) { - return "keyword"; - } else if (/[\w\.]/.test(ch) && ch != "_") { - stream.eatWhile(/[\w\.]/); - var word = stream.current(); - if (atoms.propertyIsEnumerable(word)) return "atom"; - if (keywords.propertyIsEnumerable(word)) { - if (blockkeywords.propertyIsEnumerable(word)) curPunc = "block"; - return "keyword"; - } - if (builtins.propertyIsEnumerable(word)) return "builtin"; - return "variable"; - } else if (ch == "%") { - if (stream.skipTo("%")) stream.next(); - return "variable-2"; - } else if (ch == "<" && stream.eat("-")) { - return "arrow"; - } else if (ch == "=" && state.ctx.argList) { - return "arg-is"; - } else if (opChars.test(ch)) { - if (ch == "$") return "dollar"; - stream.eatWhile(opChars); - return "operator"; - } else if (/[\(\){}\[\];]/.test(ch)) { - curPunc = ch; - if (ch == ";") return "semi"; - return null; - } else { - return null; - } - } - - function tokenString(quote) { - return function(stream, state) { - if (stream.eat("\\")) { - var ch = stream.next(); - if (ch == "x") stream.match(/^[a-f0-9]{2}/i); - else if ((ch == "u" || ch == "U") && stream.eat("{") && stream.skipTo("}")) stream.next(); - else if (ch == "u") stream.match(/^[a-f0-9]{4}/i); - else if (ch == "U") stream.match(/^[a-f0-9]{8}/i); - else if (/[0-7]/.test(ch)) stream.match(/^[0-7]{1,2}/); - return "string-2"; - } else { - var next; - while ((next = stream.next()) != null) { - if (next == quote) { state.tokenize = tokenBase; break; } - if (next == "\\") { stream.backUp(1); break; } - } - return "string"; - } - }; - } - - function push(state, type, stream) { - state.ctx = {type: type, - indent: state.indent, - align: null, - column: stream.column(), - prev: state.ctx}; - } - function pop(state) { - state.indent = state.ctx.indent; - state.ctx = state.ctx.prev; - } - - return { - startState: function(base) { - return {tokenize: tokenBase, - ctx: {type: "top", - indent: -config.indentUnit, - align: false}, - indent: 0, - afterIdent: false}; - }, - - token: function(stream, state) { - if (stream.sol()) { - if (state.ctx.align == null) state.ctx.align = false; - state.indent = stream.indentation(); - } - if 
(stream.eatSpace()) return null; - var style = state.tokenize(stream, state); - if (style != "comment" && state.ctx.align == null) state.ctx.align = true; - - var ctype = state.ctx.type; - if ((curPunc == ";" || curPunc == "{" || curPunc == "}") && ctype == "block") pop(state); - if (curPunc == "{") push(state, "}", stream); - else if (curPunc == "(") { - push(state, ")", stream); - if (state.afterIdent) state.ctx.argList = true; - } - else if (curPunc == "[") push(state, "]", stream); - else if (curPunc == "block") push(state, "block", stream); - else if (curPunc == ctype) pop(state); - state.afterIdent = style == "variable" || style == "keyword"; - return style; - }, - - indent: function(state, textAfter) { - if (state.tokenize != tokenBase) return 0; - var firstChar = textAfter && textAfter.charAt(0), ctx = state.ctx, - closing = firstChar == ctx.type; - if (ctx.type == "block") return ctx.indent + (firstChar == "{" ? 0 : config.indentUnit); - else if (ctx.align) return ctx.column + (closing ? 0 : 1); - else return ctx.indent + (closing ? 0 : config.indentUnit); - } - }; -}); - -CodeMirror.defineMIME("text/x-rsrc", "r"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/changes/changes.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/changes/changes.js deleted file mode 100644 index cb45f9e..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/changes/changes.js +++ /dev/null @@ -1,19 +0,0 @@ -CodeMirror.defineMode("changes", function(config, modeConfig) { - var headerSeperator = /^-+$/; - var headerLine = /^(Mon|Tue|Wed|Thu|Fri|Sat|Sun) (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) ?\d{1,2} \d{2}:\d{2}(:\d{2})? [A-Z]{3,4} \d{4} - /; - var simpleEmail = /^[\w+.-]+@[\w.-]+/; - - return { - token: function(stream) { - if (stream.sol()) { - if (stream.match(headerSeperator)) { return 'tag'; } - if (stream.match(headerLine)) { return 'tag'; } - } - if (stream.match(simpleEmail)) { return 'string'; } - stream.next(); - return null; - } - }; -}); - -CodeMirror.defineMIME("text/x-rpm-changes", "changes"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/changes/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/changes/index.html deleted file mode 100644 index b7ff952..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/changes/index.html +++ /dev/null @@ -1,53 +0,0 @@ - - - - CodeMirror: RPM changes mode - - - - - - - -

CodeMirror: RPM changes mode

- MIME types defined: text/x-rpm-changes.

- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/spec/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/spec/index.html deleted file mode 100644 index e3bca15..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/spec/index.html +++ /dev/null @@ -1,99 +0,0 @@ - - - - CodeMirror: RPM spec mode - - - - - - - - -

CodeMirror: RPM spec mode

- MIME types defined: text/x-rpm-spec.

- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/spec/spec.css b/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/spec/spec.css deleted file mode 100644 index d0a5d43..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/spec/spec.css +++ /dev/null @@ -1,5 +0,0 @@ -.cm-s-default span.cm-preamble {color: #b26818; font-weight: bold;} -.cm-s-default span.cm-macro {color: #b218b2;} -.cm-s-default span.cm-section {color: green; font-weight: bold;} -.cm-s-default span.cm-script {color: red;} -.cm-s-default span.cm-issue {color: yellow;} diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/spec/spec.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/spec/spec.js deleted file mode 100644 index 902db6a..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/rpm/spec/spec.js +++ /dev/null @@ -1,66 +0,0 @@ -// Quick and dirty spec file highlighting - -CodeMirror.defineMode("spec", function(config, modeConfig) { - var arch = /^(i386|i586|i686|x86_64|ppc64|ppc|ia64|s390x|s390|sparc64|sparcv9|sparc|noarch|alphaev6|alpha|hppa|mipsel)/; - - var preamble = /^(Name|Version|Release|License|Summary|Url|Group|Source|BuildArch|BuildRequires|BuildRoot|AutoReqProv|Provides|Requires(\(\w+\))?|Obsoletes|Conflicts|Recommends|Source\d*|Patch\d*|ExclusiveArch|NoSource|Supplements):/; - var section = /^%(debug_package|package|description|prep|build|install|files|clean|changelog|preun|postun|pre|post|triggerin|triggerun|pretrans|posttrans|verifyscript|check|triggerpostun|triggerprein|trigger)/; - var control_flow_complex = /^%(ifnarch|ifarch|if)/; // rpm control flow macros - var control_flow_simple = /^%(else|endif)/; // rpm control flow macros - var operators = /^(\!|\?|\<\=|\<|\>\=|\>|\=\=|\&\&|\|\|)/; // operators in control flow macros - - return { - startState: function () { - return { - controlFlow: false, - macroParameters: false, - section: false - }; - }, - token: function (stream, state) { - var ch = stream.peek(); - if (ch == "#") { stream.skipToEnd(); return "comment"; } - - if (stream.sol()) { - if (stream.match(preamble)) { return "preamble"; } - if (stream.match(section)) { return "section"; } - } - - if (stream.match(/^\$\w+/)) { return "def"; } // Variables like '$RPM_BUILD_ROOT' - if (stream.match(/^\$\{\w+\}/)) { return "def"; } // Variables like '${RPM_BUILD_ROOT}' - - if (stream.match(control_flow_simple)) { return "keyword"; } - if (stream.match(control_flow_complex)) { - state.controlFlow = true; - return "keyword"; - } - if (state.controlFlow) { - if (stream.match(operators)) { return "operator"; } - if (stream.match(/^(\d+)/)) { return "number"; } - if (stream.eol()) { state.controlFlow = false; } - } - - if (stream.match(arch)) { return "number"; } - - // Macros like '%make_install' or '%attr(0775,root,root)' - if (stream.match(/^%[\w]+/)) { - if (stream.match(/^\(/)) { state.macroParameters = true; } - return "macro"; - } - if (state.macroParameters) { - if (stream.match(/^\d+/)) { return "number";} - if (stream.match(/^\)/)) { - state.macroParameters = false; - return "macro"; - } - } - if (stream.match(/^%\{\??[\w \-]+\}/)) { return "macro"; } // Macros like '%{defined fedora}' - - //TODO: Include bash script sub-parser (CodeMirror supports that) - stream.next(); - return null; - } - }; -}); - -CodeMirror.defineMIME("text/x-rpm-spec", "spec"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/rst/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/rst/index.html deleted file mode 100644 index 
fd75a28..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/rst/index.html +++ /dev/null @@ -1,525 +0,0 @@ - - - - CodeMirror: reStructuredText mode - - - - - - - -

CodeMirror: reStructuredText mode

- The reStructuredText mode supports one configuration parameter:
-   verbatim (string) - A name or MIME type of a mode that will be used for highlighting
-     verbatim blocks. By default, the reStructuredText mode uses a uniform color for the
-     whole block of verbatim text if no mode is given.
- If the python mode is available, it will be used for highlighting blocks containing
-   Python/IPython terminal sessions (blocks starting with >>> for Python or In [num]: for IPython).
- MIME types defined: text/x-rst.

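A minimal sketch of passing the verbatim parameter (CodeMirror 2.x mode-object form; the "code" textarea id is a hypothetical placeholder):

    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      // Highlight ::-introduced literal blocks with the python mode, if loaded.
      mode: {name: "rst", verbatim: "python"}
    });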
- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/rst/rst.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/rst/rst.js deleted file mode 100644 index 411bac5..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/rst/rst.js +++ /dev/null @@ -1,326 +0,0 @@ -CodeMirror.defineMode('rst', function(config, options) { - function setState(state, fn, ctx) { - state.fn = fn; - setCtx(state, ctx); - } - - function setCtx(state, ctx) { - state.ctx = ctx || {}; - } - - function setNormal(state, ch) { - if (ch && (typeof ch !== 'string')) { - var str = ch.current(); - ch = str[str.length-1]; - } - - setState(state, normal, {back: ch}); - } - - function hasMode(mode) { - if (mode) { - var modes = CodeMirror.listModes(); - - for (var i in modes) { - if (modes[i] == mode) { - return true; - } - } - } - - return false; - } - - function getMode(mode) { - if (hasMode(mode)) { - return CodeMirror.getMode(config, mode); - } else { - return null; - } - } - - var verbatimMode = getMode(options.verbatim); - var pythonMode = getMode('python'); - - var reSection = /^[!"#$%&'()*+,-./:;<=>?@[\\\]^_`{|}~]/; - var reDirective = /^\s*\w([-:.\w]*\w)?::(\s|$)/; - var reHyperlink = /^\s*_[\w-]+:(\s|$)/; - var reFootnote = /^\s*\[(\d+|#)\](\s|$)/; - var reCitation = /^\s*\[[A-Za-z][\w-]*\](\s|$)/; - var reFootnoteRef = /^\[(\d+|#)\]_/; - var reCitationRef = /^\[[A-Za-z][\w-]*\]_/; - var reDirectiveMarker = /^\.\.(\s|$)/; - var reVerbatimMarker = /^::\s*$/; - var rePreInline = /^[-\s"([{/:.,;!?\\_]/; - var reEnumeratedList = /^\s*((\d+|[A-Za-z#])[.)]|\((\d+|[A-Z-a-z#])\))\s/; - var reBulletedList = /^\s*[-\+\*]\s/; - var reExamples = /^\s+(>>>|In \[\d+\]:)\s/; - - function normal(stream, state) { - var ch, sol, i; - - if (stream.eat(/\\/)) { - ch = stream.next(); - setNormal(state, ch); - return null; - } - - sol = stream.sol(); - - if (sol && (ch = stream.eat(reSection))) { - for (i = 0; stream.eat(ch); i++); - - if (i >= 3 && stream.match(/^\s*$/)) { - setNormal(state, null); - return 'header'; - } else { - stream.backUp(i + 1); - } - } - - if (sol && stream.match(reDirectiveMarker)) { - if (!stream.eol()) { - setState(state, directive); - } - return 'meta'; - } - - if (stream.match(reVerbatimMarker)) { - if (!verbatimMode) { - setState(state, verbatim); - } else { - var mode = verbatimMode; - - setState(state, verbatim, { - mode: mode, - local: mode.startState() - }); - } - return 'meta'; - } - - if (sol && stream.match(reExamples, false)) { - if (!pythonMode) { - setState(state, verbatim); - return 'meta'; - } else { - var mode = pythonMode; - - setState(state, verbatim, { - mode: mode, - local: mode.startState() - }); - - return null; - } - } - - function testBackward(re) { - return sol || !state.ctx.back || re.test(state.ctx.back); - } - - function testForward(re) { - return stream.eol() || stream.match(re, false); - } - - function testInline(re) { - return stream.match(re) && testBackward(/\W/) && testForward(/\W/); - } - - if (testInline(reFootnoteRef)) { - setNormal(state, stream); - return 'footnote'; - } - - if (testInline(reCitationRef)) { - setNormal(state, stream); - return 'citation'; - } - - ch = stream.next(); - - if (testBackward(rePreInline)) { - if ((ch === ':' || ch === '|') && stream.eat(/\S/)) { - var token; - - if (ch === ':') { - token = 'builtin'; - } else { - token = 'atom'; - } - - setState(state, inline, { - ch: ch, - wide: false, - prev: null, - token: token - }); - - return token; - } - - if (ch === '*' || ch === '`') { - var orig = ch, - wide = false; - - ch = 
stream.next(); - - if (ch == orig) { - wide = true; - ch = stream.next(); - } - - if (ch && !/\s/.test(ch)) { - var token; - - if (orig === '*') { - token = wide ? 'strong' : 'em'; - } else { - token = wide ? 'string' : 'string-2'; - } - - setState(state, inline, { - ch: orig, // inline() has to know what to search for - wide: wide, // are we looking for `ch` or `chch` - prev: null, // terminator must not be preceeded with whitespace - token: token // I don't want to recompute this all the time - }); - - return token; - } - } - } - - setNormal(state, ch); - return null; - } - - function inline(stream, state) { - var ch = stream.next(), - token = state.ctx.token; - - function finish(ch) { - state.ctx.prev = ch; - return token; - } - - if (ch != state.ctx.ch) { - return finish(ch); - } - - if (/\s/.test(state.ctx.prev)) { - return finish(ch); - } - - if (state.ctx.wide) { - ch = stream.next(); - - if (ch != state.ctx.ch) { - return finish(ch); - } - } - - if (!stream.eol() && !rePostInline.test(stream.peek())) { - if (state.ctx.wide) { - stream.backUp(1); - } - - return finish(ch); - } - - setState(state, normal); - setNormal(state, ch); - - return token; - } - - function directive(stream, state) { - var token = null; - - if (stream.match(reDirective)) { - token = 'attribute'; - } else if (stream.match(reHyperlink)) { - token = 'link'; - } else if (stream.match(reFootnote)) { - token = 'quote'; - } else if (stream.match(reCitation)) { - token = 'quote'; - } else { - stream.eatSpace(); - - if (stream.eol()) { - setNormal(state, stream); - return null; - } else { - stream.skipToEnd(); - setState(state, comment); - return 'comment'; - } - } - - // FIXME this is unreachable - setState(state, body, {start: true}); - return token; - } - - function body(stream, state) { - var token = 'body'; - - if (!state.ctx.start || stream.sol()) { - return block(stream, state, token); - } - - stream.skipToEnd(); - setCtx(state); - - return token; - } - - function comment(stream, state) { - return block(stream, state, 'comment'); - } - - function verbatim(stream, state) { - if (!verbatimMode) { - return block(stream, state, 'meta'); - } else { - if (stream.sol()) { - if (!stream.eatSpace()) { - setNormal(state, stream); - } - - return null; - } - - return verbatimMode.token(stream, state.ctx.local); - } - } - - function block(stream, state, token) { - if (stream.eol() || stream.eatSpace()) { - stream.skipToEnd(); - return token; - } else { - setNormal(state, stream); - return null; - } - } - - return { - startState: function() { - return {fn: normal, ctx: {}}; - }, - - copyState: function(state) { - return {fn: state.fn, ctx: state.ctx}; - }, - - token: function(stream, state) { - var token = state.fn(stream, state); - return token; - } - }; -}); - -CodeMirror.defineMIME("text/x-rst", "rst"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/ruby/LICENSE b/src/lib/flask_debugtoolbar/static/codemirror/mode/ruby/LICENSE deleted file mode 100644 index ac09fc4..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/ruby/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2011, Ubalo, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the Ubalo, Inc. nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL UBALO, INC BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/ruby/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/ruby/index.html deleted file mode 100644 index 6d33db1..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/ruby/index.html +++ /dev/null @@ -1,171 +0,0 @@ - - - - CodeMirror: Ruby mode - - - - - - - -

CodeMirror: Ruby mode

- MIME types defined: text/x-ruby.
- Development of the CodeMirror Ruby mode was kindly sponsored by Ubalo, who hold the license.

- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/ruby/ruby.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/ruby/ruby.js deleted file mode 100644 index b647efd..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/ruby/ruby.js +++ /dev/null @@ -1,200 +0,0 @@ -CodeMirror.defineMode("ruby", function(config, parserConfig) { - function wordObj(words) { - var o = {}; - for (var i = 0, e = words.length; i < e; ++i) o[words[i]] = true; - return o; - } - var keywords = wordObj([ - "alias", "and", "BEGIN", "begin", "break", "case", "class", "def", "defined?", "do", "else", - "elsif", "END", "end", "ensure", "false", "for", "if", "in", "module", "next", "not", "or", - "redo", "rescue", "retry", "return", "self", "super", "then", "true", "undef", "unless", - "until", "when", "while", "yield", "nil", "raise", "throw", "catch", "fail", "loop", "callcc", - "caller", "lambda", "proc", "public", "protected", "private", "require", "load", - "require_relative", "extend", "autoload" - ]); - var indentWords = wordObj(["def", "class", "case", "for", "while", "do", "module", "then", - "catch", "loop", "proc", "begin"]); - var dedentWords = wordObj(["end", "until"]); - var matching = {"[": "]", "{": "}", "(": ")"}; - var curPunc; - - function chain(newtok, stream, state) { - state.tokenize.push(newtok); - return newtok(stream, state); - } - - function tokenBase(stream, state) { - curPunc = null; - if (stream.sol() && stream.match("=begin") && stream.eol()) { - state.tokenize.push(readBlockComment); - return "comment"; - } - if (stream.eatSpace()) return null; - var ch = stream.next(); - if (ch == "`" || ch == "'" || ch == '"' || - (ch == "/" && !stream.eol() && stream.peek() != " ")) { - return chain(readQuoted(ch, "string", ch == '"' || ch == "`"), stream, state); - } else if (ch == "%") { - var style, embed = false; - if (stream.eat("s")) style = "atom"; - else if (stream.eat(/[WQ]/)) { style = "string"; embed = true; } - else if (stream.eat(/[wxqr]/)) style = "string"; - var delim = stream.eat(/[^\w\s]/); - if (!delim) return "operator"; - if (matching.propertyIsEnumerable(delim)) delim = matching[delim]; - return chain(readQuoted(delim, style, embed, true), stream, state); - } else if (ch == "#") { - stream.skipToEnd(); - return "comment"; - } else if (ch == "<" && stream.eat("<")) { - stream.eat("-"); - stream.eat(/[\'\"\`]/); - var match = stream.match(/^\w+/); - stream.eat(/[\'\"\`]/); - if (match) return chain(readHereDoc(match[0]), stream, state); - return null; - } else if (ch == "0") { - if (stream.eat("x")) stream.eatWhile(/[\da-fA-F]/); - else if (stream.eat("b")) stream.eatWhile(/[01]/); - else stream.eatWhile(/[0-7]/); - return "number"; - } else if (/\d/.test(ch)) { - stream.match(/^[\d_]*(?:\.[\d_]+)?(?:[eE][+\-]?[\d_]+)?/); - return "number"; - } else if (ch == "?") { - while (stream.match(/^\\[CM]-/)) {} - if (stream.eat("\\")) stream.eatWhile(/\w/); - else stream.next(); - return "string"; - } else if (ch == ":") { - if (stream.eat("'")) return chain(readQuoted("'", "atom", false), stream, state); - if (stream.eat('"')) return chain(readQuoted('"', "atom", true), stream, state); - stream.eatWhile(/[\w\?]/); - return "atom"; - } else if (ch == "@") { - stream.eat("@"); - stream.eatWhile(/[\w\?]/); - return "variable-2"; - } else if (ch == "$") { - stream.next(); - stream.eatWhile(/[\w\?]/); - return "variable-3"; - } else if (/\w/.test(ch)) { - stream.eatWhile(/[\w\?]/); - if (stream.eat(":")) return "atom"; - return "ident"; - } else if (ch == "|" && 
(state.varList || state.lastTok == "{" || state.lastTok == "do")) { - curPunc = "|"; - return null; - } else if (/[\(\)\[\]{}\\;]/.test(ch)) { - curPunc = ch; - return null; - } else if (ch == "-" && stream.eat(">")) { - return "arrow"; - } else if (/[=+\-\/*:\.^%<>~|]/.test(ch)) { - stream.eatWhile(/[=+\-\/*:\.^%<>~|]/); - return "operator"; - } else { - return null; - } - } - - function tokenBaseUntilBrace() { - var depth = 1; - return function(stream, state) { - if (stream.peek() == "}") { - depth--; - if (depth == 0) { - state.tokenize.pop(); - return state.tokenize[state.tokenize.length-1](stream, state); - } - } else if (stream.peek() == "{") { - depth++; - } - return tokenBase(stream, state); - }; - } - function readQuoted(quote, style, embed, unescaped) { - return function(stream, state) { - var escaped = false, ch; - while ((ch = stream.next()) != null) { - if (ch == quote && (unescaped || !escaped)) { - state.tokenize.pop(); - break; - } - if (embed && ch == "#" && !escaped && stream.eat("{")) { - state.tokenize.push(tokenBaseUntilBrace(arguments.callee)); - break; - } - escaped = !escaped && ch == "\\"; - } - return style; - }; - } - function readHereDoc(phrase) { - return function(stream, state) { - if (stream.match(phrase)) state.tokenize.pop(); - else stream.skipToEnd(); - return "string"; - }; - } - function readBlockComment(stream, state) { - if (stream.sol() && stream.match("=end") && stream.eol()) - state.tokenize.pop(); - stream.skipToEnd(); - return "comment"; - } - - return { - startState: function() { - return {tokenize: [tokenBase], - indented: 0, - context: {type: "top", indented: -config.indentUnit}, - continuedLine: false, - lastTok: null, - varList: false}; - }, - - token: function(stream, state) { - if (stream.sol()) state.indented = stream.indentation(); - var style = state.tokenize[state.tokenize.length-1](stream, state), kwtype; - if (style == "ident") { - var word = stream.current(); - style = keywords.propertyIsEnumerable(stream.current()) ? "keyword" - : /^[A-Z]/.test(word) ? "tag" - : (state.lastTok == "def" || state.lastTok == "class" || state.varList) ? "def" - : "variable"; - if (indentWords.propertyIsEnumerable(word)) kwtype = "indent"; - else if (dedentWords.propertyIsEnumerable(word)) kwtype = "dedent"; - else if ((word == "if" || word == "unless") && stream.column() == stream.indentation()) - kwtype = "indent"; - } - if (curPunc || (style && style != "comment")) state.lastTok = word || curPunc || style; - if (curPunc == "|") state.varList = !state.varList; - - if (kwtype == "indent" || /[\(\[\{]/.test(curPunc)) - state.context = {prev: state.context, type: curPunc || style, indented: state.indented}; - else if ((kwtype == "dedent" || /[\)\]\}]/.test(curPunc)) && state.context.prev) - state.context = state.context.prev; - - if (stream.eol()) - state.continuedLine = (curPunc == "\\" || style == "operator"); - return style; - }, - - indent: function(state, textAfter) { - if (state.tokenize[state.tokenize.length-1] != tokenBase) return 0; - var firstChar = textAfter && textAfter.charAt(0); - var ct = state.context; - var closing = ct.type == matching[firstChar] || - ct.type == "keyword" && /^(?:end|until|else|elsif|when|rescue)\b/.test(textAfter); - return ct.indented + (closing ? 0 : config.indentUnit) + - (state.continuedLine ? 
config.indentUnit : 0); - }, - electricChars: "}de" // enD and rescuE - - }; -}); - -CodeMirror.defineMIME("text/x-ruby", "ruby"); - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/rust/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/rust/index.html deleted file mode 100644 index 3b811e3..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/rust/index.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - CodeMirror: Rust mode - - - - - - - -

[deleted demo page for the Rust mode. MIME types defined: text/x-rustsrc.]
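Each mode file deleted here follows the same contract: CodeMirror.defineMode("name", factory) registers a tokenizer under a name, and CodeMirror.defineMIME maps a MIME string onto it, so an editor instance can request the mode by either identifier. A minimal usage sketch, assuming the CodeMirror 2-era API this vendored copy ships and a hypothetical <textarea id="code"> on the page:

    // codemirror.js and mode/rust/rust.js must already be loaded
    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "text/x-rustsrc",  // equivalent to mode: "rust"
      lineNumbers: true
    });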

- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/rust/rust.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/rust/rust.js deleted file mode 100644 index 2a5caac..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/rust/rust.js +++ /dev/null @@ -1,432 +0,0 @@ -CodeMirror.defineMode("rust", function() { - var indentUnit = 4, altIndentUnit = 2; - var valKeywords = { - "if": "if-style", "while": "if-style", "else": "else-style", - "do": "else-style", "ret": "else-style", "fail": "else-style", - "break": "atom", "cont": "atom", "const": "let", "resource": "fn", - "let": "let", "fn": "fn", "for": "for", "alt": "alt", "iface": "iface", - "impl": "impl", "type": "type", "enum": "enum", "mod": "mod", - "as": "op", "true": "atom", "false": "atom", "assert": "op", "check": "op", - "claim": "op", "native": "ignore", "unsafe": "ignore", "import": "else-style", - "export": "else-style", "copy": "op", "log": "op", "log_err": "op", - "use": "op", "bind": "op", "self": "atom" - }; - var typeKeywords = function() { - var keywords = {"fn": "fn", "block": "fn", "obj": "obj"}; - var atoms = "bool uint int i8 i16 i32 i64 u8 u16 u32 u64 float f32 f64 str char".split(" "); - for (var i = 0, e = atoms.length; i < e; ++i) keywords[atoms[i]] = "atom"; - return keywords; - }(); - var operatorChar = /[+\-*&%=<>!?|\.@]/; - - // Tokenizer - - // Used as scratch variable to communicate multiple values without - // consing up tons of objects. - var tcat, content; - function r(tc, style) { - tcat = tc; - return style; - } - - function tokenBase(stream, state) { - var ch = stream.next(); - if (ch == '"') { - state.tokenize = tokenString; - return state.tokenize(stream, state); - } - if (ch == "'") { - tcat = "atom"; - if (stream.eat("\\")) { - if (stream.skipTo("'")) { stream.next(); return "string"; } - else { return "error"; } - } else { - stream.next(); - return stream.eat("'") ? "string" : "error"; - } - } - if (ch == "/") { - if (stream.eat("/")) { stream.skipToEnd(); return "comment"; } - if (stream.eat("*")) { - state.tokenize = tokenComment(1); - return state.tokenize(stream, state); - } - } - if (ch == "#") { - if (stream.eat("[")) { tcat = "open-attr"; return null; } - stream.eatWhile(/\w/); - return r("macro", "meta"); - } - if (ch == ":" && stream.match(":<")) { - return r("op", null); - } - if (ch.match(/\d/) || (ch == "." && stream.eat(/\d/))) { - var flp = false; - if (!stream.match(/^x[\da-f]+/i) && !stream.match(/^b[01]+/)) { - stream.eatWhile(/\d/); - if (stream.eat(".")) { flp = true; stream.eatWhile(/\d/); } - if (stream.match(/^e[+\-]?\d+/i)) { flp = true; } - } - if (flp) stream.match(/^f(?:32|64)/); - else stream.match(/^[ui](?:8|16|32|64)/); - return r("atom", "number"); - } - if (ch.match(/[()\[\]{}:;,]/)) return r(ch, null); - if (ch == "-" && stream.eat(">")) return r("->", null); - if (ch.match(operatorChar)) { - stream.eatWhile(operatorChar); - return r("op", null); - } - stream.eatWhile(/\w/); - content = stream.current(); - if (stream.match(/^::\w/)) { - stream.backUp(1); - return r("prefix", "variable-2"); - } - if (state.keywords.propertyIsEnumerable(content)) - return r(state.keywords[content], content.match(/true|false/) ? 
"atom" : "keyword"); - return r("name", "variable"); - } - - function tokenString(stream, state) { - var ch, escaped = false; - while (ch = stream.next()) { - if (ch == '"' && !escaped) { - state.tokenize = tokenBase; - return r("atom", "string"); - } - escaped = !escaped && ch == "\\"; - } - // Hack to not confuse the parser when a string is split in - // pieces. - return r("op", "string"); - } - - function tokenComment(depth) { - return function(stream, state) { - var lastCh = null, ch; - while (ch = stream.next()) { - if (ch == "/" && lastCh == "*") { - if (depth == 1) { - state.tokenize = tokenBase; - break; - } else { - state.tokenize = tokenComment(depth - 1); - return state.tokenize(stream, state); - } - } - if (ch == "*" && lastCh == "/") { - state.tokenize = tokenComment(depth + 1); - return state.tokenize(stream, state); - } - lastCh = ch; - } - return "comment"; - }; - } - - // Parser - - var cx = {state: null, stream: null, marked: null, cc: null}; - function pass() { - for (var i = arguments.length - 1; i >= 0; i--) cx.cc.push(arguments[i]); - } - function cont() { - pass.apply(null, arguments); - return true; - } - - function pushlex(type, info) { - var result = function() { - var state = cx.state; - state.lexical = {indented: state.indented, column: cx.stream.column(), - type: type, prev: state.lexical, info: info}; - }; - result.lex = true; - return result; - } - function poplex() { - var state = cx.state; - if (state.lexical.prev) { - if (state.lexical.type == ")") - state.indented = state.lexical.indented; - state.lexical = state.lexical.prev; - } - } - function typecx() { cx.state.keywords = typeKeywords; } - function valcx() { cx.state.keywords = valKeywords; } - poplex.lex = typecx.lex = valcx.lex = true; - - function commasep(comb, end) { - function more(type) { - if (type == ",") return cont(comb, more); - if (type == end) return cont(); - return cont(more); - } - return function(type) { - if (type == end) return cont(); - return pass(comb, more); - }; - } - - function stat_of(comb, tag) { - return cont(pushlex("stat", tag), comb, poplex, block); - } - function block(type) { - if (type == "}") return cont(); - if (type == "let") return stat_of(letdef1, "let"); - if (type == "fn") return stat_of(fndef); - if (type == "type") return cont(pushlex("stat"), tydef, endstatement, poplex, block); - if (type == "enum") return stat_of(enumdef); - if (type == "mod") return stat_of(mod); - if (type == "iface") return stat_of(iface); - if (type == "impl") return stat_of(impl); - if (type == "open-attr") return cont(pushlex("]"), commasep(expression, "]"), poplex); - if (type == "ignore" || type.match(/[\]\);,]/)) return cont(block); - return pass(pushlex("stat"), expression, poplex, endstatement, block); - } - function endstatement(type) { - if (type == ";") return cont(); - return pass(); - } - function expression(type) { - if (type == "atom" || type == "name") return cont(maybeop); - if (type == "{") return cont(pushlex("}"), exprbrace, poplex); - if (type.match(/[\[\(]/)) return matchBrackets(type, expression); - if (type.match(/[\]\)\};,]/)) return pass(); - if (type == "if-style") return cont(expression, expression); - if (type == "else-style" || type == "op") return cont(expression); - if (type == "for") return cont(pattern, maybetype, inop, expression, expression); - if (type == "alt") return cont(expression, altbody); - if (type == "fn") return cont(fndef); - if (type == "macro") return cont(macro); - return cont(); - } - function maybeop(type) { - if (content == ".") 
return cont(maybeprop); - if (content == "::<"){return cont(typarams, maybeop);} - if (type == "op" || content == ":") return cont(expression); - if (type == "(" || type == "[") return matchBrackets(type, expression); - return pass(); - } - function maybeprop(type) { - if (content.match(/^\w+$/)) {cx.marked = "variable"; return cont(maybeop);} - return pass(expression); - } - function exprbrace(type) { - if (type == "op") { - if (content == "|") return cont(blockvars, poplex, pushlex("}", "block"), block); - if (content == "||") return cont(poplex, pushlex("}", "block"), block); - } - if (content == "mutable" || (content.match(/^\w+$/) && cx.stream.peek() == ":" - && !cx.stream.match("::", false))) - return pass(record_of(expression)); - return pass(block); - } - function record_of(comb) { - function ro(type) { - if (content == "mutable" || content == "with") {cx.marked = "keyword"; return cont(ro);} - if (content.match(/^\w*$/)) {cx.marked = "variable"; return cont(ro);} - if (type == ":") return cont(comb, ro); - if (type == "}") return cont(); - return cont(ro); - } - return ro; - } - function blockvars(type) { - if (type == "name") {cx.marked = "def"; return cont(blockvars);} - if (type == "op" && content == "|") return cont(); - return cont(blockvars); - } - - function letdef1(type) { - if (type.match(/[\]\)\};]/)) return cont(); - if (content == "=") return cont(expression, letdef2); - if (type == ",") return cont(letdef1); - return pass(pattern, maybetype, letdef1); - } - function letdef2(type) { - if (type.match(/[\]\)\};,]/)) return pass(letdef1); - else return pass(expression, letdef2); - } - function maybetype(type) { - if (type == ":") return cont(typecx, rtype, valcx); - return pass(); - } - function inop(type) { - if (type == "name" && content == "in") {cx.marked = "keyword"; return cont();} - return pass(); - } - function fndef(type) { - if (content == "@" || content == "~") {cx.marked = "keyword"; return cont(fndef);} - if (type == "name") {cx.marked = "def"; return cont(fndef);} - if (content == "<") return cont(typarams, fndef); - if (type == "{") return pass(expression); - if (type == "(") return cont(pushlex(")"), commasep(argdef, ")"), poplex, fndef); - if (type == "->") return cont(typecx, rtype, valcx, fndef); - if (type == ";") return cont(); - return cont(fndef); - } - function tydef(type) { - if (type == "name") {cx.marked = "def"; return cont(tydef);} - if (content == "<") return cont(typarams, tydef); - if (content == "=") return cont(typecx, rtype, valcx); - return cont(tydef); - } - function enumdef(type) { - if (type == "name") {cx.marked = "def"; return cont(enumdef);} - if (content == "<") return cont(typarams, enumdef); - if (content == "=") return cont(typecx, rtype, valcx, endstatement); - if (type == "{") return cont(pushlex("}"), typecx, enumblock, valcx, poplex); - return cont(enumdef); - } - function enumblock(type) { - if (type == "}") return cont(); - if (type == "(") return cont(pushlex(")"), commasep(rtype, ")"), poplex, enumblock); - if (content.match(/^\w+$/)) cx.marked = "def"; - return cont(enumblock); - } - function mod(type) { - if (type == "name") {cx.marked = "def"; return cont(mod);} - if (type == "{") return cont(pushlex("}"), block, poplex); - return pass(); - } - function iface(type) { - if (type == "name") {cx.marked = "def"; return cont(iface);} - if (content == "<") return cont(typarams, iface); - if (type == "{") return cont(pushlex("}"), block, poplex); - return pass(); - } - function impl(type) { - if (content == "<") return 
cont(typarams, impl); - if (content == "of" || content == "for") {cx.marked = "keyword"; return cont(rtype, impl);} - if (type == "name") {cx.marked = "def"; return cont(impl);} - if (type == "{") return cont(pushlex("}"), block, poplex); - return pass(); - } - function typarams(type) { - if (content == ">") return cont(); - if (content == ",") return cont(typarams); - if (content == ":") return cont(rtype, typarams); - return pass(rtype, typarams); - } - function argdef(type) { - if (type == "name") {cx.marked = "def"; return cont(argdef);} - if (type == ":") return cont(typecx, rtype, valcx); - return pass(); - } - function rtype(type) { - if (type == "name") {cx.marked = "variable-3"; return cont(rtypemaybeparam); } - if (content == "mutable") {cx.marked = "keyword"; return cont(rtype);} - if (type == "atom") return cont(rtypemaybeparam); - if (type == "op" || type == "obj") return cont(rtype); - if (type == "fn") return cont(fntype); - if (type == "{") return cont(pushlex("{"), record_of(rtype), poplex); - return matchBrackets(type, rtype); - } - function rtypemaybeparam(type) { - if (content == "<") return cont(typarams); - return pass(); - } - function fntype(type) { - if (type == "(") return cont(pushlex("("), commasep(rtype, ")"), poplex, fntype); - if (type == "->") return cont(rtype); - return pass(); - } - function pattern(type) { - if (type == "name") {cx.marked = "def"; return cont(patternmaybeop);} - if (type == "atom") return cont(patternmaybeop); - if (type == "op") return cont(pattern); - if (type.match(/[\]\)\};,]/)) return pass(); - return matchBrackets(type, pattern); - } - function patternmaybeop(type) { - if (type == "op" && content == ".") return cont(); - if (content == "to") {cx.marked = "keyword"; return cont(pattern);} - else return pass(); - } - function altbody(type) { - if (type == "{") return cont(pushlex("}", "alt"), altblock1, poplex); - return pass(); - } - function altblock1(type) { - if (type == "}") return cont(); - if (type == "|") return cont(altblock1); - if (content == "when") {cx.marked = "keyword"; return cont(expression, altblock2);} - if (type.match(/[\]\);,]/)) return cont(altblock1); - return pass(pattern, altblock2); - } - function altblock2(type) { - if (type == "{") return cont(pushlex("}", "alt"), block, poplex, altblock1); - else return pass(altblock1); - } - - function macro(type) { - if (type.match(/[\[\(\{]/)) return matchBrackets(type, expression); - return pass(); - } - function matchBrackets(type, comb) { - if (type == "[") return cont(pushlex("]"), commasep(comb, "]"), poplex); - if (type == "(") return cont(pushlex(")"), commasep(comb, ")"), poplex); - if (type == "{") return cont(pushlex("}"), commasep(comb, "}"), poplex); - return cont(); - } - - function parse(state, stream, style) { - var cc = state.cc; - // Communicate our context to the combinators. - // (Less wasteful than consing up a hundred closures on every call.) - cx.state = state; cx.stream = stream; cx.marked = null, cx.cc = cc; - - while (true) { - var combinator = cc.length ? 
cc.pop() : block; - if (combinator(tcat)) { - while(cc.length && cc[cc.length - 1].lex) - cc.pop()(); - return cx.marked || style; - } - } - } - - return { - startState: function() { - return { - tokenize: tokenBase, - cc: [], - lexical: {indented: -indentUnit, column: 0, type: "top", align: false}, - keywords: valKeywords, - indented: 0 - }; - }, - - token: function(stream, state) { - if (stream.sol()) { - if (!state.lexical.hasOwnProperty("align")) - state.lexical.align = false; - state.indented = stream.indentation(); - } - if (stream.eatSpace()) return null; - tcat = content = null; - var style = state.tokenize(stream, state); - if (style == "comment") return style; - if (!state.lexical.hasOwnProperty("align")) - state.lexical.align = true; - if (tcat == "prefix") return style; - if (!content) content = stream.current(); - return parse(state, stream, style); - }, - - indent: function(state, textAfter) { - if (state.tokenize != tokenBase) return 0; - var firstChar = textAfter && textAfter.charAt(0), lexical = state.lexical, - type = lexical.type, closing = firstChar == type; - if (type == "stat") return lexical.indented + indentUnit; - if (lexical.align) return lexical.column + (closing ? 0 : 1); - return lexical.indented + (closing ? 0 : (lexical.info == "alt" ? altIndentUnit : indentUnit)); - }, - - electricChars: "{}" - }; -}); - -CodeMirror.defineMIME("text/x-rustsrc", "rust"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/scheme/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/scheme/index.html deleted file mode 100644 index 2a6105f..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/scheme/index.html +++ /dev/null @@ -1,64 +0,0 @@ - - - - CodeMirror: Scheme mode - - - - - - - -

[deleted demo page for the Scheme mode. MIME types defined: text/x-scheme.]

- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/scheme/scheme.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/scheme/scheme.js deleted file mode 100644 index caf78db..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/scheme/scheme.js +++ /dev/null @@ -1,202 +0,0 @@ -/** - * Author: Koh Zi Han, based on implementation by Koh Zi Chun - */ -CodeMirror.defineMode("scheme", function (config, mode) { - var BUILTIN = "builtin", COMMENT = "comment", STRING = "string", - ATOM = "atom", NUMBER = "number", BRACKET = "bracket", KEYWORD="keyword"; - var INDENT_WORD_SKIP = 2, KEYWORDS_SKIP = 1; - - function makeKeywords(str) { - var obj = {}, words = str.split(" "); - for (var i = 0; i < words.length; ++i) obj[words[i]] = true; - return obj; - } - - var keywords = makeKeywords("λ case-lambda call/cc class define-class exit-handler field import inherit init-field interface let*-values let-values let/ec mixin opt-lambda override protect provide public rename require require-for-syntax syntax syntax-case syntax-error unit/sig unless when with-syntax and begin call-with-current-continuation call-with-input-file call-with-output-file case cond define define-syntax delay do dynamic-wind else for-each if lambda let let* let-syntax letrec letrec-syntax map or syntax-rules abs acos angle append apply asin assoc assq assv atan boolean? caar cadr call-with-input-file call-with-output-file call-with-values car cdddar cddddr cdr ceiling char->integer char-alphabetic? char-ci<=? char-ci=? char-ci>? char-downcase char-lower-case? char-numeric? char-ready? char-upcase char-upper-case? char-whitespace? char<=? char=? char>? char? close-input-port close-output-port complex? cons cos current-input-port current-output-port denominator display eof-object? eq? equal? eqv? eval even? exact->inexact exact? exp expt #f floor force gcd imag-part inexact->exact inexact? input-port? integer->char integer? interaction-environment lcm length list list->string list->vector list-ref list-tail list? load log magnitude make-polar make-rectangular make-string make-vector max member memq memv min modulo negative? newline not null-environment null? number->string number? numerator odd? open-input-file open-output-file output-port? pair? peek-char port? positive? procedure? quasiquote quote quotient rational? rationalize read read-char real-part real? remainder reverse round scheme-report-environment set! set-car! set-cdr! sin sqrt string string->list string->number string->symbol string-append string-ci<=? string-ci=? string-ci>? string-copy string-fill! string-length string-ref string-set! string<=? string=? string>? string? substring symbol->string symbol? #t tan transcript-off transcript-on truncate values vector vector->list vector-fill! vector-length vector-ref vector-set! with-input-from-file with-output-to-file write write-char zero?"); - var indentKeys = makeKeywords("define let letrec let* lambda"); - - - function stateStack(indent, type, prev) { // represents a state stack object - this.indent = indent; - this.type = type; - this.prev = prev; - } - - function pushStack(state, indent, type) { - state.indentStack = new stateStack(indent, type, state.indentStack); - } - - function popStack(state) { - state.indentStack = state.indentStack.prev; - } - - /** - * Scheme numbers are complicated unfortunately. - * Checks if we're looking at a number, which might be possibly a fraction. - * Also checks that it is not part of a longer identifier. Returns true/false accordingly. 
- */ - function isNumber(ch, stream){ - if(/[0-9]/.exec(ch) != null){ - stream.eatWhile(/[0-9]/); - stream.eat(/\//); - stream.eatWhile(/[0-9]/); - if (stream.eol() || !(/[a-zA-Z\-\_\/]/.exec(stream.peek()))) return true; - stream.backUp(stream.current().length - 1); // undo all the eating - } - return false; - } - - return { - startState: function () { - return { - indentStack: null, - indentation: 0, - mode: false, - sExprComment: false - }; - }, - - token: function (stream, state) { - if (state.indentStack == null && stream.sol()) { - // update indentation, but only if indentStack is empty - state.indentation = stream.indentation(); - } - - // skip spaces - if (stream.eatSpace()) { - return null; - } - var returnType = null; - - switch(state.mode){ - case "string": // multi-line string parsing mode - var next, escaped = false; - while ((next = stream.next()) != null) { - if (next == "\"" && !escaped) { - - state.mode = false; - break; - } - escaped = !escaped && next == "\\"; - } - returnType = STRING; // continue on in scheme-string mode - break; - case "comment": // comment parsing mode - var next, maybeEnd = false; - while ((next = stream.next()) != null) { - if (next == "#" && maybeEnd) { - - state.mode = false; - break; - } - maybeEnd = (next == "|"); - } - returnType = COMMENT; - break; - case "s-expr-comment": // s-expr commenting mode - state.mode = false; - if(stream.peek() == "(" || stream.peek() == "["){ - // actually start scheme s-expr commenting mode - state.sExprComment = 0; - }else{ - // if not we just comment the entire of the next token - stream.eatWhile(/[^/s]/); // eat non spaces - returnType = COMMENT; - break; - } - default: // default parsing mode - var ch = stream.next(); - - if (ch == "\"") { - state.mode = "string"; - returnType = STRING; - - } else if (ch == "'") { - returnType = ATOM; - } else if (ch == '#') { - if (stream.eat("|")) { // Multi-line comment - state.mode = "comment"; // toggle to comment mode - returnType = COMMENT; - } else if (stream.eat(/[tf]/)) { // #t/#f (atom) - returnType = ATOM; - } else if (stream.eat(';')) { // S-Expr comment - state.mode = "s-expr-comment"; - returnType = COMMENT; - } - - } else if (ch == ";") { // comment - stream.skipToEnd(); // rest of the line is a comment - returnType = COMMENT; - } else if (ch == "-"){ - - if(!isNaN(parseInt(stream.peek()))){ - stream.eatWhile(/[\/0-9]/); - returnType = NUMBER; - }else{ - returnType = null; - } - } else if (isNumber(ch,stream)){ - returnType = NUMBER; - } else if (ch == "(" || ch == "[") { - var keyWord = ''; var indentTemp = stream.column(); - /** - Either - (indent-word .. - (non-indent-word .. - (;something else, bracket, etc. 
- */ - - while ((letter = stream.eat(/[^\s\(\[\;\)\]]/)) != null) { - keyWord += letter; - } - - if (keyWord.length > 0 && indentKeys.propertyIsEnumerable(keyWord)) { // indent-word - - pushStack(state, indentTemp + INDENT_WORD_SKIP, ch); - } else { // non-indent word - // we continue eating the spaces - stream.eatSpace(); - if (stream.eol() || stream.peek() == ";") { - // nothing significant after - // we restart indentation 1 space after - pushStack(state, indentTemp + 1, ch); - } else { - pushStack(state, indentTemp + stream.current().length, ch); // else we match - } - } - stream.backUp(stream.current().length - 1); // undo all the eating - - if(typeof state.sExprComment == "number") state.sExprComment++; - - returnType = BRACKET; - } else if (ch == ")" || ch == "]") { - returnType = BRACKET; - if (state.indentStack != null && state.indentStack.type == (ch == ")" ? "(" : "[")) { - popStack(state); - - if(typeof state.sExprComment == "number"){ - if(--state.sExprComment == 0){ - returnType = COMMENT; // final closing bracket - state.sExprComment = false; // turn off s-expr commenting mode - } - } - } - } else { - stream.eatWhile(/[\w\$_\-]/); - - if (keywords && keywords.propertyIsEnumerable(stream.current())) { - returnType = BUILTIN; - }else returnType = null; - } - } - return (typeof state.sExprComment == "number") ? COMMENT : returnType; - }, - - indent: function (state, textAfter) { - if (state.indentStack == null) return state.indentation; - return state.indentStack.indent; - } - }; -}); - -CodeMirror.defineMIME("text/x-scheme", "scheme"); \ No newline at end of file diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/smalltalk/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/smalltalk/index.html deleted file mode 100644 index 8a85c39..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/smalltalk/index.html +++ /dev/null @@ -1,55 +0,0 @@ - - - - CodeMirror: Smalltalk mode - - - - - - - -

[deleted demo page for the Smalltalk mode ("Simple Smalltalk mode"). MIME types defined: text/x-stsrc.]

- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/smalltalk/smalltalk.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/smalltalk/smalltalk.js deleted file mode 100644 index e589332..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/smalltalk/smalltalk.js +++ /dev/null @@ -1,139 +0,0 @@ -CodeMirror.defineMode('smalltalk', function(config, modeConfig) { - - var specialChars = /[+\-/\\*~<>=@%|&?!.:;^]/; - var keywords = /true|false|nil|self|super|thisContext/; - - var Context = function(tokenizer, parent) { - this.next = tokenizer; - this.parent = parent; - }; - - var Token = function(name, context, eos) { - this.name = name; - this.context = context; - this.eos = eos; - }; - - var State = function() { - this.context = new Context(next, null); - this.expectVariable = true; - this.indentation = 0; - this.userIndentationDelta = 0; - }; - - State.prototype.userIndent = function(indentation) { - this.userIndentationDelta = indentation > 0 ? (indentation / config.indentUnit - this.indentation) : 0; - }; - - var next = function(stream, context, state) { - var token = new Token(null, context, false); - var aChar = stream.next(); - - if (aChar === '"') { - token = nextComment(stream, new Context(nextComment, context)); - - } else if (aChar === '\'') { - token = nextString(stream, new Context(nextString, context)); - - } else if (aChar === '#') { - stream.eatWhile(/[^ .]/); - token.name = 'string-2'; - - } else if (aChar === '$') { - stream.eatWhile(/[^ ]/); - token.name = 'string-2'; - - } else if (aChar === '|' && state.expectVariable) { - token.context = new Context(nextTemporaries, context); - - } else if (/[\[\]{}()]/.test(aChar)) { - token.name = 'bracket'; - token.eos = /[\[{(]/.test(aChar); - - if (aChar === '[') { - state.indentation++; - } else if (aChar === ']') { - state.indentation = Math.max(0, state.indentation - 1); - } - - } else if (specialChars.test(aChar)) { - stream.eatWhile(specialChars); - token.name = 'operator'; - token.eos = aChar !== ';'; // ; cascaded message expression - - } else if (/\d/.test(aChar)) { - stream.eatWhile(/[\w\d]/); - token.name = 'number' - - } else if (/[\w_]/.test(aChar)) { - stream.eatWhile(/[\w\d_]/); - token.name = state.expectVariable ? (keywords.test(stream.current()) ? 'keyword' : 'variable') : null; - - } else { - token.eos = state.expectVariable; - } - - return token; - }; - - var nextComment = function(stream, context) { - stream.eatWhile(/[^"]/); - return new Token('comment', stream.eat('"') ? context.parent : context, true); - }; - - var nextString = function(stream, context) { - stream.eatWhile(/[^']/); - return new Token('string', stream.eat('\'') ? 
context.parent : context, false); - }; - - var nextTemporaries = function(stream, context, state) { - var token = new Token(null, context, false); - var aChar = stream.next(); - - if (aChar === '|') { - token.context = context.parent; - token.eos = true; - - } else { - stream.eatWhile(/[^|]/); - token.name = 'variable'; - } - - return token; - } - - return { - startState: function() { - return new State; - }, - - token: function(stream, state) { - state.userIndent(stream.indentation()); - - if (stream.eatSpace()) { - return null; - } - - var token = state.context.next(stream, state.context, state); - state.context = token.context; - state.expectVariable = token.eos; - - state.lastToken = token; - return token.name; - }, - - blankLine: function(state) { - state.userIndent(0); - }, - - indent: function(state, textAfter) { - var i = state.context.next === next && textAfter && textAfter.charAt(0) === ']' ? -1 : state.userIndentationDelta; - return (state.indentation + i) * config.indentUnit; - }, - - electricChars: ']' - }; - -}); - -CodeMirror.defineMIME('text/x-stsrc', {name: 'smalltalk'}); \ No newline at end of file diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/smarty/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/smarty/index.html deleted file mode 100644 index ad4dccf..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/smarty/index.html +++ /dev/null @@ -1,82 +0,0 @@ - - - - CodeMirror: Smarty mode - - - - - - - -

[deleted demo page for the Smarty mode. A plain text/Smarty mode which allows for custom delimiter tags (defaults to { and }). MIME types defined: text/x-smarty.]
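The deleted smarty.js reads its delimiter pair from the editor configuration (config.mode.leftDelimiter and config.mode.rightDelimiter, falling back to "{" and "}"), so the delimiters can be overridden by passing an object rather than a bare name as the mode option. A sketch under the same CodeMirror 2-era assumptions as above; the delimiter values are illustrative:

    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      // extra keys on the mode object reach the mode factory as config.mode.*
      mode: {
        name: "smarty",
        leftDelimiter: "<%",   // hypothetical custom delimiters
        rightDelimiter: "%>"
      }
    });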

- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/smarty/smarty.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/smarty/smarty.js deleted file mode 100644 index 9da7da6..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/smarty/smarty.js +++ /dev/null @@ -1,148 +0,0 @@ -CodeMirror.defineMode("smarty", function(config, parserConfig) { - var keyFuncs = ["debug", "extends", "function", "include", "literal"]; - var last; - var regs = { - operatorChars: /[+\-*&%=<>!?]/, - validIdentifier: /[a-zA-Z0-9\_]/, - stringChar: /[\'\"]/ - } - var leftDelim = (typeof config.mode.leftDelimiter != 'undefined') ? config.mode.leftDelimiter : "{"; - var rightDelim = (typeof config.mode.rightDelimiter != 'undefined') ? config.mode.rightDelimiter : "}"; - function ret(style, lst) { last = lst; return style; } - - - function tokenizer(stream, state) { - function chain(parser) { - state.tokenize = parser; - return parser(stream, state); - } - - if (stream.match(leftDelim, true)) { - if (stream.eat("*")) { - return chain(inBlock("comment", "*" + rightDelim)); - } - else { - state.tokenize = inSmarty; - return "tag"; - } - } - else { - // I'd like to do an eatWhile() here, but I can't get it to eat only up to the rightDelim string/char - stream.next(); - return null; - } - } - - function inSmarty(stream, state) { - if (stream.match(rightDelim, true)) { - state.tokenize = tokenizer; - return ret("tag", null); - } - - var ch = stream.next(); - if (ch == "$") { - stream.eatWhile(regs.validIdentifier); - return ret("variable-2", "variable"); - } - else if (ch == ".") { - return ret("operator", "property"); - } - else if (regs.stringChar.test(ch)) { - state.tokenize = inAttribute(ch); - return ret("string", "string"); - } - else if (regs.operatorChars.test(ch)) { - stream.eatWhile(regs.operatorChars); - return ret("operator", "operator"); - } - else if (ch == "[" || ch == "]") { - return ret("bracket", "bracket"); - } - else if (/\d/.test(ch)) { - stream.eatWhile(/\d/); - return ret("number", "number"); - } - else { - if (state.last == "variable") { - if (ch == "@") { - stream.eatWhile(regs.validIdentifier); - return ret("property", "property"); - } - else if (ch == "|") { - stream.eatWhile(regs.validIdentifier); - return ret("qualifier", "modifier"); - } - } - else if (state.last == "whitespace") { - stream.eatWhile(regs.validIdentifier); - return ret("attribute", "modifier"); - } - else if (state.last == "property") { - stream.eatWhile(regs.validIdentifier); - return ret("property", null); - } - else if (/\s/.test(ch)) { - last = "whitespace"; - return null; - } - - var str = ""; - if (ch != "/") { - str += ch; - } - var c = ""; - while ((c = stream.eat(regs.validIdentifier))) { - str += c; - } - var i, j; - for (i=0, j=keyFuncs.length; i - - - CodeMirror: SPARQL mode - - - - - - - -

[deleted demo page for the SPARQL mode. MIME types defined: application/x-sparql-query.]

- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/sparql/sparql.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/sparql/sparql.js deleted file mode 100644 index ceb5294..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/sparql/sparql.js +++ /dev/null @@ -1,143 +0,0 @@ -CodeMirror.defineMode("sparql", function(config) { - var indentUnit = config.indentUnit; - var curPunc; - - function wordRegexp(words) { - return new RegExp("^(?:" + words.join("|") + ")$", "i"); - } - var ops = wordRegexp(["str", "lang", "langmatches", "datatype", "bound", "sameterm", "isiri", "isuri", - "isblank", "isliteral", "union", "a"]); - var keywords = wordRegexp(["base", "prefix", "select", "distinct", "reduced", "construct", "describe", - "ask", "from", "named", "where", "order", "limit", "offset", "filter", "optional", - "graph", "by", "asc", "desc"]); - var operatorChars = /[*+\-<>=&|]/; - - function tokenBase(stream, state) { - var ch = stream.next(); - curPunc = null; - if (ch == "$" || ch == "?") { - stream.match(/^[\w\d]*/); - return "variable-2"; - } - else if (ch == "<" && !stream.match(/^[\s\u00a0=]/, false)) { - stream.match(/^[^\s\u00a0>]*>?/); - return "atom"; - } - else if (ch == "\"" || ch == "'") { - state.tokenize = tokenLiteral(ch); - return state.tokenize(stream, state); - } - else if (/[{}\(\),\.;\[\]]/.test(ch)) { - curPunc = ch; - return null; - } - else if (ch == "#") { - stream.skipToEnd(); - return "comment"; - } - else if (operatorChars.test(ch)) { - stream.eatWhile(operatorChars); - return null; - } - else if (ch == ":") { - stream.eatWhile(/[\w\d\._\-]/); - return "atom"; - } - else { - stream.eatWhile(/[_\w\d]/); - if (stream.eat(":")) { - stream.eatWhile(/[\w\d_\-]/); - return "atom"; - } - var word = stream.current(), type; - if (ops.test(word)) - return null; - else if (keywords.test(word)) - return "keyword"; - else - return "variable"; - } - } - - function tokenLiteral(quote) { - return function(stream, state) { - var escaped = false, ch; - while ((ch = stream.next()) != null) { - if (ch == quote && !escaped) { - state.tokenize = tokenBase; - break; - } - escaped = !escaped && ch == "\\"; - } - return "string"; - }; - } - - function pushContext(state, type, col) { - state.context = {prev: state.context, indent: state.indent, col: col, type: type}; - } - function popContext(state) { - state.indent = state.context.indent; - state.context = state.context.prev; - } - - return { - startState: function(base) { - return {tokenize: tokenBase, - context: null, - indent: 0, - col: 0}; - }, - - token: function(stream, state) { - if (stream.sol()) { - if (state.context && state.context.align == null) state.context.align = false; - state.indent = stream.indentation(); - } - if (stream.eatSpace()) return null; - var style = state.tokenize(stream, state); - - if (style != "comment" && state.context && state.context.align == null && state.context.type != "pattern") { - state.context.align = true; - } - - if (curPunc == "(") pushContext(state, ")", stream.column()); - else if (curPunc == "[") pushContext(state, "]", stream.column()); - else if (curPunc == "{") pushContext(state, "}", stream.column()); - else if (/[\]\}\)]/.test(curPunc)) { - while (state.context && state.context.type == "pattern") popContext(state); - if (state.context && curPunc == state.context.type) popContext(state); - } - else if (curPunc == "." 
&& state.context && state.context.type == "pattern") popContext(state); - else if (/atom|string|variable/.test(style) && state.context) { - if (/[\}\]]/.test(state.context.type)) - pushContext(state, "pattern", stream.column()); - else if (state.context.type == "pattern" && !state.context.align) { - state.context.align = true; - state.context.col = stream.column(); - } - } - - return style; - }, - - indent: function(state, textAfter) { - var firstChar = textAfter && textAfter.charAt(0); - var context = state.context; - if (/[\]\}]/.test(firstChar)) - while (context && context.type == "pattern") context = context.prev; - - var closing = context && firstChar == context.type; - if (!context) - return 0; - else if (context.type == "pattern") - return context.col; - else if (context.align) - return context.col + (closing ? 0 : 1); - else - return context.indent + (closing ? 0 : indentUnit); - } - }; -}); - -CodeMirror.defineMIME("application/x-sparql-query", "sparql"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/stex/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/stex/index.html deleted file mode 100644 index e49289e..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/stex/index.html +++ /dev/null @@ -1,95 +0,0 @@ - - - - CodeMirror: sTeX mode - - - - - - - -

[deleted demo page for the sTeX mode. MIME types defined: text/x-stex.]

- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/stex/stex.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/stex/stex.js deleted file mode 100644 index b89e619..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/stex/stex.js +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Author: Constantin Jucovschi (c.jucovschi@jacobs-university.de) - * Licence: MIT - */ - -CodeMirror.defineMode("stex", function(cmCfg, modeCfg) -{ - function pushCommand(state, command) { - state.cmdState.push(command); - } - - function peekCommand(state) { - if (state.cmdState.length>0) - return state.cmdState[state.cmdState.length-1]; - else - return null; - } - - function popCommand(state) { - if (state.cmdState.length>0) { - var plug = state.cmdState.pop(); - plug.closeBracket(); - } - } - - function applyMostPowerful(state) { - var context = state.cmdState; - for (var i = context.length - 1; i >= 0; i--) { - var plug = context[i]; - if (plug.name=="DEFAULT") - continue; - return plug.styleIdentifier(); - } - return null; - } - - function addPluginPattern(pluginName, cmdStyle, brackets, styles) { - return function () { - this.name=pluginName; - this.bracketNo = 0; - this.style=cmdStyle; - this.styles = styles; - this.brackets = brackets; - - this.styleIdentifier = function(content) { - if (this.bracketNo<=this.styles.length) - return this.styles[this.bracketNo-1]; - else - return null; - }; - this.openBracket = function(content) { - this.bracketNo++; - return "bracket"; - }; - this.closeBracket = function(content) { - }; - } - } - - var plugins = new Array(); - - plugins["importmodule"] = addPluginPattern("importmodule", "tag", "{[", ["string", "builtin"]); - plugins["documentclass"] = addPluginPattern("documentclass", "tag", "{[", ["", "atom"]); - plugins["usepackage"] = addPluginPattern("documentclass", "tag", "[", ["atom"]); - plugins["begin"] = addPluginPattern("documentclass", "tag", "[", ["atom"]); - plugins["end"] = addPluginPattern("documentclass", "tag", "[", ["atom"]); - - plugins["DEFAULT"] = function () { - this.name="DEFAULT"; - this.style="tag"; - - this.styleIdentifier = function(content) { - }; - this.openBracket = function(content) { - }; - this.closeBracket = function(content) { - }; - }; - - function setState(state, f) { - state.f = f; - } - - function normal(source, state) { - if (source.match(/^\\[a-zA-Z@]+/)) { - var cmdName = source.current(); - cmdName = cmdName.substr(1, cmdName.length-1); - var plug = plugins[cmdName]; - if (typeof(plug) == 'undefined') { - plug = plugins["DEFAULT"]; - } - plug = new plug(); - pushCommand(state, plug); - setState(state, beginParams); - return plug.style; - } - - // escape characters - if (source.match(/^\\[$&%#{}_]/)) { - return "tag"; - } - - // white space control characters - if (source.match(/^\\[,;!\/]/)) { - return "tag"; - } - - var ch = source.next(); - if (ch == "%") { - // special case: % at end of its own line; stay in same state - if (!source.eol()) { - setState(state, inCComment); - } - return "comment"; - } - else if (ch=='}' || ch==']') { - plug = peekCommand(state); - if (plug) { - plug.closeBracket(ch); - setState(state, beginParams); - } else - return "error"; - return "bracket"; - } else if (ch=='{' || ch=='[') { - plug = plugins["DEFAULT"]; - plug = new plug(); - pushCommand(state, plug); - return "bracket"; - } - else if (/\d/.test(ch)) { - source.eatWhile(/[\w.%]/); - return "atom"; - } - else { - source.eatWhile(/[\w-_]/); - return applyMostPowerful(state); - } - } - - function inCComment(source, state) { 
- source.skipToEnd(); - setState(state, normal); - return "comment"; - } - - function beginParams(source, state) { - var ch = source.peek(); - if (ch == '{' || ch == '[') { - var lastPlug = peekCommand(state); - var style = lastPlug.openBracket(ch); - source.eat(ch); - setState(state, normal); - return "bracket"; - } - if (/[ \t\r]/.test(ch)) { - source.eat(ch); - return null; - } - setState(state, normal); - lastPlug = peekCommand(state); - if (lastPlug) { - popCommand(state); - } - return normal(source, state); - } - - return { - startState: function() { return { f:normal, cmdState:[] }; }, - copyState: function(s) { return { f: s.f, cmdState: s.cmdState.slice(0, s.cmdState.length) }; }, - - token: function(stream, state) { - var t = state.f(stream, state); - var w = stream.current(); - return t; - } - }; -}); - - -CodeMirror.defineMIME("text/x-stex", "stex"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/stex/test.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/stex/test.html deleted file mode 100644 index b53a6a2..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/stex/test.html +++ /dev/null @@ -1,251 +0,0 @@ - - - - CodeMirror: sTeX mode - - - - - - - - -

[deleted test page: "Tests for the sTeX Mode", with sections Basics, Tags, Comments, Errors, Character Escapes, Spacing control, and Summary.]
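The deleted test page exercised the sTeX tokenizer over those categories. A tokenizer can also be driven headlessly; a sketch, assuming CodeMirror's optional runmode utility (lib/util/runmode.js) is loaded alongside stex.js:

    // collect the (text, style) pairs the mode emits for a snippet
    var tokens = [];
    CodeMirror.runMode("\\begin{document}", "text/x-stex", function (text, style) {
      tokens.push([text, style || null]);
    });
    // the command \begin should come back styled as "tag",
    // per the plugin table in the deleted stex.js
    console.log(tokens);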

- - - - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/tiddlywiki/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/tiddlywiki/index.html deleted file mode 100644 index 2f7da0e..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/tiddlywiki/index.html +++ /dev/null @@ -1,140 +0,0 @@ - - - - CodeMirror: TiddlyWiki mode - - - - - - - - -

[deleted demo page for the TiddlyWiki mode. TiddlyWiki mode supports a single configuration. MIME types defined: text/x-tiddlywiki.]

- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/tiddlywiki/tiddlywiki.css b/src/lib/flask_debugtoolbar/static/codemirror/mode/tiddlywiki/tiddlywiki.css deleted file mode 100644 index dba05c0..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/tiddlywiki/tiddlywiki.css +++ /dev/null @@ -1,21 +0,0 @@ -.cm-s-default span.cm-header {color: blue; font-weight:bold;} -.cm-s-default span.cm-code {color: #a50;} -.cm-s-default span.cm-code-inline {color: #660;} - -.cm-s-default span.cm-quote {color: #555;} -.cm-s-default span.cm-list {color: #c60;} -.cm-s-default span.cm-hr {color: #999;} -.cm-s-default span.cm-em {font-style: italic;} -.cm-s-default span.cm-strong {font-weight: bold;} - -.cm-s-default span.cm-link-external {color: blue;} -.cm-s-default span.cm-brace {color: #170; font-weight: bold;} -.cm-s-default span.cm-macro {color: #9E3825;} -.cm-s-default span.cm-table {color: blue; font-weight: bold;} -.cm-s-default span.cm-warning {color: red; font-weight: bold;} - -.cm-s-default span.cm-underlined {text-decoration: underline;} -.cm-s-default span.cm-line-through {text-decoration: line-through;} - -.cm-s-default span.cm-comment {color: #666;} - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/tiddlywiki/tiddlywiki.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/tiddlywiki/tiddlywiki.js deleted file mode 100644 index 1d26967..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/tiddlywiki/tiddlywiki.js +++ /dev/null @@ -1,384 +0,0 @@ -/*** -|''Name''|tiddlywiki.js| -|''Description''|Enables TiddlyWikiy syntax highlighting using CodeMirror| -|''Author''|PMario| -|''Version''|0.1.7| -|''Status''|''stable''| -|''Source''|[[GitHub|https://github.com/pmario/CodeMirror2/blob/tw-syntax/mode/tiddlywiki]]| -|''Documentation''|http://codemirror.tiddlyspace.com/| -|''License''|[[MIT License|http://www.opensource.org/licenses/mit-license.php]]| -|''CoreVersion''|2.5.0| -|''Requires''|codemirror.js| -|''Keywords''|syntax highlighting color code mirror codemirror| -! Info -CoreVersion parameter is needed for TiddlyWiki only! -***/ -//{{{ -CodeMirror.defineMode("tiddlywiki", function (config, parserConfig) { - var indentUnit = config.indentUnit; - - // Tokenizer - var textwords = function () { - function kw(type) { - return { - type: type, - style: "text" - }; - } - return {}; - }(); - - var keywords = function () { - function kw(type) { - return { type: type, style: "macro"}; - } - return { - "allTags": kw('allTags'), "closeAll": kw('closeAll'), "list": kw('list'), - "newJournal": kw('newJournal'), "newTiddler": kw('newTiddler'), - "permaview": kw('permaview'), "saveChanges": kw('saveChanges'), - "search": kw('search'), "slider": kw('slider'), "tabs": kw('tabs'), - "tag": kw('tag'), "tagging": kw('tagging'), "tags": kw('tags'), - "tiddler": kw('tiddler'), "timeline": kw('timeline'), - "today": kw('today'), "version": kw('version'), "option": kw('option'), - - "with": kw('with'), - "filter": kw('filter') - }; - }(); - - var isSpaceName = /[\w_\-]/i, - reHR = /^\-\-\-\-+$/, //
- reWikiCommentStart = /^\/\*\*\*$/, // /*** - reWikiCommentStop = /^\*\*\*\/$/, // ***/ - reBlockQuote = /^<<<$/, - - reJsCodeStart = /^\/\/\{\{\{$/, // //{{{ js block start - reJsCodeStop = /^\/\/\}\}\}$/, // //}}} js stop - reXmlCodeStart = /^$/, // xml block start - reXmlCodeStop = /^$/, // xml stop - - reCodeBlockStart = /^\{\{\{$/, // {{{ TW text div block start - reCodeBlockStop = /^\}\}\}$/, // }}} TW text stop - - reCodeStart = /\{\{\{/, // {{{ code span start - reUntilCodeStop = /.*?\}\}\}/; - - function chain(stream, state, f) { - state.tokenize = f; - return f(stream, state); - } - - // used for strings - function nextUntilUnescaped(stream, end) { - var escaped = false, - next; - while ((next = stream.next()) != null) { - if (next == end && !escaped) return false; - escaped = !escaped && next == "\\"; - } - return escaped; - } - - // Used as scratch variables to communicate multiple values without - // consing up tons of objects. - var type, content; - - function ret(tp, style, cont) { - type = tp; - content = cont; - return style; - } - - function jsTokenBase(stream, state) { - var sol = stream.sol(), - ch, tch; - - state.block = false; // indicates the start of a code block. - - ch = stream.peek(); // don't eat, to make matching simpler - - // check start of blocks - if (sol && /[<\/\*{}\-]/.test(ch)) { - if (stream.match(reCodeBlockStart)) { - state.block = true; - return chain(stream, state, twTokenCode); - } - if (stream.match(reBlockQuote)) { - return ret('quote', 'quote'); - } - if (stream.match(reWikiCommentStart) || stream.match(reWikiCommentStop)) { - return ret('code', 'code'); - } - if (stream.match(reJsCodeStart) || stream.match(reJsCodeStop) || stream.match(reXmlCodeStart) || stream.match(reXmlCodeStop)) { - return ret('code', 'code'); - } - if (stream.match(reHR)) { - return ret('hr', 'hr'); - } - } // sol - ch = stream.next(); - - if (sol && /[\/\*!#;:>|]/.test(ch)) { - if (ch == "!") { // tw header - stream.skipToEnd(); - return ret("header", "header"); - } - if (ch == "*") { // tw list - stream.eatWhile('*'); - return ret("list", "list"); - } - if (ch == "#") { // tw numbered list - stream.eatWhile('#'); - return ret("list", "list"); - } - if (ch == ";") { // definition list, term - stream.eatWhile(';'); - return ret("list", "list"); - } - if (ch == ":") { // definition list, description - stream.eatWhile(':'); - return ret("list", "list"); - } - if (ch == ">") { // single line quote - stream.eatWhile(">"); - return ret("quote", "quote"); - } - if (ch == '|') { - return ret('table', 'table'); - } - } - - if (ch == '{' && stream.match(/\{\{/)) { - return chain(stream, state, twTokenCode); - } - - // rudimentary html:// file:// link matching. TW knows much more ... - if (/[hf]/i.test(ch)) { - if (/[ti]/i.test(stream.peek()) && stream.match(/\b(ttps?|tp|ile):\/\/[\-A-Z0-9+&@#\/%?=~_|$!:,.;]*[A-Z0-9+&@#\/%=~_|$]/i)) { - return ret("link-external", "link-external"); - } - } - // just a little string indicator, don't want to have the whole string covered - if (ch == '"') { - return ret('string', 'string'); - } - if (ch == '~') { // _no_ CamelCase indicator should be bold - return ret('text', 'brace'); - } - if (/[\[\]]/.test(ch)) { // check for [[..]] - if (stream.peek() == ch) { - stream.next(); - return ret('brace', 'brace'); - } - } - if (ch == "@") { // check for space link. 
TODO fix @@...@@ highlighting - stream.eatWhile(isSpaceName); - return ret("link-external", "link-external"); - } - if (/\d/.test(ch)) { // numbers - stream.eatWhile(/\d/); - return ret("number", "number"); - } - if (ch == "/") { // tw invisible comment - if (stream.eat("%")) { - return chain(stream, state, twTokenComment); - } - else if (stream.eat("/")) { // - return chain(stream, state, twTokenEm); - } - } - if (ch == "_") { // tw underline - if (stream.eat("_")) { - return chain(stream, state, twTokenUnderline); - } - } - // strikethrough and mdash handling - if (ch == "-") { - if (stream.eat("-")) { - // if strikethrough looks ugly, change CSS. - if (stream.peek() != ' ') - return chain(stream, state, twTokenStrike); - // mdash - if (stream.peek() == ' ') - return ret('text', 'brace'); - } - } - if (ch == "'") { // tw bold - if (stream.eat("'")) { - return chain(stream, state, twTokenStrong); - } - } - if (ch == "<") { // tw macro - if (stream.eat("<")) { - return chain(stream, state, twTokenMacro); - } - } - else { - return ret(ch); - } - - // core macro handling - stream.eatWhile(/[\w\$_]/); - var word = stream.current(), - known = textwords.propertyIsEnumerable(word) && textwords[word]; - - return known ? ret(known.type, known.style, word) : ret("text", null, word); - - } // jsTokenBase() - - function twTokenString(quote) { - return function (stream, state) { - if (!nextUntilUnescaped(stream, quote)) state.tokenize = jsTokenBase; - return ret("string", "string"); - }; - } - - // tw invisible comment - function twTokenComment(stream, state) { - var maybeEnd = false, - ch; - while (ch = stream.next()) { - if (ch == "/" && maybeEnd) { - state.tokenize = jsTokenBase; - break; - } - maybeEnd = (ch == "%"); - } - return ret("comment", "comment"); - } - - // tw strong / bold - function twTokenStrong(stream, state) { - var maybeEnd = false, - ch; - while (ch = stream.next()) { - if (ch == "'" && maybeEnd) { - state.tokenize = jsTokenBase; - break; - } - maybeEnd = (ch == "'"); - } - return ret("text", "strong"); - } - - // tw code - function twTokenCode(stream, state) { - var ch, sb = state.block; - - if (sb && stream.current()) { - return ret("code", "code"); - } - - if (!sb && stream.match(reUntilCodeStop)) { - state.tokenize = jsTokenBase; - return ret("code", "code-inline"); - } - - if (sb && stream.sol() && stream.match(reCodeBlockStop)) { - state.tokenize = jsTokenBase; - return ret("code", "code"); - } - - ch = stream.next(); - return (sb) ? 
ret("code", "code") : ret("code", "code-inline"); - } - - // tw em / italic - function twTokenEm(stream, state) { - var maybeEnd = false, - ch; - while (ch = stream.next()) { - if (ch == "/" && maybeEnd) { - state.tokenize = jsTokenBase; - break; - } - maybeEnd = (ch == "/"); - } - return ret("text", "em"); - } - - // tw underlined text - function twTokenUnderline(stream, state) { - var maybeEnd = false, - ch; - while (ch = stream.next()) { - if (ch == "_" && maybeEnd) { - state.tokenize = jsTokenBase; - break; - } - maybeEnd = (ch == "_"); - } - return ret("text", "underlined"); - } - - // tw strike through text looks ugly - // change CSS if needed - function twTokenStrike(stream, state) { - var maybeEnd = false, - ch, nr; - - while (ch = stream.next()) { - if (ch == "-" && maybeEnd) { - state.tokenize = jsTokenBase; - break; - } - maybeEnd = (ch == "-"); - } - return ret("text", "line-through"); - } - - // macro - function twTokenMacro(stream, state) { - var ch, tmp, word, known; - - if (stream.current() == '<<') { - return ret('brace', 'macro'); - } - - ch = stream.next(); - if (!ch) { - state.tokenize = jsTokenBase; - return ret(ch); - } - if (ch == ">") { - if (stream.peek() == '>') { - stream.next(); - state.tokenize = jsTokenBase; - return ret("brace", "macro"); - } - } - - stream.eatWhile(/[\w\$_]/); - word = stream.current(); - known = keywords.propertyIsEnumerable(word) && keywords[word]; - - if (known) { - return ret(known.type, known.style, word); - } - else { - return ret("macro", null, word); - } - } - - // Interface - return { - startState: function (basecolumn) { - return { - tokenize: jsTokenBase, - indented: 0, - level: 0 - }; - }, - - token: function (stream, state) { - if (stream.eatSpace()) return null; - var style = state.tokenize(stream, state); - return style; - }, - - electricChars: "" - }; -}); - -CodeMirror.defineMIME("text/x-tiddlywiki", "tiddlywiki"); -//}}} diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/vbscript/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/vbscript/index.html deleted file mode 100644 index dd207ca..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/vbscript/index.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - CodeMirror: VBScript mode - - - - - - - -

[deleted demo page for the VBScript mode. MIME types defined: text/vbscript.]

- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/vbscript/vbscript.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/vbscript/vbscript.js deleted file mode 100644 index 65d6c21..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/vbscript/vbscript.js +++ /dev/null @@ -1,26 +0,0 @@ -CodeMirror.defineMode("vbscript", function() { - var regexVBScriptKeyword = /^(?:Call|Case|CDate|Clear|CInt|CLng|Const|CStr|Description|Dim|Do|Each|Else|ElseIf|End|Err|Error|Exit|False|For|Function|If|LCase|Loop|LTrim|Next|Nothing|Now|Number|On|Preserve|Quit|ReDim|Resume|RTrim|Select|Set|Sub|Then|To|Trim|True|UBound|UCase|Until|VbCr|VbCrLf|VbLf|VbTab)$/im; - - return { - token: function(stream) { - if (stream.eatSpace()) return null; - var ch = stream.next(); - if (ch == "'") { - stream.skipToEnd(); - return "comment"; - } - if (ch == '"') { - stream.skipTo('"'); - return "string"; - } - - if (/\w/.test(ch)) { - stream.eatWhile(/\w/); - if (regexVBScriptKeyword.test(stream.current())) return "keyword"; - } - return null; - } - }; -}); - -CodeMirror.defineMIME("text/vbscript", "vbscript"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/velocity/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/velocity/index.html deleted file mode 100644 index 49dba38..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/velocity/index.html +++ /dev/null @@ -1,103 +0,0 @@ - - - - CodeMirror: Velocity mode - - - - - - - - -
CodeMirror: Velocity mode

MIME types defined: text/velocity.
- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/velocity/velocity.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/velocity/velocity.js deleted file mode 100644 index 0b80c75..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/velocity/velocity.js +++ /dev/null @@ -1,146 +0,0 @@ -CodeMirror.defineMode("velocity", function(config) { - function parseWords(str) { - var obj = {}, words = str.split(" "); - for (var i = 0; i < words.length; ++i) obj[words[i]] = true; - return obj; - } - - var indentUnit = config.indentUnit - var keywords = parseWords("#end #else #break #stop #[[ #]] " + - "#{end} #{else} #{break} #{stop}"); - var functions = parseWords("#if #elseif #foreach #set #include #parse #macro #define #evaluate " + - "#{if} #{elseif} #{foreach} #{set} #{include} #{parse} #{macro} #{define} #{evaluate}"); - var specials = parseWords("$foreach.count $foreach.hasNext $foreach.first $foreach.last $foreach.topmost $foreach.parent $velocityCount"); - var isOperatorChar = /[+\-*&%=<>!?:\/|]/; - var multiLineStrings =true; - - function chain(stream, state, f) { - state.tokenize = f; - return f(stream, state); - } - function tokenBase(stream, state) { - var beforeParams = state.beforeParams; - state.beforeParams = false; - var ch = stream.next(); - // start of string? - if ((ch == '"' || ch == "'") && state.inParams) - return chain(stream, state, tokenString(ch)); - // is it one of the special signs []{}().,;? Seperator? - else if (/[\[\]{}\(\),;\.]/.test(ch)) { - if (ch == "(" && beforeParams) state.inParams = true; - else if (ch == ")") state.inParams = false; - return null; - } - // start of a number value? - else if (/\d/.test(ch)) { - stream.eatWhile(/[\w\.]/); - return "number"; - } - // multi line comment? - else if (ch == "#" && stream.eat("*")) { - return chain(stream, state, tokenComment); - } - // unparsed content? - else if (ch == "#" && stream.match(/ *\[ *\[/)) { - return chain(stream, state, tokenUnparsed); - } - // single line comment? - else if (ch == "#" && stream.eat("#")) { - stream.skipToEnd(); - return "comment"; - } - // variable? - else if (ch == "$") { - stream.eatWhile(/[\w\d\$_\.{}]/); - // is it one of the specials? - if (specials && specials.propertyIsEnumerable(stream.current().toLowerCase())) { - return "keyword"; - } - else { - state.beforeParams = true; - return "builtin"; - } - } - // is it a operator? - else if (isOperatorChar.test(ch)) { - stream.eatWhile(isOperatorChar); - return "operator"; - } - else { - // get the whole word - stream.eatWhile(/[\w\$_{}]/); - var word = stream.current().toLowerCase(); - // is it one of the listed keywords? - if (keywords && keywords.propertyIsEnumerable(word)) - return "keyword"; - // is it one of the listed functions? 
- if (functions && functions.propertyIsEnumerable(word) || - stream.current().match(/^#[a-z0-9_]+ *$/i) && stream.peek()=="(") { - state.beforeParams = true; - return "keyword"; - } - // default: just a "word" - return null; - } - } - - function tokenString(quote) { - return function(stream, state) { - var escaped = false, next, end = false; - while ((next = stream.next()) != null) { - if (next == quote && !escaped) { - end = true; - break; - } - escaped = !escaped && next == "\\"; - } - if (end) state.tokenize = tokenBase; - return "string"; - }; - } - - function tokenComment(stream, state) { - var maybeEnd = false, ch; - while (ch = stream.next()) { - if (ch == "#" && maybeEnd) { - state.tokenize = tokenBase; - break; - } - maybeEnd = (ch == "*"); - } - return "comment"; - } - - function tokenUnparsed(stream, state) { - var maybeEnd = 0, ch; - while (ch = stream.next()) { - if (ch == "#" && maybeEnd == 2) { - state.tokenize = tokenBase; - break; - } - if (ch == "]") - maybeEnd++; - else if (ch != " ") - maybeEnd = 0; - } - return "meta"; - } - // Interface - - return { - startState: function(basecolumn) { - return { - tokenize: tokenBase, - beforeParams: false, - inParams: false - }; - }, - - token: function(stream, state) { - if (stream.eatSpace()) return null; - return state.tokenize(stream, state); - } - }; -}); - -CodeMirror.defineMIME("text/velocity", "velocity"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/verilog/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/verilog/index.html deleted file mode 100644 index 775dd53..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/verilog/index.html +++ /dev/null @@ -1,210 +0,0 @@ - - - - CodeMirror: Verilog mode - - - - - - - -
CodeMirror: Verilog mode

Simple mode that tries to handle Verilog-like languages as well as it can. Takes one configuration parameter: keywords, an object whose property names are the keywords in the language.

MIME types defined: text/x-verilog (Verilog code).
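Because the tokenizer reads its keyword set from parserConfig (see verilog.js below), a dialect can be registered by bundling a keywords object into a mode spec. A sketch under that assumption; the MIME name and word list here are invented for illustration:

    // Illustrative dialect registration: the verilog mode reads
    // parserConfig.keywords, so a spec object can supply them.
    CodeMirror.defineMIME("text/x-verilog-example", {
      name: "verilog",
      keywords: {"module": true, "endmodule": true, "always": true, "wire": true, "reg": true}
    });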
- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/verilog/verilog.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/verilog/verilog.js deleted file mode 100644 index 736d16a..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/verilog/verilog.js +++ /dev/null @@ -1,194 +0,0 @@ -CodeMirror.defineMode("verilog", function(config, parserConfig) { - var indentUnit = config.indentUnit, - keywords = parserConfig.keywords || {}, - blockKeywords = parserConfig.blockKeywords || {}, - atoms = parserConfig.atoms || {}, - hooks = parserConfig.hooks || {}, - multiLineStrings = parserConfig.multiLineStrings; - var isOperatorChar = /[&|~> - - - CodeMirror: XML mode - - - - - - - -
CodeMirror: XML mode

The XML mode supports two configuration parameters:

htmlMode (boolean)
    This switches the mode to parse HTML instead of XML. This means attributes do not have to be quoted, and some elements (such as br) do not require a closing tag.
alignCDATA (boolean)
    Setting this to true will force the opening tag of CDATA blocks to not be indented.

MIME types defined: application/xml, text/html.
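Both parameters travel inside the mode spec rather than as top-level editor options. A minimal sketch, assuming CodeMirror 2's fromTextArea API and an illustrative element id:

    // Parse HTML-flavored markup and keep CDATA opening tags unindented.
    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: {name: "xml", htmlMode: true, alignCDATA: true}
    });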
- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/xml/xml.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/xml/xml.js deleted file mode 100644 index d8f17ef..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/xml/xml.js +++ /dev/null @@ -1,268 +0,0 @@ -CodeMirror.defineMode("xml", function(config, parserConfig) { - var indentUnit = config.indentUnit; - var Kludges = parserConfig.htmlMode ? { - autoSelfClosers: {"br": true, "img": true, "hr": true, "link": true, "input": true, - "meta": true, "col": true, "frame": true, "base": true, "area": true}, - doNotIndent: {"pre": true}, - allowUnquoted: true, - allowMissing: false - } : {autoSelfClosers: {}, doNotIndent: {}, allowUnquoted: false, allowMissing: false}; - var alignCDATA = parserConfig.alignCDATA; - - // Return variables for tokenizers - var tagName, type; - - function inText(stream, state) { - function chain(parser) { - state.tokenize = parser; - return parser(stream, state); - } - - var ch = stream.next(); - if (ch == "<") { - if (stream.eat("!")) { - if (stream.eat("[")) { - if (stream.match("CDATA[")) return chain(inBlock("atom", "]]>")); - else return null; - } - else if (stream.match("--")) return chain(inBlock("comment", "-->")); - else if (stream.match("DOCTYPE", true, true)) { - stream.eatWhile(/[\w\._\-]/); - return chain(doctype(1)); - } - else return null; - } - else if (stream.eat("?")) { - stream.eatWhile(/[\w\._\-]/); - state.tokenize = inBlock("meta", "?>"); - return "meta"; - } - else { - type = stream.eat("/") ? "closeTag" : "openTag"; - stream.eatSpace(); - tagName = ""; - var c; - while ((c = stream.eat(/[^\s\u00a0=<>\"\'\/?]/))) tagName += c; - state.tokenize = inTag; - return "tag"; - } - } - else if (ch == "&") { - var ok; - if (stream.eat("#")) { - if (stream.eat("x")) { - ok = stream.eatWhile(/[a-fA-F\d]/) && stream.eat(";"); - } else { - ok = stream.eatWhile(/[\d]/) && stream.eat(";"); - } - } else { - ok = stream.eatWhile(/[\w\.\-:]/) && stream.eat(";"); - } - return ok ? "atom" : "error"; - } - else { - stream.eatWhile(/[^&<]/); - return null; - } - } - - function inTag(stream, state) { - var ch = stream.next(); - if (ch == ">" || (ch == "/" && stream.eat(">"))) { - state.tokenize = inText; - type = ch == ">" ? 
"endTag" : "selfcloseTag"; - return "tag"; - } - else if (ch == "=") { - type = "equals"; - return null; - } - else if (/[\'\"]/.test(ch)) { - state.tokenize = inAttribute(ch); - return state.tokenize(stream, state); - } - else { - stream.eatWhile(/[^\s\u00a0=<>\"\'\/?]/); - return "word"; - } - } - - function inAttribute(quote) { - return function(stream, state) { - while (!stream.eol()) { - if (stream.next() == quote) { - state.tokenize = inTag; - break; - } - } - return "string"; - }; - } - - function inBlock(style, terminator) { - return function(stream, state) { - while (!stream.eol()) { - if (stream.match(terminator)) { - state.tokenize = inText; - break; - } - stream.next(); - } - return style; - }; - } - function doctype(depth) { - return function(stream, state) { - var ch; - while ((ch = stream.next()) != null) { - if (ch == "<") { - state.tokenize = doctype(depth + 1); - return state.tokenize(stream, state); - } else if (ch == ">") { - if (depth == 1) { - state.tokenize = inText; - break; - } else { - state.tokenize = doctype(depth - 1); - return state.tokenize(stream, state); - } - } - } - return "meta"; - }; - } - - var curState, setStyle; - function pass() { - for (var i = arguments.length - 1; i >= 0; i--) curState.cc.push(arguments[i]); - } - function cont() { - pass.apply(null, arguments); - return true; - } - - function pushContext(tagName, startOfLine) { - var noIndent = Kludges.doNotIndent.hasOwnProperty(tagName) || (curState.context && curState.context.noIndent); - curState.context = { - prev: curState.context, - tagName: tagName, - indent: curState.indented, - startOfLine: startOfLine, - noIndent: noIndent - }; - } - function popContext() { - if (curState.context) curState.context = curState.context.prev; - } - - function element(type) { - if (type == "openTag") { - curState.tagName = tagName; - return cont(attributes, endtag(curState.startOfLine)); - } else if (type == "closeTag") { - var err = false; - if (curState.context) { - err = curState.context.tagName != tagName; - } else { - err = true; - } - if (err) setStyle = "error"; - return cont(endclosetag(err)); - } - return cont(); - } - function endtag(startOfLine) { - return function(type) { - if (type == "selfcloseTag" || - (type == "endTag" && Kludges.autoSelfClosers.hasOwnProperty(curState.tagName.toLowerCase()))) - return cont(); - if (type == "endTag") {pushContext(curState.tagName, startOfLine); return cont();} - return cont(); - }; - } - function endclosetag(err) { - return function(type) { - if (err) setStyle = "error"; - if (type == "endTag") { popContext(); return cont(); } - setStyle = "error"; - return cont(arguments.callee); - } - } - - function attributes(type) { - if (type == "word") {setStyle = "attribute"; return cont(attribute, attributes);} - if (type == "endTag" || type == "selfcloseTag") return pass(); - setStyle = "error"; - return cont(attributes); - } - function attribute(type) { - if (type == "equals") return cont(attvalue, attributes); - if (!Kludges.allowMissing) setStyle = "error"; - return (type == "endTag" || type == "selfcloseTag") ? pass() : cont(); - } - function attvalue(type) { - if (type == "string") return cont(attvaluemaybe); - if (type == "word" && Kludges.allowUnquoted) {setStyle = "string"; return cont();} - setStyle = "error"; - return (type == "endTag" || type == "selfCloseTag") ? 
pass() : cont(); - } - function attvaluemaybe(type) { - if (type == "string") return cont(attvaluemaybe); - else return pass(); - } - - return { - startState: function() { - return {tokenize: inText, cc: [], indented: 0, startOfLine: true, tagName: null, context: null}; - }, - - token: function(stream, state) { - if (stream.sol()) { - state.startOfLine = true; - state.indented = stream.indentation(); - } - if (stream.eatSpace()) return null; - - setStyle = type = tagName = null; - var style = state.tokenize(stream, state); - state.type = type; - if ((style || type) && style != "comment") { - curState = state; - while (true) { - var comb = state.cc.pop() || element; - if (comb(type || style)) break; - } - } - state.startOfLine = false; - return setStyle || style; - }, - - indent: function(state, textAfter, fullLine) { - var context = state.context; - if ((state.tokenize != inTag && state.tokenize != inText) || - context && context.noIndent) - return fullLine ? fullLine.match(/^(\s*)/)[0].length : 0; - if (alignCDATA && / - - - CodeMirror: Pure XML mode - - - - - - - -
CodeMirror: Pure XML mode

This is my XML parser, based on the original:

  • No HTML mode - this is pure XML
  • Illegal attributes and element names are errors
  • Attributes must have a value
  • XML declaration supported (e.g.: <?xml version="1.0" encoding="utf-8" standalone="no" ?>)
  • CDATA and comment blocks are not indented (except for their start-tag)
  • Better handling of errors per line with the state object - provides good infrastructure for extending it

What's missing:

  • Make sure only a single root element exists at the document level
  • Multi-line attributes should NOT indent
  • Start tags are not painted red when they have no matching end tag (is this really wrong?)

MIME types defined: application/xml, text/xml.

@author: Dror BG (deebug.dev[at]gmail.com)
@date: August, 2011
@github: https://github.com/deebugger/CodeMirror2
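A short usage sketch (element id assumed); per the rules above, the XML declaration is only accepted as the very first line of the document:

    // Wire an editor to the xmlpure mode defined in xmlpure.js.
    var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
      mode: "xmlpure",
      lineNumbers: true
    });
    editor.setValue('<?xml version="1.0" encoding="utf-8" standalone="no" ?>\n<doc/>');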
- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/xmlpure/xmlpure.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/xmlpure/xmlpure.js deleted file mode 100644 index 18d710c..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/xmlpure/xmlpure.js +++ /dev/null @@ -1,490 +0,0 @@ -/** - * xmlpure.js - * - * Building upon and improving the CodeMirror 2 XML parser - * @author: Dror BG (deebug.dev@gmail.com) - * @date: August, 2011 - */ - -CodeMirror.defineMode("xmlpure", function(config, parserConfig) { - // constants - var STYLE_ERROR = "error"; - var STYLE_INSTRUCTION = "comment"; - var STYLE_COMMENT = "comment"; - var STYLE_ELEMENT_NAME = "tag"; - var STYLE_ATTRIBUTE = "attribute"; - var STYLE_WORD = "string"; - var STYLE_TEXT = "atom"; - var STYLE_ENTITIES = "string"; - - var TAG_INSTRUCTION = "!instruction"; - var TAG_CDATA = "!cdata"; - var TAG_COMMENT = "!comment"; - var TAG_TEXT = "!text"; - - var doNotIndent = { - "!cdata": true, - "!comment": true, - "!text": true, - "!instruction": true - }; - - // options - var indentUnit = config.indentUnit; - - /////////////////////////////////////////////////////////////////////////// - // helper functions - - // chain a parser to another parser - function chain(stream, state, parser) { - state.tokenize = parser; - return parser(stream, state); - } - - // parse a block (comment, CDATA or text) - function inBlock(style, terminator, nextTokenize) { - return function(stream, state) { - while (!stream.eol()) { - if (stream.match(terminator)) { - popContext(state); - state.tokenize = nextTokenize; - break; - } - stream.next(); - } - return style; - }; - } - - // go down a level in the document - // (hint: look at who calls this function to know what the contexts are) - function pushContext(state, tagName) { - var noIndent = doNotIndent.hasOwnProperty(tagName) || (state.context && state.context.doIndent); - var newContext = { - tagName: tagName, - prev: state.context, - indent: state.context ? 
state.context.indent + indentUnit : 0, - lineNumber: state.lineNumber, - indented: state.indented, - noIndent: noIndent - }; - state.context = newContext; - } - - // go up a level in the document - function popContext(state) { - if (state.context) { - var oldContext = state.context; - state.context = oldContext.prev; - return oldContext; - } - - // we shouldn't be here - it means we didn't have a context to pop - return null; - } - - // return true if the current token is seperated from the tokens before it - // which means either this is the start of the line, or there is at least - // one space or tab character behind the token - // otherwise returns false - function isTokenSeparated(stream) { - return stream.sol() || - stream.string.charAt(stream.start - 1) == " " || - stream.string.charAt(stream.start - 1) == "\t"; - } - - /////////////////////////////////////////////////////////////////////////// - // context: document - // - // an XML document can contain: - // - a single declaration (if defined, it must be the very first line) - // - exactly one root element - // @todo try to actually limit the number of root elements to 1 - // - zero or more comments - function parseDocument(stream, state) { - if(stream.eat("<")) { - if(stream.eat("?")) { - // processing instruction - pushContext(state, TAG_INSTRUCTION); - state.tokenize = parseProcessingInstructionStartTag; - return STYLE_INSTRUCTION; - } else if(stream.match("!--")) { - // new context: comment - pushContext(state, TAG_COMMENT); - return chain(stream, state, inBlock(STYLE_COMMENT, "-->", parseDocument)); - } else if(stream.eatSpace() || stream.eol() ) { - stream.skipToEnd(); - return STYLE_ERROR; - } else { - // element - state.tokenize = parseElementTagName; - return STYLE_ELEMENT_NAME; - } - } - - // error on line - stream.skipToEnd(); - return STYLE_ERROR; - } - - /////////////////////////////////////////////////////////////////////////// - // context: XML element start-tag or end-tag - // - // - element start-tag can contain attributes - // - element start-tag may self-close (or start an element block if it doesn't) - // - element end-tag can contain only the tag name - function parseElementTagName(stream, state) { - // get the name of the tag - var startPos = stream.pos; - if(stream.match(/^[a-zA-Z_:][-a-zA-Z0-9_:.]*/)) { - // element start-tag - var tagName = stream.string.substring(startPos, stream.pos); - pushContext(state, tagName); - state.tokenize = parseElement; - return STYLE_ELEMENT_NAME; - } else if(stream.match(/^\/[a-zA-Z_:][-a-zA-Z0-9_:.]*( )*>/)) { - // element end-tag - var endTagName = stream.string.substring(startPos + 1, stream.pos - 1).trim(); - var oldContext = popContext(state); - state.tokenize = state.context == null ? parseDocument : parseElementBlock; - if(oldContext == null || endTagName != oldContext.tagName) { - // the start and end tag names should match - error - return STYLE_ERROR; - } - return STYLE_ELEMENT_NAME; - } else { - // no tag name - error - state.tokenize = state.context == null ? parseDocument : parseElementBlock; - stream.eatWhile(/[^>]/); - stream.eat(">"); - return STYLE_ERROR; - } - - stream.skipToEnd(); - return null; - } - - function parseElement(stream, state) { - if(stream.match(/^\/>/)) { - // self-closing tag - popContext(state); - state.tokenize = state.context == null ? 
parseDocument : parseElementBlock; - return STYLE_ELEMENT_NAME; - } else if(stream.eat(/^>/)) { - state.tokenize = parseElementBlock; - return STYLE_ELEMENT_NAME; - } else if(isTokenSeparated(stream) && stream.match(/^[a-zA-Z_:][-a-zA-Z0-9_:.]*( )*=/)) { - // attribute - state.tokenize = parseAttribute; - return STYLE_ATTRIBUTE; - } - - // no other options - this is an error - state.tokenize = state.context == null ? parseDocument : parseDocument; - stream.eatWhile(/[^>]/); - stream.eat(">"); - return STYLE_ERROR; - } - - /////////////////////////////////////////////////////////////////////////// - // context: attribute - // - // attribute values may contain everything, except: - // - the ending quote (with ' or ") - this marks the end of the value - // - the character "<" - should never appear - // - ampersand ("&") - unless it starts a reference: a string that ends with a semi-colon (";") - // ---> note: this parser is lax in what may be put into a reference string, - // ---> consult http://www.w3.org/TR/REC-xml/#NT-Reference if you want to make it tighter - function parseAttribute(stream, state) { - var quote = stream.next(); - if(quote != "\"" && quote != "'") { - // attribute must be quoted - stream.skipToEnd(); - state.tokenize = parseElement; - return STYLE_ERROR; - } - - state.tokParams.quote = quote; - state.tokenize = parseAttributeValue; - return STYLE_WORD; - } - - // @todo: find out whether this attribute value spans multiple lines, - // and if so, push a context for it in order not to indent it - // (or something of the sort..) - function parseAttributeValue(stream, state) { - var ch = ""; - while(!stream.eol()) { - ch = stream.next(); - if(ch == state.tokParams.quote) { - // end quote found - state.tokenize = parseElement; - return STYLE_WORD; - } else if(ch == "<") { - // can't have less-than signs in an attribute value, ever - stream.skipToEnd() - state.tokenize = parseElement; - return STYLE_ERROR; - } else if(ch == "&") { - // reference - look for a semi-colon, or return error if none found - ch = stream.next(); - - // make sure that semi-colon isn't right after the ampersand - if(ch == ';') { - stream.skipToEnd() - state.tokenize = parseElement; - return STYLE_ERROR; - } - - // make sure no less-than characters slipped in - while(!stream.eol() && ch != ";") { - if(ch == "<") { - // can't have less-than signs in an attribute value, ever - stream.skipToEnd() - state.tokenize = parseElement; - return STYLE_ERROR; - } - ch = stream.next(); - } - if(stream.eol() && ch != ";") { - // no ampersand found - error - stream.skipToEnd(); - state.tokenize = parseElement; - return STYLE_ERROR; - } - } - } - - // attribute value continues to next line - return STYLE_WORD; - } - - /////////////////////////////////////////////////////////////////////////// - // context: element block - // - // a block can contain: - // - elements - // - text - // - CDATA sections - // - comments - function parseElementBlock(stream, state) { - if(stream.eat("<")) { - if(stream.match("?")) { - pushContext(state, TAG_INSTRUCTION); - state.tokenize = parseProcessingInstructionStartTag; - return STYLE_INSTRUCTION; - } else if(stream.match("!--")) { - // new context: comment - pushContext(state, TAG_COMMENT); - return chain(stream, state, inBlock(STYLE_COMMENT, "-->", - state.context == null ? 
parseDocument : parseElementBlock)); - } else if(stream.match("![CDATA[")) { - // new context: CDATA section - pushContext(state, TAG_CDATA); - return chain(stream, state, inBlock(STYLE_TEXT, "]]>", - state.context == null ? parseDocument : parseElementBlock)); - } else if(stream.eatSpace() || stream.eol() ) { - stream.skipToEnd(); - return STYLE_ERROR; - } else { - // element - state.tokenize = parseElementTagName; - return STYLE_ELEMENT_NAME; - } - } else if(stream.eat("&")) { - stream.eatWhile(/[^;]/); - stream.eat(";"); - return STYLE_ENTITIES; - } else { - // new context: text - pushContext(state, TAG_TEXT); - state.tokenize = parseText; - return null; - } - - state.tokenize = state.context == null ? parseDocument : parseElementBlock; - stream.skipToEnd(); - return null; - } - - function parseText(stream, state) { - stream.eatWhile(/[^<]/); - if(!stream.eol()) { - // we cannot possibly be in the document context, - // just inside an element block - popContext(state); - state.tokenize = parseElementBlock; - } - return STYLE_TEXT; - } - - /////////////////////////////////////////////////////////////////////////// - // context: XML processing instructions - // - // XML processing instructions (PIs) allow documents to contain instructions for applications. - // PI format: - // - 'name' can be anything other than 'xml' (case-insensitive) - // - 'data' can be anything which doesn't contain '?>' - // XML declaration is a special PI (see XML declaration context below) - function parseProcessingInstructionStartTag(stream, state) { - if(stream.match("xml", true, true)) { - // xml declaration - if(state.lineNumber > 1 || stream.pos > 5) { - state.tokenize = parseDocument; - stream.skipToEnd(); - return STYLE_ERROR; - } else { - state.tokenize = parseDeclarationVersion; - return STYLE_INSTRUCTION; - } - } - - // regular processing instruction - if(isTokenSeparated(stream) || stream.match("?>")) { - // we have a space after the start-tag, or nothing but the end-tag - // either way - error! - state.tokenize = parseDocument; - stream.skipToEnd(); - return STYLE_ERROR; - } - - state.tokenize = parseProcessingInstructionBody; - return STYLE_INSTRUCTION; - } - - function parseProcessingInstructionBody(stream, state) { - stream.eatWhile(/[^?]/); - if(stream.eat("?")) { - if(stream.eat(">")) { - popContext(state); - state.tokenize = state.context == null ? 
parseDocument : parseElementBlock; - } - } - return STYLE_INSTRUCTION; - } - - - /////////////////////////////////////////////////////////////////////////// - // context: XML declaration - // - // XML declaration is of the following format: - // - // - must start at the first character of the first line - // - may span multiple lines - // - must include 'version' - // - may include 'encoding' and 'standalone' (in that order after 'version') - // - attribute names must be lowercase - // - cannot contain anything else on the line - function parseDeclarationVersion(stream, state) { - state.tokenize = parseDeclarationEncoding; - - if(isTokenSeparated(stream) && stream.match(/^version( )*=( )*"([a-zA-Z0-9_.:]|\-)+"/)) { - return STYLE_INSTRUCTION; - } - stream.skipToEnd(); - return STYLE_ERROR; - } - - function parseDeclarationEncoding(stream, state) { - state.tokenize = parseDeclarationStandalone; - - if(isTokenSeparated(stream) && stream.match(/^encoding( )*=( )*"[A-Za-z]([A-Za-z0-9._]|\-)*"/)) { - return STYLE_INSTRUCTION; - } - return null; - } - - function parseDeclarationStandalone(stream, state) { - state.tokenize = parseDeclarationEndTag; - - if(isTokenSeparated(stream) && stream.match(/^standalone( )*=( )*"(yes|no)"/)) { - return STYLE_INSTRUCTION; - } - return null; - } - - function parseDeclarationEndTag(stream, state) { - state.tokenize = parseDocument; - - if(stream.match("?>") && stream.eol()) { - popContext(state); - return STYLE_INSTRUCTION; - } - stream.skipToEnd(); - return STYLE_ERROR; - } - - /////////////////////////////////////////////////////////////////////////// - // returned object - return { - electricChars: "/[", - - startState: function() { - return { - tokenize: parseDocument, - tokParams: {}, - lineNumber: 0, - lineError: false, - context: null, - indented: 0 - }; - }, - - token: function(stream, state) { - if(stream.sol()) { - // initialize a new line - state.lineNumber++; - state.lineError = false; - state.indented = stream.indentation(); - } - - // eat all (the spaces) you can - if(stream.eatSpace()) return null; - - // run the current tokenize function, according to the state - var style = state.tokenize(stream, state); - - // is there an error somewhere in the line? - state.lineError = (state.lineError || style == "error"); - - return style; - }, - - blankLine: function(state) { - // blank lines are lines too! - state.lineNumber++; - state.lineError = false; - }, - - indent: function(state, textAfter) { - if(state.context) { - if(state.context.noIndent == true) { - // do not indent - no return value at all - return; - } - if(textAfter.match(/^<\/.*/)) { - // end-tag - indent back to last context - return state.context.indent; - } - if(textAfter.match(/^ - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/index.html deleted file mode 100644 index 82f00d2..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/index.html +++ /dev/null @@ -1,222 +0,0 @@ - - - - - CodeMirror 2: JavaScript mode - - - - - - - - -
CodeMirror 2: XQuery mode

MIME types defined: application/xquery.

Development of the CodeMirror XQuery mode was sponsored by MarkLogic and developed by Mike Brevoort.
- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/index.html deleted file mode 100644 index ba82e54..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/index.html +++ /dev/null @@ -1,27 +0,0 @@ - - - - - - - - - - - - - - - - - - -
XQuery CodeMirror Mode
- - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testBase.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testBase.js deleted file mode 100644 index d40e9ee..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testBase.js +++ /dev/null @@ -1,42 +0,0 @@ - $(document).ready(function(){ - module("testBase"); - test("eviltest", function() { - expect(1); - - var input = 'xquery version "1.0-ml";\ - (: this is\ - : a \ - "comment" :)\ - let $let := <x attr="value">"test"<func>function() $var {function()} {$var}</func></x>\ - let $joe:=1\ - return element element {\ - attribute attribute { 1 },\ - element test { 'a' }, \ - attribute foo { "bar" },\ - fn:doc()[ foo/@bar eq $let ],\ - //x } \ - \ - (: a more \'evil\' test :)\ - (: Modified Blakeley example (: with nested comment :) ... :)\ - declare private function local:declare() {()};\ - declare private function local:private() {()};\ - declare private function local:function() {()};\ - declare private function local:local() {()};\ - let $let := <let>let $let := "let"</let>\ - return element element {\ - attribute attribute { try { xdmp:version() } catch($e) { xdmp:log($e) } },\ - attribute fn:doc { "bar" castable as xs:string },\ - element text { text { "text" } },\ - fn:doc()[ child::eq/(@bar | attribute::attribute) eq $let ],\ - //fn:doc\ - }'; - var expected = 'xquery version "1.0-ml"; (: this is : a "comment" :) let $let := <x attr="value">"test"<func>function() $var {function()} {$var}</func></x> let $joe:=1 return element element { attribute attribute { 1 }, element test { \'a\' }, attribute foo { "bar" }, fn:doc()[ foo/@bar eq $let ], //x } (: a more \'evil\' test :) (: Modified Blakeley example (: with nested comment :) ... 
:) declare private function local:declare() {()}; declare private function local:private() {()}; declare private function local:function() {()}; declare private function local:local() {()}; let $let := <let>let $let := "let"</let> return element element { attribute attribute { try { xdmp:version() } catch($e) { xdmp:log($e) } }, attribute fn:doc { "bar" castable as xs:string }, element text { text { "text" } }, fn:doc()[ child::eq/(@bar | attribute::attribute) eq $let ], //fn:doc }'; - - $("#sandbox").html(''); - var editor = CodeMirror.fromTextArea($("#editor")[0]); - var result = $(".CodeMirror-lines div div pre")[0].innerHTML; - - equal(result, expected); - $("#editor").html(""); - }); - }); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testEmptySequenceKeyword.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testEmptySequenceKeyword.js deleted file mode 100644 index 39ed090..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testEmptySequenceKeyword.js +++ /dev/null @@ -1,16 +0,0 @@ -$(document).ready(function(){ - module("testEmptySequenceKeyword"); - test("testEmptySequenceKeyword", function() { - expect(1); - - var input = '"foo" instance of empty-sequence()'; - var expected = '"foo" instance of empty-sequence()'; - - $("#sandbox").html(''); - var editor = CodeMirror.fromTextArea($("#editor")[0]); - var result = $(".CodeMirror-lines div div pre")[0].innerHTML; - - equal(result, expected); - $("#editor").html(""); - }); -}); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testMultiAttr.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testMultiAttr.js deleted file mode 100644 index 8e98c47..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testMultiAttr.js +++ /dev/null @@ -1,16 +0,0 @@ - $(document).ready(function(){ - module("testMultiAttr"); - test("test1", function() { - expect(1); - - var expected = '<p a1="foo" a2="bar">hello world</p>'; - - $("#sandbox").html(''); - $("#editor").html('
<p a1="foo" a2="bar">hello world</p>
'); - var editor = CodeMirror.fromTextArea($("#editor")[0]); - var result = $(".CodeMirror-lines div div pre")[0].innerHTML; - - equal(result, expected); - $("#editor").html(""); - }); - }); \ No newline at end of file diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testNamespaces.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testNamespaces.js deleted file mode 100644 index 4efea63..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testNamespaces.js +++ /dev/null @@ -1,91 +0,0 @@ -$(document).ready(function(){ - module("test namespaces"); - -// -------------------------------------------------------------------------------- -// this test is based on this: -//http://mbrevoort.github.com/CodeMirror2/#!exprSeqTypes/PrologExpr/VariableProlog/ExternalVariablesWith/K2-ExternalVariablesWith-10.xq -// -------------------------------------------------------------------------------- - test("test namespaced variable", function() { - expect(1); - - var input = 'declare namespace e = "http://example.com/ANamespace";\ -declare variable $e:exampleComThisVarIsNotRecognized as element(*) external;'; - - var expected = 'declare namespace e = "http://example.com/ANamespace";declare variable $e:exampleComThisVarIsNotRecognized as element(*) external;'; - - $("#sandbox").html(''); - var editor = CodeMirror.fromTextArea($("#editor")[0]); - var result = $(".CodeMirror-lines div div pre")[0].innerHTML; - - equal(result, expected); - $("#editor").html(""); - }); - - -// -------------------------------------------------------------------------------- -// this test is based on: -// http://mbrevoort.github.com/CodeMirror2/#!Basics/EQNames/eqname-002.xq -// -------------------------------------------------------------------------------- - test("test EQName variable", function() { - expect(1); - - var input = 'declare variable $"http://www.example.com/ns/my":var := 12;\ -{$"http://www.example.com/ns/my":var}'; - - var expected = 'declare variable $"http://www.example.com/ns/my":var := 12;<out>{$"http://www.example.com/ns/my":var}</out>'; - - $("#sandbox").html(''); - var editor = CodeMirror.fromTextArea($("#editor")[0]); - var result = $(".CodeMirror-lines div div pre")[0].innerHTML; - - equal(result, expected); - $("#editor").html(""); - }); - -// -------------------------------------------------------------------------------- -// this test is based on: -// http://mbrevoort.github.com/CodeMirror2/#!Basics/EQNames/eqname-003.xq -// -------------------------------------------------------------------------------- - test("test EQName function", function() { - expect(1); - - var input = 'declare function "http://www.example.com/ns/my":fn ($a as xs:integer) as xs:integer {\ - $a + 2\ -};\ -{"http://www.example.com/ns/my":fn(12)}'; - - var expected = 'declare function "http://www.example.com/ns/my":fn ($a as xs:integer) as xs:integer { $a + 2};<out>{"http://www.example.com/ns/my":fn(12)}</out>'; - - $("#sandbox").html(''); - var editor = CodeMirror.fromTextArea($("#editor")[0]); - var result = $(".CodeMirror-lines div div pre")[0].innerHTML; - - equal(result, expected); - $("#editor").html(""); - }); - -// -------------------------------------------------------------------------------- -// this test is based on: -// http://mbrevoort.github.com/CodeMirror2/#!Basics/EQNames/eqname-003.xq -// -------------------------------------------------------------------------------- - test("test EQName function with single quotes", function() { - expect(1); - - var 
input = 'declare function \'http://www.example.com/ns/my\':fn ($a as xs:integer) as xs:integer {\ - $a + 2\ -};\ -{\'http://www.example.com/ns/my\':fn(12)}'; - - var expected = 'declare function \'http://www.example.com/ns/my\':fn ($a as xs:integer) as xs:integer { $a + 2};<out>{\'http://www.example.com/ns/my\':fn(12)}</out>'; - - $("#sandbox").html(''); - var editor = CodeMirror.fromTextArea($("#editor")[0]); - var result = $(".CodeMirror-lines div div pre")[0].innerHTML; - - equal(result, expected); - $("#editor").html(""); - }); - -}); - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testProcessingInstructions.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testProcessingInstructions.js deleted file mode 100644 index 9b75305..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testProcessingInstructions.js +++ /dev/null @@ -1,16 +0,0 @@ -$(document).ready(function(){ - module("testProcessingInstructions"); - test("testProcessingInstructions", function() { - expect(1); - - var input = 'data() instance of xs:string'; - var expected = 'data(<?target content?>) instance of xs:string'; - - $("#sandbox").html(''); - var editor = CodeMirror.fromTextArea($("#editor")[0]); - var result = $(".CodeMirror-lines div div pre")[0].innerHTML; - - equal(result, expected); - $("#editor").html(""); - }); -}); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testQuotes.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testQuotes.js deleted file mode 100644 index 79e5142..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/test/testQuotes.js +++ /dev/null @@ -1,19 +0,0 @@ - $(document).ready(function(){ - module("testQuoteEscape"); - test("testQuoteEscapeDouble", function() { - expect(1); - - var input = 'let $rootfolder := "c:\\builds\\winnt\\HEAD\\qa\\scripts\\"\ -let $keysfolder := concat($rootfolder, "keys\\")\ -return\ -$keysfolder'; - var expected = 'let $rootfolder := "c:\\builds\\winnt\\HEAD\\qa\\scripts\\"let $keysfolder := concat($rootfolder, "keys\\")return$keysfolder'; - - $("#sandbox").html(''); - var editor = CodeMirror.fromTextArea($("#editor")[0]); - var result = $(".CodeMirror-lines div div pre")[0].innerHTML; - - equal(result, expected); - $("#editor").html(""); - }); - }); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/xquery.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/xquery.js deleted file mode 100644 index 78916da..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/xquery/xquery.js +++ /dev/null @@ -1,448 +0,0 @@ -/* -Copyright (C) 2011 by MarkLogic Corporation -Author: Mike Brevoort - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -*/ -CodeMirror.defineMode("xquery", function(config, parserConfig) { - - // The keywords object is set to the result of this self executing - // function. Each keyword is a property of the keywords object whose - // value is {type: atype, style: astyle} - var keywords = function(){ - // conveinence functions used to build keywords object - function kw(type) {return {type: type, style: "keyword"};} - var A = kw("keyword a") - , B = kw("keyword b") - , C = kw("keyword c") - , operator = kw("operator") - , atom = {type: "atom", style: "atom"} - , punctuation = {type: "punctuation", style: ""} - , qualifier = {type: "axis_specifier", style: "qualifier"}; - - // kwObj is what is return from this function at the end - var kwObj = { - 'if': A, 'switch': A, 'while': A, 'for': A, - 'else': B, 'then': B, 'try': B, 'finally': B, 'catch': B, - 'element': C, 'attribute': C, 'let': C, 'implements': C, 'import': C, 'module': C, 'namespace': C, - 'return': C, 'super': C, 'this': C, 'throws': C, 'where': C, 'private': C, - ',': punctuation, - 'null': atom, 'fn:false()': atom, 'fn:true()': atom - }; - - // a list of 'basic' keywords. For each add a property to kwObj with the value of - // {type: basic[i], style: "keyword"} e.g. 'after' --> {type: "after", style: "keyword"} - var basic = ['after','ancestor','ancestor-or-self','and','as','ascending','assert','attribute','before', - 'by','case','cast','child','comment','declare','default','define','descendant','descendant-or-self', - 'descending','document','document-node','element','else','eq','every','except','external','following', - 'following-sibling','follows','for','function','if','import','in','instance','intersect','item', - 'let','module','namespace','node','node','of','only','or','order','parent','precedes','preceding', - 'preceding-sibling','processing-instruction','ref','return','returns','satisfies','schema','schema-element', - 'self','some','sortby','stable','text','then','to','treat','typeswitch','union','variable','version','where', - 'xquery', 'empty-sequence']; - for(var i=0, l=basic.length; i < l; i++) { kwObj[basic[i]] = kw(basic[i])}; - - // a list of types. 
For each add a property to kwObj with the value of - // {type: "atom", style: "atom"} - var types = ['xs:string', 'xs:float', 'xs:decimal', 'xs:double', 'xs:integer', 'xs:boolean', 'xs:date', 'xs:dateTime', - 'xs:time', 'xs:duration', 'xs:dayTimeDuration', 'xs:time', 'xs:yearMonthDuration', 'numeric', 'xs:hexBinary', - 'xs:base64Binary', 'xs:anyURI', 'xs:QName', 'xs:byte','xs:boolean','xs:anyURI','xf:yearMonthDuration']; - for(var i=0, l=types.length; i < l; i++) { kwObj[types[i]] = atom;}; - - // each operator will add a property to kwObj with value of {type: "operator", style: "keyword"} - var operators = ['eq', 'ne', 'lt', 'le', 'gt', 'ge', ':=', '=', '>', '>=', '<', '<=', '.', '|', '?', 'and', 'or', 'div', 'idiv', 'mod', '*', '/', '+', '-']; - for(var i=0, l=operators.length; i < l; i++) { kwObj[operators[i]] = operator;}; - - // each axis_specifiers will add a property to kwObj with value of {type: "axis_specifier", style: "qualifier"} - var axis_specifiers = ["self::", "attribute::", "child::", "descendant::", "descendant-or-self::", "parent::", - "ancestor::", "ancestor-or-self::", "following::", "preceding::", "following-sibling::", "preceding-sibling::"]; - for(var i=0, l=axis_specifiers.length; i < l; i++) { kwObj[axis_specifiers[i]] = qualifier; }; - - return kwObj; - }(); - - // Used as scratch variables to communicate multiple values without - // consing up tons of objects. - var type, content; - - function ret(tp, style, cont) { - type = tp; content = cont; - return style; - } - - function chain(stream, state, f) { - state.tokenize = f; - return f(stream, state); - } - - // the primary mode tokenizer - function tokenBase(stream, state) { - var ch = stream.next(), - mightBeFunction = false, - isEQName = isEQNameAhead(stream); - - // an XML tag (if not in some sub, chained tokenizer) - if (ch == "<") { - if(stream.match("!--", true)) - return chain(stream, state, tokenXMLComment); - - if(stream.match("![CDATA", false)) { - state.tokenize = tokenCDATA; - return ret("tag", "tag"); - } - - if(stream.match("?", false)) { - return chain(stream, state, tokenPreProcessing); - } - - var isclose = stream.eat("/"); - stream.eatSpace(); - var tagName = "", c; - while ((c = stream.eat(/[^\s\u00a0=<>\"\'\/?]/))) tagName += c; - - return chain(stream, state, tokenTag(tagName, isclose)); - } - // start code block - else if(ch == "{") { - pushStateStack(state,{ type: "codeblock"}); - return ret("", ""); - } - // end code block - else if(ch == "}") { - popStateStack(state); - return ret("", ""); - } - // if we're in an XML block - else if(isInXmlBlock(state)) { - if(ch == ">") - return ret("tag", "tag"); - else if(ch == "/" && stream.eat(">")) { - popStateStack(state); - return ret("tag", "tag"); - } - else - return ret("word", "word"); - } - // if a number - else if (/\d/.test(ch)) { - stream.match(/^\d*(?:\.\d*)?(?:E[+\-]?\d+)?/); - return ret("number", "atom"); - } - // comment start - else if (ch === "(" && stream.eat(":")) { - pushStateStack(state, { type: "comment"}); - return chain(stream, state, tokenComment); - } - // quoted string - else if ( !isEQName && (ch === '"' || ch === "'")) - return chain(stream, state, tokenString(ch)); - // variable - else if(ch === "$") { - return chain(stream, state, tokenVariable); - } - // assignment - else if(ch ===":" && stream.eat("=")) { - return ret("operator", "keyword"); - } - // open paren - else if(ch === "(") { - pushStateStack(state, { type: "paren"}); - return ret("", ""); - } - // close paren - else if(ch === ")") { - popStateStack(state); 
- return ret("", ""); - } - // open paren - else if(ch === "[") { - pushStateStack(state, { type: "bracket"}); - return ret("", ""); - } - // close paren - else if(ch === "]") { - popStateStack(state); - return ret("", ""); - } - else { - var known = keywords.propertyIsEnumerable(ch) && keywords[ch]; - - // if there's a EQName ahead, consume the rest of the string portion, it's likely a function - if(isEQName && ch === '\"') while(stream.next() !== '"'){} - if(isEQName && ch === '\'') while(stream.next() !== '\''){} - - // gobble up a word if the character is not known - if(!known) stream.eatWhile(/[\w\$_-]/); - - // gobble a colon in the case that is a lib func type call fn:doc - var foundColon = stream.eat(":") - - // if there's not a second colon, gobble another word. Otherwise, it's probably an axis specifier - // which should get matched as a keyword - if(!stream.eat(":") && foundColon) { - stream.eatWhile(/[\w\$_-]/); - } - // if the next non whitespace character is an open paren, this is probably a function (if not a keyword of other sort) - if(stream.match(/^[ \t]*\(/, false)) { - mightBeFunction = true; - } - // is the word a keyword? - var word = stream.current(); - known = keywords.propertyIsEnumerable(word) && keywords[word]; - - // if we think it's a function call but not yet known, - // set style to variable for now for lack of something better - if(mightBeFunction && !known) known = {type: "function_call", style: "variable def"}; - - // if the previous word was element, attribute, axis specifier, this word should be the name of that - if(isInXmlConstructor(state)) { - popStateStack(state); - return ret("word", "word", word); - } - // as previously checked, if the word is element,attribute, axis specifier, call it an "xmlconstructor" and - // push the stack so we know to look for it on the next word - if(word == "element" || word == "attribute" || known.type == "axis_specifier") pushStateStack(state, {type: "xmlconstructor"}); - - // if the word is known, return the details of that else just call this a generic 'word' - return known ? 
ret(known.type, known.style, word) : - ret("word", "word", word); - } - } - - // handle comments, including nested - function tokenComment(stream, state) { - var maybeEnd = false, maybeNested = false, nestedCount = 0, ch; - while (ch = stream.next()) { - if (ch == ")" && maybeEnd) { - if(nestedCount > 0) - nestedCount--; - else { - popStateStack(state); - break; - } - } - else if(ch == ":" && maybeNested) { - nestedCount++; - } - maybeEnd = (ch == ":"); - maybeNested = (ch == "("); - } - - return ret("comment", "comment"); - } - - // tokenizer for string literals - // optionally pass a tokenizer function to set state.tokenize back to when finished - function tokenString(quote, f) { - return function(stream, state) { - var ch; - - if(isInString(state) && stream.current() == quote) { - popStateStack(state); - if(f) state.tokenize = f; - return ret("string", "string"); - } - - pushStateStack(state, { type: "string", name: quote, tokenize: tokenString(quote, f) }); - - // if we're in a string and in an XML block, allow an embedded code block - if(stream.match("{", false) && isInXmlAttributeBlock(state)) { - state.tokenize = tokenBase; - return ret("string", "string"); - } - - - while (ch = stream.next()) { - if (ch == quote) { - popStateStack(state); - if(f) state.tokenize = f; - break; - } - else { - // if we're in a string and in an XML block, allow an embedded code block in an attribute - if(stream.match("{", false) && isInXmlAttributeBlock(state)) { - state.tokenize = tokenBase; - return ret("string", "string"); - } - - } - } - - return ret("string", "string"); - }; - } - - // tokenizer for variables - function tokenVariable(stream, state) { - var isVariableChar = /[\w\$_-]/; - - // a variable may start with a quoted EQName so if the next character is quote, consume to the next quote - if(stream.eat("\"")) { - while(stream.next() !== '\"'){}; - stream.eat(":"); - } else { - stream.eatWhile(isVariableChar); - if(!stream.match(":=", false)) stream.eat(":"); - } - stream.eatWhile(isVariableChar); - state.tokenize = tokenBase; - return ret("variable", "variable"); - } - - // tokenizer for XML tags - function tokenTag(name, isclose) { - return function(stream, state) { - stream.eatSpace(); - if(isclose && stream.eat(">")) { - popStateStack(state); - state.tokenize = tokenBase; - return ret("tag", "tag"); - } - // self closing tag without attributes? 
- if(!stream.eat("/")) - pushStateStack(state, { type: "tag", name: name, tokenize: tokenBase}); - if(!stream.eat(">")) { - state.tokenize = tokenAttribute; - return ret("tag", "tag"); - } - else { - state.tokenize = tokenBase; - } - return ret("tag", "tag"); - } - } - - // tokenizer for XML attributes - function tokenAttribute(stream, state) { - var ch = stream.next(); - - if(ch == "/" && stream.eat(">")) { - if(isInXmlAttributeBlock(state)) popStateStack(state); - if(isInXmlBlock(state)) popStateStack(state); - return ret("tag", "tag"); - } - if(ch == ">") { - if(isInXmlAttributeBlock(state)) popStateStack(state); - return ret("tag", "tag"); - } - if(ch == "=") - return ret("", ""); - // quoted string - if (ch == '"' || ch == "'") - return chain(stream, state, tokenString(ch, tokenAttribute)); - - if(!isInXmlAttributeBlock(state)) - pushStateStack(state, { type: "attribute", name: name, tokenize: tokenAttribute}); - - stream.eat(/[a-zA-Z_:]/); - stream.eatWhile(/[-a-zA-Z0-9_:.]/); - stream.eatSpace(); - - // the case where the attribute has not value and the tag was closed - if(stream.match(">", false) || stream.match("/", false)) { - popStateStack(state); - state.tokenize = tokenBase; - } - - return ret("attribute", "attribute"); - } - - // handle comments, including nested - function tokenXMLComment(stream, state) { - while (ch = stream.next()) { - if (ch == "-" && stream.match("->", true)) { - state.tokenize = tokenBase; - return ret("comment", "comment"); - } - } - } - - - // handle CDATA - function tokenCDATA(stream, state) { - while (ch = stream.next()) { - if (ch == "]" && stream.match("]", true)) { - state.tokenize = tokenBase; - return ret("comment", "comment"); - } - } - } - - // handle preprocessing instructions - function tokenPreProcessing(stream, state) { - while (ch = stream.next()) { - if (ch == "?" 
&& stream.match(">", true)) { - state.tokenize = tokenBase; - return ret("comment", "comment meta"); - } - } - } - - - // functions to test the current context of the state - function isInXmlBlock(state) { return isIn(state, "tag"); } - function isInXmlAttributeBlock(state) { return isIn(state, "attribute"); } - function isInCodeBlock(state) { return isIn(state, "codeblock"); } - function isInXmlConstructor(state) { return isIn(state, "xmlconstructor"); } - function isInString(state) { return isIn(state, "string"); } - - function isEQNameAhead(stream) { - // assume we've already eaten a quote (") - if(stream.current() === '"') - return stream.match(/^[^\"]+\"\:/, false); - else if(stream.current() === '\'') - return stream.match(/^[^\"]+\'\:/, false); - else - return false; - } - - function isIn(state, type) { - return (state.stack.length && state.stack[state.stack.length - 1].type == type); - } - - function pushStateStack(state, newState) { - state.stack.push(newState); - } - - function popStateStack(state) { - var popped = state.stack.pop(); - var reinstateTokenize = state.stack.length && state.stack[state.stack.length-1].tokenize - state.tokenize = reinstateTokenize || tokenBase; - } - - // the interface for the mode API - return { - startState: function(basecolumn) { - return { - tokenize: tokenBase, - cc: [], - stack: [] - }; - }, - - token: function(stream, state) { - if (stream.eatSpace()) return null; - var style = state.tokenize(stream, state); - return style; - } - }; - -}); - -CodeMirror.defineMIME("application/xquery", "xquery"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/yaml/index.html b/src/lib/flask_debugtoolbar/static/codemirror/mode/yaml/index.html deleted file mode 100644 index 7f2c792..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/yaml/index.html +++ /dev/null @@ -1,67 +0,0 @@ - - - - CodeMirror: YAML mode - - - - - - - -
CodeMirror: YAML mode

MIME types defined: text/x-yaml.
- - - diff --git a/src/lib/flask_debugtoolbar/static/codemirror/mode/yaml/yaml.js b/src/lib/flask_debugtoolbar/static/codemirror/mode/yaml/yaml.js deleted file mode 100644 index 59e2641..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/mode/yaml/yaml.js +++ /dev/null @@ -1,95 +0,0 @@ -CodeMirror.defineMode("yaml", function() { - - var cons = ['true', 'false', 'on', 'off', 'yes', 'no']; - var keywordRegex = new RegExp("\\b(("+cons.join(")|(")+"))$", 'i'); - - return { - token: function(stream, state) { - var ch = stream.peek(); - var esc = state.escaped; - state.escaped = false; - /* comments */ - if (ch == "#") { stream.skipToEnd(); return "comment"; } - if (state.literal && stream.indentation() > state.keyCol) { - stream.skipToEnd(); return "string"; - } else if (state.literal) { state.literal = false; } - if (stream.sol()) { - state.keyCol = 0; - state.pair = false; - state.pairStart = false; - /* document start */ - if(stream.match(/---/)) { return "def"; } - /* document end */ - if (stream.match(/\.\.\./)) { return "def"; } - /* array list item */ - if (stream.match(/\s*-\s+/)) { return 'meta'; } - } - /* pairs (associative arrays) -> key */ - if (!state.pair && stream.match(/^\s*([a-z0-9\._-])+(?=\s*:)/i)) { - state.pair = true; - state.keyCol = stream.indentation(); - return "atom"; - } - if (state.pair && stream.match(/^:\s*/)) { state.pairStart = true; return 'meta'; } - - /* inline pairs/lists */ - if (stream.match(/^(\{|\}|\[|\])/)) { - if (ch == '{') - state.inlinePairs++; - else if (ch == '}') - state.inlinePairs--; - else if (ch == '[') - state.inlineList++; - else - state.inlineList--; - return 'meta'; - } - - /* list seperator */ - if (state.inlineList > 0 && !esc && ch == ',') { - stream.next(); - return 'meta'; - } - /* pairs seperator */ - if (state.inlinePairs > 0 && !esc && ch == ',') { - state.keyCol = 0; - state.pair = false; - state.pairStart = false; - stream.next(); - return 'meta'; - } - - /* start of value of a pair */ - if (state.pairStart) { - /* block literals */ - if (stream.match(/^\s*(\||\>)\s*/)) { state.literal = true; return 'meta'; }; - /* references */ - if (stream.match(/^\s*(\&|\*)[a-z0-9\._-]+\b/i)) { return 'variable-2'; } - /* numbers */ - if (state.inlinePairs == 0 && stream.match(/^\s*-?[0-9\.\,]+\s?$/)) { return 'number'; } - if (state.inlinePairs > 0 && stream.match(/^\s*-?[0-9\.\,]+\s?(?=(,|}))/)) { return 'number'; } - /* keywords */ - if (stream.match(keywordRegex)) { return 'keyword'; } - } - - /* nothing found, continue */ - state.pairStart = false; - state.escaped = (ch == '\\'); - stream.next(); - return null; - }, - startState: function() { - return { - pair: false, - pairStart: false, - keyCol: 0, - inlinePairs: 0, - inlineList: 0, - literal: false, - escaped: false - }; - } - }; -}); - -CodeMirror.defineMIME("text/x-yaml", "yaml"); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/theme/cobalt.css b/src/lib/flask_debugtoolbar/static/codemirror/theme/cobalt.css deleted file mode 100644 index dbbb7e4..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/theme/cobalt.css +++ /dev/null @@ -1,18 +0,0 @@ -.cm-s-cobalt { background: #002240; color: white; } -.cm-s-cobalt div.CodeMirror-selected { background: #b36539 !important; } -.cm-s-cobalt .CodeMirror-gutter { background: #002240; border-right: 1px solid #aaa; } -.cm-s-cobalt .CodeMirror-gutter-text { color: #d0d0d0; } -.cm-s-cobalt .CodeMirror-cursor { border-left: 1px solid white !important; } - -.cm-s-cobalt span.cm-comment { color: #08f; } -.cm-s-cobalt 
span.cm-atom { color: #845dc4; } -.cm-s-cobalt span.cm-number, .cm-s-cobalt span.cm-attribute { color: #ff80e1; } -.cm-s-cobalt span.cm-keyword { color: #ffee80; } -.cm-s-cobalt span.cm-string { color: #3ad900; } -.cm-s-cobalt span.cm-meta { color: #ff9d00; } -.cm-s-cobalt span.cm-variable-2, .cm-s-cobalt span.cm-tag { color: #9effff; } -.cm-s-cobalt span.cm-variable-3, .cm-s-cobalt span.cm-def { color: white; } -.cm-s-cobalt span.cm-error { color: #9d1e15; } -.cm-s-cobalt span.cm-bracket { color: #d8d8d8; } -.cm-s-cobalt span.cm-builtin, .cm-s-cobalt span.cm-special { color: #ff9e59; } -.cm-s-cobalt span.cm-link { color: #845dc4; } diff --git a/src/lib/flask_debugtoolbar/static/codemirror/theme/eclipse.css b/src/lib/flask_debugtoolbar/static/codemirror/theme/eclipse.css deleted file mode 100644 index 47d66a0..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/theme/eclipse.css +++ /dev/null @@ -1,25 +0,0 @@ -.cm-s-eclipse span.cm-meta {color: #FF1717;} -.cm-s-eclipse span.cm-keyword { line-height: 1em; font-weight: bold; color: #7F0055; } -.cm-s-eclipse span.cm-atom {color: #219;} -.cm-s-eclipse span.cm-number {color: #164;} -.cm-s-eclipse span.cm-def {color: #00f;} -.cm-s-eclipse span.cm-variable {color: black;} -.cm-s-eclipse span.cm-variable-2 {color: #0000C0;} -.cm-s-eclipse span.cm-variable-3 {color: #0000C0;} -.cm-s-eclipse span.cm-property {color: black;} -.cm-s-eclipse span.cm-operator {color: black;} -.cm-s-eclipse span.cm-comment {color: #3F7F5F;} -.cm-s-eclipse span.cm-string {color: #2A00FF;} -.cm-s-eclipse span.cm-string-2 {color: #f50;} -.cm-s-eclipse span.cm-error {color: #f00;} -.cm-s-eclipse span.cm-qualifier {color: #555;} -.cm-s-eclipse span.cm-builtin {color: #30a;} -.cm-s-eclipse span.cm-bracket {color: #cc7;} -.cm-s-eclipse span.cm-tag {color: #170;} -.cm-s-eclipse span.cm-attribute {color: #00c;} -.cm-s-eclipse span.cm-link {color: #219;} - -.cm-s-eclipse .CodeMirror-matchingbracket { - border:1px solid grey; - color:black !important;; -} diff --git a/src/lib/flask_debugtoolbar/static/codemirror/theme/elegant.css b/src/lib/flask_debugtoolbar/static/codemirror/theme/elegant.css deleted file mode 100644 index d0ce0cb..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/theme/elegant.css +++ /dev/null @@ -1,10 +0,0 @@ -.cm-s-elegant span.cm-number, .cm-s-elegant span.cm-string, .cm-s-elegant span.cm-atom {color: #762;} -.cm-s-elegant span.cm-comment {color: #262; font-style: italic; line-height: 1em;} -.cm-s-elegant span.cm-meta {color: #555; font-style: italic; line-height: 1em;} -.cm-s-elegant span.cm-variable {color: black;} -.cm-s-elegant span.cm-variable-2 {color: #b11;} -.cm-s-elegant span.cm-qualifier {color: #555;} -.cm-s-elegant span.cm-keyword {color: #730;} -.cm-s-elegant span.cm-builtin {color: #30a;} -.cm-s-elegant span.cm-error {background-color: #fdd;} -.cm-s-elegant span.cm-link {color: #762;} diff --git a/src/lib/flask_debugtoolbar/static/codemirror/theme/lesser-dark.css b/src/lib/flask_debugtoolbar/static/codemirror/theme/lesser-dark.css deleted file mode 100644 index fb5160b..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/theme/lesser-dark.css +++ /dev/null @@ -1,45 +0,0 @@ -/* -http://lesscss.org/ dark theme -Ported to CodeMirror by Peter Kroon -*/ -.CodeMirror{ - line-height: 15px; -} -.cm-s-lesser-dark { - font-family: 'Bitstream Vera Sans Mono', 'DejaVu Sans Mono', 'Monaco', Courier, monospace !important; - font-size:12px; -} - -.cm-s-lesser-dark { background: #262626; color: #EBEFE7; text-shadow: 0 -1px 1px #262626; } 
-.cm-s-lesser-dark div.CodeMirror-selected {background: #45443B !important;} /* 33322B*/ -.cm-s-lesser-dark .CodeMirror-cursor { border-left: 1px solid white !important; } -.cm-s-lesser-dark .CodeMirror-lines { margin-left:3px; margin-right:3px; }/*editable code holder*/ - -div.CodeMirror span.CodeMirror-matchingbracket { color: #7EFC7E; }/*65FC65*/ - -.cm-s-lesser-dark .CodeMirror-gutter { background: #262626; border-right:1px solid #aaa; padding-right:3px; min-width:2.5em; } -.cm-s-lesser-dark .CodeMirror-gutter-text { color: #777; } - -.cm-s-lesser-dark span.cm-keyword { color: #599eff; } -.cm-s-lesser-dark span.cm-atom { color: #C2B470; } -.cm-s-lesser-dark span.cm-number { color: #B35E4D; } -.cm-s-lesser-dark span.cm-def {color: color: white;} -.cm-s-lesser-dark span.cm-variable { color:#D9BF8C; } -.cm-s-lesser-dark span.cm-variable-2 { color: #669199; } -.cm-s-lesser-dark span.cm-variable-3 { color: white; } -.cm-s-lesser-dark span.cm-property {color: #92A75C;} -.cm-s-lesser-dark span.cm-operator {color: #92A75C;} -.cm-s-lesser-dark span.cm-comment { color: #666; } -.cm-s-lesser-dark span.cm-string { color: #BCD279; } -.cm-s-lesser-dark span.cm-string-2 {color: #f50;} -.cm-s-lesser-dark span.cm-meta { color: #738C73; } -.cm-s-lesser-dark span.cm-error { color: #9d1e15; } -.cm-s-lesser-dark span.cm-qualifier {color: #555;} -.cm-s-lesser-dark span.cm-builtin { color: #ff9e59; } -.cm-s-lesser-dark span.cm-bracket { color: #EBEFE7; } -.cm-s-lesser-dark span.cm-tag { color: #669199; } -.cm-s-lesser-dark span.cm-attribute {color: #00c;} -.cm-s-lesser-dark span.cm-header {color: #a0a;} -.cm-s-lesser-dark span.cm-quote {color: #090;} -.cm-s-lesser-dark span.cm-hr {color: #999;} -.cm-s-lesser-dark span.cm-link {color: #00c;} diff --git a/src/lib/flask_debugtoolbar/static/codemirror/theme/monokai.css b/src/lib/flask_debugtoolbar/static/codemirror/theme/monokai.css deleted file mode 100644 index f01d066..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/theme/monokai.css +++ /dev/null @@ -1,28 +0,0 @@ -/* Based on Sublime Text's Monokai theme */ - -.cm-s-monokai {background: #272822; color: #f8f8f2;} -.cm-s-monokai div.CodeMirror-selected {background: #49483E !important;} -.cm-s-monokai .CodeMirror-gutter {background: #272822; border-right: 0px;} -.cm-s-monokai .CodeMirror-gutter-text {color: #d0d0d0;} -.cm-s-monokai .CodeMirror-cursor {border-left: 1px solid #f8f8f0 !important;} - -.cm-s-monokai span.cm-comment {color: #75715e;} -.cm-s-monokai span.cm-atom {color: #ae81ff;} -.cm-s-monokai span.cm-number {color: #ae81ff;} - -.cm-s-monokai span.cm-property, .cm-s-monokai span.cm-attribute {color: #a6e22e;} -.cm-s-monokai span.cm-keyword {color: #f92672;} -.cm-s-monokai span.cm-string {color: #e6db74;} - -.cm-s-monokai span.cm-variable {color: #a6e22e;} -.cm-s-monokai span.cm-variable-2 {color: #9effff;} -.cm-s-monokai span.cm-def {color: #fd971f;} -.cm-s-monokai span.cm-error {background: #f92672; color: #f8f8f0;} -.cm-s-monokai span.cm-bracket {color: #f8f8f2;} -.cm-s-monokai span.cm-tag {color: #f92672;} -.cm-s-monokai span.cm-link {color: #ae81ff;} - -.cm-s-monokai .CodeMirror-matchingbracket { - text-decoration: underline; - color: white !important; -} diff --git a/src/lib/flask_debugtoolbar/static/codemirror/theme/neat.css b/src/lib/flask_debugtoolbar/static/codemirror/theme/neat.css deleted file mode 100644 index 8a307f8..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/theme/neat.css +++ /dev/null @@ -1,9 +0,0 @@ -.cm-s-neat span.cm-comment { color: #a86; } -.cm-s-neat 
span.cm-keyword { line-height: 1em; font-weight: bold; color: blue; } -.cm-s-neat span.cm-string { color: #a22; } -.cm-s-neat span.cm-builtin { line-height: 1em; font-weight: bold; color: #077; } -.cm-s-neat span.cm-special { line-height: 1em; font-weight: bold; color: #0aa; } -.cm-s-neat span.cm-variable { color: black; } -.cm-s-neat span.cm-number, .cm-s-neat span.cm-atom { color: #3a3; } -.cm-s-neat span.cm-meta {color: #555;} -.cm-s-neat span.cm-link { color: #3a3; } diff --git a/src/lib/flask_debugtoolbar/static/codemirror/theme/night.css b/src/lib/flask_debugtoolbar/static/codemirror/theme/night.css deleted file mode 100644 index 3b296e4..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/theme/night.css +++ /dev/null @@ -1,21 +0,0 @@ -/* Loosely based on the Midnight Textmate theme */ - -.cm-s-night { background: #0a001f; color: #f8f8f8; } -.cm-s-night div.CodeMirror-selected { background: #a8f !important; } -.cm-s-night .CodeMirror-gutter { background: #0a001f; border-right: 1px solid #aaa; } -.cm-s-night .CodeMirror-gutter-text { color: #f8f8f8; } -.cm-s-night .CodeMirror-cursor { border-left: 1px solid white !important; } - -.cm-s-night span.cm-comment { color: #6900a1; } -.cm-s-night span.cm-atom { color: #845dc4; } -.cm-s-night span.cm-number, .cm-s-night span.cm-attribute { color: #ffd500; } -.cm-s-night span.cm-keyword { color: #599eff; } -.cm-s-night span.cm-string { color: #37f14a; } -.cm-s-night span.cm-meta { color: #7678e2; } -.cm-s-night span.cm-variable-2, .cm-s-night span.cm-tag { color: #99b2ff; } -.cm-s-night span.cm-variable-3, .cm-s-night span.cm-def { color: white; } -.cm-s-night span.cm-error { color: #9d1e15; } -.cm-s-night span.cm-bracket { color: #8da6ce; } -.cm-s-night span.cm-comment { color: #6900a1; } -.cm-s-night span.cm-builtin, .cm-s-night span.cm-special { color: #ff9e59; } -.cm-s-night span.cm-link { color: #845dc4; } diff --git a/src/lib/flask_debugtoolbar/static/codemirror/theme/rubyblue.css b/src/lib/flask_debugtoolbar/static/codemirror/theme/rubyblue.css deleted file mode 100644 index 502817a..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/theme/rubyblue.css +++ /dev/null @@ -1,21 +0,0 @@ -.cm-s-rubyblue { font:13px/1.4em Trebuchet, Verdana, sans-serif; } /* - customized editor font - */ - -.cm-s-rubyblue { background: #112435; color: white; } -.cm-s-rubyblue div.CodeMirror-selected { background: #38566F !important; } -.cm-s-rubyblue .CodeMirror-gutter { background: #1F4661; border-right: 7px solid #3E7087; min-width:2.5em; } -.cm-s-rubyblue .CodeMirror-gutter-text { color: white; } -.cm-s-rubyblue .CodeMirror-cursor { border-left: 1px solid white !important; } - -.cm-s-rubyblue span.cm-comment { color: #999; font-style:italic; line-height: 1em; } -.cm-s-rubyblue span.cm-atom { color: #F4C20B; } -.cm-s-rubyblue span.cm-number, .cm-s-rubyblue span.cm-attribute { color: #82C6E0; } -.cm-s-rubyblue span.cm-keyword { color: #F0F; } -.cm-s-rubyblue span.cm-string { color: #F08047; } -.cm-s-rubyblue span.cm-meta { color: #F0F; } -.cm-s-rubyblue span.cm-variable-2, .cm-s-rubyblue span.cm-tag { color: #7BD827; } -.cm-s-rubyblue span.cm-variable-3, .cm-s-rubyblue span.cm-def { color: white; } -.cm-s-rubyblue span.cm-error { color: #AF2018; } -.cm-s-rubyblue span.cm-bracket { color: #F0F; } -.cm-s-rubyblue span.cm-link { color: #F4C20B; } -.cm-s-rubyblue span.CodeMirror-matchingbracket { color:#F0F !important; } -.cm-s-rubyblue span.cm-builtin, .cm-s-rubyblue span.cm-special { color: #FF9D00; } diff --git 
a/src/lib/flask_debugtoolbar/static/codemirror/theme/xq-dark.css b/src/lib/flask_debugtoolbar/static/codemirror/theme/xq-dark.css deleted file mode 100644 index 493e3a6..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/theme/xq-dark.css +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright (C) 2011 by MarkLogic Corporation -Author: Mike Brevoort - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -*/ -.cm-s-xq-dark { background: #0a001f; color: #f8f8f8; } -.cm-s-xq-dark span.CodeMirror-selected { background: #a8f !important; } -.cm-s-xq-dark .CodeMirror-gutter { background: #0a001f; border-right: 1px solid #aaa; } -.cm-s-xq-dark .CodeMirror-gutter-text { color: #f8f8f8; } -.cm-s-xq-dark .CodeMirror-cursor { border-left: 1px solid white !important; } - -.cm-s-xq-dark span.cm-keyword {color: #FFBD40;} -.cm-s-xq-dark span.cm-atom {color: #6C8CD5;} -.cm-s-xq-dark span.cm-number {color: #164;} -.cm-s-xq-dark span.cm-def {color: #FFF; text-decoration:underline;} -.cm-s-xq-dark span.cm-variable {color: #FFF;} -.cm-s-xq-dark span.cm-variable-2 {color: #EEE;} -.cm-s-xq-dark span.cm-variable-3 {color: #DDD;} -.cm-s-xq-dark span.cm-property {} -.cm-s-xq-dark span.cm-operator {} -.cm-s-xq-dark span.cm-comment {color: gray;} -.cm-s-xq-dark span.cm-string {color: #9FEE00;} -.cm-s-xq-dark span.cm-meta {color: yellow;} -.cm-s-xq-dark span.cm-error {color: #f00;} -.cm-s-xq-dark span.cm-qualifier {color: #FFF700;} -.cm-s-xq-dark span.cm-builtin {color: #30a;} -.cm-s-xq-dark span.cm-bracket {color: #cc7;} -.cm-s-xq-dark span.cm-tag {color: #FFBD40;} -.cm-s-xq-dark span.cm-attribute {color: #FFF700;} diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/closetag.js b/src/lib/flask_debugtoolbar/static/codemirror/util/closetag.js deleted file mode 100644 index 89f4b5c..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/closetag.js +++ /dev/null @@ -1,174 +0,0 @@ -/** - * Tag-closer extension for CodeMirror. - * - * This extension adds a "closeTag" utility function that can be used with key bindings to - * insert a matching end tag after the ">" character of a start tag has been typed. It can - * also complete " - * Contributed under the same license terms as CodeMirror. - */ -(function() { - /** Option that allows tag closing behavior to be toggled. Default is true. */ - CodeMirror.defaults['closeTagEnabled'] = true; - - /** Array of tag names to add indentation after the start tag for. Default is the list of block-level html tags. 
*/ - CodeMirror.defaults['closeTagIndent'] = ['applet', 'blockquote', 'body', 'button', 'div', 'dl', 'fieldset', 'form', 'frameset', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'head', 'html', 'iframe', 'layer', 'legend', 'object', 'ol', 'p', 'select', 'table', 'ul']; - - /** - * Call during key processing to close tags. Handles the key event if the tag is closed, otherwise throws CodeMirror.Pass. - * - cm: The editor instance. - * - ch: The character being processed. - * - indent: Optional. Omit or pass true to use the default indentation tag list defined in the 'closeTagIndent' option. - * Pass false to disable indentation. Pass an array to override the default list of tag names. - */ - CodeMirror.defineExtension("closeTag", function(cm, ch, indent) { - if (!cm.getOption('closeTagEnabled')) { - throw CodeMirror.Pass; - } - - var mode = cm.getOption('mode'); - - if (mode == 'text/html') { - - /* - * Relevant structure of token: - * - * htmlmixed - * className - * state - * htmlState - * type - * context - * tagName - * mode - * - * xml - * className - * state - * tagName - * type - */ - - var pos = cm.getCursor(); - var tok = cm.getTokenAt(pos); - var state = tok.state.base || tok.state; - - if (state.mode && state.mode != 'html') { - throw CodeMirror.Pass; // With htmlmixed, we only care about the html sub-mode. - } - - if (ch == '>') { - var type = state.htmlState ? state.htmlState.type : state.type; // htmlmixed : xml - - if (tok.className == 'tag' && type == 'closeTag') { - throw CodeMirror.Pass; // Don't process the '>' at the end of an end-tag. - } - - cm.replaceSelection('>'); // Mode state won't update until we finish the tag. - pos = {line: pos.line, ch: pos.ch + 1}; - cm.setCursor(pos); - - tok = cm.getTokenAt(cm.getCursor()); - state = tok.state.base || tok.state; - type = state.htmlState ? state.htmlState.type : state.type; // htmlmixed : xml - - if (tok.className == 'tag' && type != 'selfcloseTag') { - var tagName = state.htmlState ? state.htmlState.context.tagName : state.tagName; // htmlmixed : xml - if (tagName.length > 0) { - insertEndTag(cm, indent, pos, tagName); - } - return; - } - - // Undo the '>' insert and allow cm to handle the key instead. - cm.setSelection({line: pos.line, ch: pos.ch - 1}, pos); - cm.replaceSelection(""); - - } else if (ch == '/') { - if (tok.className == 'tag' && tok.string == '<') { - var tagName = state.htmlState ? (state.htmlState.context ? 
state.htmlState.context.tagName : '') : state.context.tagName; // htmlmixed : xml # extra htmlmized check is for ' 0) { - completeEndTag(cm, pos, tagName); - return; - } - } - } - - } else if (mode == 'xmlpure') { - - var pos = cm.getCursor(); - var tok = cm.getTokenAt(pos); - var tagName = tok.state.context.tagName; - - if (ch == '>') { - // tagName=foo, string=foo - // tagName=foo, string=/ # ignore - // tagName=foo, string=/foo # ignore - if (tok.string == tagName) { - cm.replaceSelection('>'); // parity w/html modes - pos = {line: pos.line, ch: pos.ch + 1}; - cm.setCursor(pos); - - insertEndTag(cm, indent, pos, tagName); - return; - } - - } else if (ch == '/') { - // ', 'end'); - cm.indentLine(pos.line + 1); - cm.indentLine(pos.line + 2); - cm.setCursor({line: pos.line + 1, ch: cm.getLine(pos.line + 1).length}); - } else { - cm.replaceSelection(''); - cm.setCursor(pos); - } - } - - function shouldIndent(cm, indent, tagName) { - if (typeof indent == 'undefined' || indent == null || indent == true) { - indent = cm.getOption('closeTagIndent'); - } - if (!indent) { - indent = []; - } - return indexOf(indent, tagName.toLowerCase()) != -1; - } - - // C&P from codemirror.js...would be nice if this were visible to utilities. - function indexOf(collection, elt) { - if (collection.indexOf) return collection.indexOf(elt); - for (var i = 0, e = collection.length; i < e; ++i) - if (collection[i] == elt) return i; - return -1; - } - - function completeEndTag(cm, pos, tagName) { - cm.replaceSelection('/' + tagName + '>'); - cm.setCursor({line: pos.line, ch: pos.ch + tagName.length + 2 }); - } - -})(); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/dialog.css b/src/lib/flask_debugtoolbar/static/codemirror/util/dialog.css deleted file mode 100644 index 4cb467e..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/dialog.css +++ /dev/null @@ -1,23 +0,0 @@ -.CodeMirror-dialog { - position: relative; -} - -.CodeMirror-dialog > div { - position: absolute; - top: 0; left: 0; right: 0; - background: white; - border-bottom: 1px solid #eee; - z-index: 15; - padding: .1em .8em; - overflow: hidden; - color: #333; -} - -.CodeMirror-dialog input { - border: none; - outline: none; - background: transparent; - width: 20em; - color: inherit; - font-family: monospace; -} diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/dialog.js b/src/lib/flask_debugtoolbar/static/codemirror/util/dialog.js deleted file mode 100644 index 8950bf0..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/dialog.js +++ /dev/null @@ -1,63 +0,0 @@ -// Open simple dialogs on top of an editor. Relies on dialog.css. - -(function() { - function dialogDiv(cm, template) { - var wrap = cm.getWrapperElement(); - var dialog = wrap.insertBefore(document.createElement("div"), wrap.firstChild); - dialog.className = "CodeMirror-dialog"; - dialog.innerHTML = '
<div>' + template + '</div>
'; - return dialog; - } - - CodeMirror.defineExtension("openDialog", function(template, callback) { - var dialog = dialogDiv(this, template); - var closed = false, me = this; - function close() { - if (closed) return; - closed = true; - dialog.parentNode.removeChild(dialog); - } - var inp = dialog.getElementsByTagName("input")[0]; - if (inp) { - CodeMirror.connect(inp, "keydown", function(e) { - if (e.keyCode == 13 || e.keyCode == 27) { - CodeMirror.e_stop(e); - close(); - me.focus(); - if (e.keyCode == 13) callback(inp.value); - } - }); - inp.focus(); - CodeMirror.connect(inp, "blur", close); - } - return close; - }); - - CodeMirror.defineExtension("openConfirm", function(template, callbacks) { - var dialog = dialogDiv(this, template); - var buttons = dialog.getElementsByTagName("button"); - var closed = false, me = this, blurring = 1; - function close() { - if (closed) return; - closed = true; - dialog.parentNode.removeChild(dialog); - me.focus(); - } - buttons[0].focus(); - for (var i = 0; i < buttons.length; ++i) { - var b = buttons[i]; - (function(callback) { - CodeMirror.connect(b, "click", function(e) { - CodeMirror.e_preventDefault(e); - close(); - if (callback) callback(me); - }); - })(callbacks[i]); - CodeMirror.connect(b, "blur", function() { - --blurring; - setTimeout(function() { if (blurring <= 0) close(); }, 200); - }); - CodeMirror.connect(b, "focus", function() { ++blurring; }); - } - }); -})(); \ No newline at end of file diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/foldcode.js b/src/lib/flask_debugtoolbar/static/codemirror/util/foldcode.js deleted file mode 100644 index b700d86..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/foldcode.js +++ /dev/null @@ -1,186 +0,0 @@ -// the tagRangeFinder function is -// Copyright (C) 2011 by Daniel Glazman -// released under the MIT license (../../LICENSE) like the rest of CodeMirror -CodeMirror.tagRangeFinder = function(cm, line) { - var nameStartChar = "A-Z_a-z\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u02FF\\u0370-\\u037D\\u037F-\\u1FFF\\u200C-\\u200D\\u2070-\\u218F\\u2C00-\\u2FEF\\u3001-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFFD"; - var nameChar = nameStartChar + "\-\.0-9\\u00B7\\u0300-\\u036F\\u203F-\\u2040"; - var xmlNAMERegExp = new RegExp("^[" + nameStartChar + "][" + nameChar + "]*"); - - var lineText = cm.getLine(line); - var found = false; - var tag = null; - var pos = 0; - while (!found) { - pos = lineText.indexOf("<", pos); - if (-1 == pos) // no tag on line - return; - if (pos + 1 < lineText.length && lineText[pos + 1] == "/") { // closing tag - pos++; - continue; - } - // ok we weem to have a start tag - if (!lineText.substr(pos + 1).match(xmlNAMERegExp)) { // not a tag name... - pos++; - continue; - } - var gtPos = lineText.indexOf(">", pos + 1); - if (-1 == gtPos) { // end of start tag not in line - var l = line + 1; - var foundGt = false; - var lastLine = cm.lineCount(); - while (l < lastLine && !foundGt) { - var lt = cm.getLine(l); - var gt = lt.indexOf(">"); - if (-1 != gt) { // found a > - foundGt = true; - var slash = lt.lastIndexOf("/", gt); - if (-1 != slash && slash < gt) { - var str = lineText.substr(slash, gt - slash + 1); - if (!str.match( /\/\s*\>/ )) // yep, that's the end of empty tag - return l+1; - } - } - l++; - } - found = true; - } - else { - var slashPos = lineText.lastIndexOf("/", gtPos); - if (-1 == slashPos) { // cannot be empty tag - found = true; - // don't continue - } - else { // empty tag? 
- // check if really empty tag - var str = lineText.substr(slashPos, gtPos - slashPos + 1); - if (!str.match( /\/\s*\>/ )) { // finally not empty - found = true; - // don't continue - } - } - } - if (found) { - var subLine = lineText.substr(pos + 1); - tag = subLine.match(xmlNAMERegExp); - if (tag) { - // we have an element name, wooohooo ! - tag = tag[0]; - // do we have the close tag on same line ??? - if (-1 != lineText.indexOf("", pos)) // yep - { - found = false; - } - // we don't, so we have a candidate... - } - else - found = false; - } - if (!found) - pos++; - } - - if (found) { - var startTag = "(\\<\\/" + tag + "\\>)|(\\<" + tag + "\\>)|(\\<" + tag + "\\s)|(\\<" + tag + "$)"; - var startTagRegExp = new RegExp(startTag, "g"); - var endTag = ""; - var depth = 1; - var l = line + 1; - var lastLine = cm.lineCount(); - while (l < lastLine) { - lineText = cm.getLine(l); - var match = lineText.match(startTagRegExp); - if (match) { - for (var i = 0; i < match.length; i++) { - if (match[i] == endTag) - depth--; - else - depth++; - if (!depth) - return l+1; - } - } - l++; - } - return; - } -}; - -CodeMirror.braceRangeFinder = function(cm, line) { - var lineText = cm.getLine(line); - var startChar = lineText.lastIndexOf("{"); - if (startChar < 0 || lineText.lastIndexOf("}") > startChar) return; - var tokenType = cm.getTokenAt({line: line, ch: startChar}).className; - var count = 1, lastLine = cm.lineCount(), end; - outer: for (var i = line + 1; i < lastLine; ++i) { - var text = cm.getLine(i), pos = 0; - for (;;) { - var nextOpen = text.indexOf("{", pos), nextClose = text.indexOf("}", pos); - if (nextOpen < 0) nextOpen = text.length; - if (nextClose < 0) nextClose = text.length; - pos = Math.min(nextOpen, nextClose); - if (pos == text.length) break; - if (cm.getTokenAt({line: i, ch: pos + 1}).className == tokenType) { - if (pos == nextOpen) ++count; - else if (!--count) { end = i; break outer; } - } - ++pos; - } - } - if (end == null || end == line + 1) return; - return end; -}; - -CodeMirror.indentRangeFinder = function(cm, line) { - var tabSize = cm.getOption("tabSize"); - var myIndent = cm.getLineHandle(line).indentation(tabSize), last; - for (var i = line + 1, end = cm.lineCount(); i < end; ++i) { - var handle = cm.getLineHandle(i); - if (!/^\s*$/.test(handle.text)) { - if (handle.indentation(tabSize) <= myIndent) break; - last = i; - } - } - if (!last) return null; - return last + 1; -}; - -CodeMirror.newFoldFunction = function(rangeFinder, markText) { - var folded = []; - if (markText == null) markText = '
%N%'; - - function isFolded(cm, n) { - for (var i = 0; i < folded.length; ++i) { - var start = cm.lineInfo(folded[i].start); - if (!start) folded.splice(i--, 1); - else if (start.line == n) return {pos: i, region: folded[i]}; - } - } - - function expand(cm, region) { - cm.clearMarker(region.start); - for (var i = 0; i < region.hidden.length; ++i) - cm.showLine(region.hidden[i]); - } - - return function(cm, line) { - cm.operation(function() { - var known = isFolded(cm, line); - if (known) { - folded.splice(known.pos, 1); - expand(cm, known.region); - } else { - var end = rangeFinder(cm, line); - if (end == null) return; - var hidden = []; - for (var i = line + 1; i < end; ++i) { - var handle = cm.hideLine(i); - if (handle) hidden.push(handle); - } - var first = cm.setMarker(line, markText); - var region = {start: first, hidden: hidden}; - cm.onDeleteLine(first, function() { expand(cm, region); }); - folded.push(region); - } - }); - }; -}; diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/formatting.js b/src/lib/flask_debugtoolbar/static/codemirror/util/formatting.js deleted file mode 100644 index e189119..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/formatting.js +++ /dev/null @@ -1,294 +0,0 @@ -// ============== Formatting extensions ============================ -// A common storage for all mode-specific formatting features -if (!CodeMirror.modeExtensions) CodeMirror.modeExtensions = {}; - -// Returns the extension of the editor's current mode -CodeMirror.defineExtension("getModeExt", function () { - var mname = CodeMirror.resolveMode(this.getOption("mode")).name; - var ext = CodeMirror.modeExtensions[mname]; - if (!ext) throw new Error("No extensions found for mode " + mname); - return ext; -}); - -// If the current mode is 'htmlmixed', returns the extension of a mode located at -// the specified position (can be htmlmixed, css or javascript). Otherwise, simply -// returns the extension of the editor's current mode. -CodeMirror.defineExtension("getModeExtAtPos", function (pos) { - var token = this.getTokenAt(pos); - if (token && token.state && token.state.mode) - return CodeMirror.modeExtensions[token.state.mode == "html" ? 
"htmlmixed" : token.state.mode]; - else - return this.getModeExt(); -}); - -// Comment/uncomment the specified range -CodeMirror.defineExtension("commentRange", function (isComment, from, to) { - var curMode = this.getModeExtAtPos(this.getCursor()); - if (isComment) { // Comment range - var commentedText = this.getRange(from, to); - this.replaceRange(curMode.commentStart + this.getRange(from, to) + curMode.commentEnd - , from, to); - if (from.line == to.line && from.ch == to.ch) { // An empty comment inserted - put cursor inside - this.setCursor(from.line, from.ch + curMode.commentStart.length); - } - } - else { // Uncomment range - var selText = this.getRange(from, to); - var startIndex = selText.indexOf(curMode.commentStart); - var endIndex = selText.lastIndexOf(curMode.commentEnd); - if (startIndex > -1 && endIndex > -1 && endIndex > startIndex) { - // Take string till comment start - selText = selText.substr(0, startIndex) - // From comment start till comment end - + selText.substring(startIndex + curMode.commentStart.length, endIndex) - // From comment end till string end - + selText.substr(endIndex + curMode.commentEnd.length); - } - this.replaceRange(selText, from, to); - } -}); - -// Applies automatic mode-aware indentation to the specified range -CodeMirror.defineExtension("autoIndentRange", function (from, to) { - var cmInstance = this; - this.operation(function () { - for (var i = from.line; i <= to.line; i++) { - cmInstance.indentLine(i, "smart"); - } - }); -}); - -// Applies automatic formatting to the specified range -CodeMirror.defineExtension("autoFormatRange", function (from, to) { - var absStart = this.indexFromPos(from); - var absEnd = this.indexFromPos(to); - // Insert additional line breaks where necessary according to the - // mode's syntax - var res = this.getModeExt().autoFormatLineBreaks(this.getValue(), absStart, absEnd); - var cmInstance = this; - - // Replace and auto-indent the range - this.operation(function () { - cmInstance.replaceRange(res, from, to); - var startLine = cmInstance.posFromIndex(absStart).line; - var endLine = cmInstance.posFromIndex(absStart + res.length).line; - for (var i = startLine; i <= endLine; i++) { - cmInstance.indentLine(i, "smart"); - } - }); -}); - -// Define extensions for a few modes - -CodeMirror.modeExtensions["css"] = { - commentStart: "/*", - commentEnd: "*/", - wordWrapChars: [";", "\\{", "\\}"], - autoFormatLineBreaks: function (text) { - return text.replace(new RegExp("(;|\\{|\\})([^\r\n])", "g"), "$1\n$2"); - } -}; - -CodeMirror.modeExtensions["javascript"] = { - commentStart: "/*", - commentEnd: "*/", - wordWrapChars: [";", "\\{", "\\}"], - - getNonBreakableBlocks: function (text) { - var nonBreakableRegexes = [ - new RegExp("for\\s*?\\(([\\s\\S]*?)\\)"), - new RegExp("'([\\s\\S]*?)('|$)"), - new RegExp("\"([\\s\\S]*?)(\"|$)"), - new RegExp("//.*([\r\n]|$)") - ]; - var nonBreakableBlocks = new Array(); - for (var i = 0; i < nonBreakableRegexes.length; i++) { - var curPos = 0; - while (curPos < text.length) { - var m = text.substr(curPos).match(nonBreakableRegexes[i]); - if (m != null) { - nonBreakableBlocks.push({ - start: curPos + m.index, - end: curPos + m.index + m[0].length - }); - curPos += m.index + Math.max(1, m[0].length); - } - else { // No more matches - break; - } - } - } - nonBreakableBlocks.sort(function (a, b) { - return a.start - b.start; - }); - - return nonBreakableBlocks; - }, - - autoFormatLineBreaks: function (text) { - var curPos = 0; - var reLinesSplitter = new RegExp("(;|\\{|\\})([^\r\n])", "g"); 
- var nonBreakableBlocks = this.getNonBreakableBlocks(text); - if (nonBreakableBlocks != null) { - var res = ""; - for (var i = 0; i < nonBreakableBlocks.length; i++) { - if (nonBreakableBlocks[i].start > curPos) { // Break lines till the block - res += text.substring(curPos, nonBreakableBlocks[i].start).replace(reLinesSplitter, "$1\n$2"); - curPos = nonBreakableBlocks[i].start; - } - if (nonBreakableBlocks[i].start <= curPos - && nonBreakableBlocks[i].end >= curPos) { // Skip non-breakable block - res += text.substring(curPos, nonBreakableBlocks[i].end); - curPos = nonBreakableBlocks[i].end; - } - } - if (curPos < text.length - 1) { - res += text.substr(curPos).replace(reLinesSplitter, "$1\n$2"); - } - return res; - } - else { - return text.replace(reLinesSplitter, "$1\n$2"); - } - } -}; - -CodeMirror.modeExtensions["xml"] = { - commentStart: "", - wordWrapChars: [">"], - - autoFormatLineBreaks: function (text) { - var lines = text.split("\n"); - var reProcessedPortion = new RegExp("(^\\s*?<|^[^<]*?)(.+)(>\\s*?$|[^>]*?$)"); - var reOpenBrackets = new RegExp("<", "g"); - var reCloseBrackets = new RegExp("(>)([^\r\n])", "g"); - for (var i = 0; i < lines.length; i++) { - var mToProcess = lines[i].match(reProcessedPortion); - if (mToProcess != null && mToProcess.length > 3) { // The line starts with whitespaces and ends with whitespaces - lines[i] = mToProcess[1] - + mToProcess[2].replace(reOpenBrackets, "\n$&").replace(reCloseBrackets, "$1\n$2") - + mToProcess[3]; - continue; - } - } - - return lines.join("\n"); - } -}; - -CodeMirror.modeExtensions["htmlmixed"] = { - commentStart: "", - wordWrapChars: [">", ";", "\\{", "\\}"], - - getModeInfos: function (text, absPos) { - var modeInfos = new Array(); - modeInfos[0] = - { - pos: 0, - modeExt: CodeMirror.modeExtensions["xml"], - modeName: "xml" - }; - - var modeMatchers = new Array(); - modeMatchers[0] = - { - regex: new RegExp("]*>([\\s\\S]*?)(]*>|$)", "i"), - modeExt: CodeMirror.modeExtensions["css"], - modeName: "css" - }; - modeMatchers[1] = - { - regex: new RegExp("]*>([\\s\\S]*?)(]*>|$)", "i"), - modeExt: CodeMirror.modeExtensions["javascript"], - modeName: "javascript" - }; - - var lastCharPos = (typeof (absPos) !== "undefined" ? 
absPos : text.length - 1); - // Detect modes for the entire text - for (var i = 0; i < modeMatchers.length; i++) { - var curPos = 0; - while (curPos <= lastCharPos) { - var m = text.substr(curPos).match(modeMatchers[i].regex); - if (m != null) { - if (m.length > 1 && m[1].length > 0) { - // Push block begin pos - var blockBegin = curPos + m.index + m[0].indexOf(m[1]); - modeInfos.push( - { - pos: blockBegin, - modeExt: modeMatchers[i].modeExt, - modeName: modeMatchers[i].modeName - }); - // Push block end pos - modeInfos.push( - { - pos: blockBegin + m[1].length, - modeExt: modeInfos[0].modeExt, - modeName: modeInfos[0].modeName - }); - curPos += m.index + m[0].length; - continue; - } - else { - curPos += m.index + Math.max(m[0].length, 1); - } - } - else { // No more matches - break; - } - } - } - // Sort mode infos - modeInfos.sort(function sortModeInfo(a, b) { - return a.pos - b.pos; - }); - - return modeInfos; - }, - - autoFormatLineBreaks: function (text, startPos, endPos) { - var modeInfos = this.getModeInfos(text); - var reBlockStartsWithNewline = new RegExp("^\\s*?\n"); - var reBlockEndsWithNewline = new RegExp("\n\\s*?$"); - var res = ""; - // Use modes info to break lines correspondingly - if (modeInfos.length > 1) { // Deal with multi-mode text - for (var i = 1; i <= modeInfos.length; i++) { - var selStart = modeInfos[i - 1].pos; - var selEnd = (i < modeInfos.length ? modeInfos[i].pos : endPos); - - if (selStart >= endPos) { // The block starts later than the needed fragment - break; - } - if (selStart < startPos) { - if (selEnd <= startPos) { // The block starts earlier than the needed fragment - continue; - } - selStart = startPos; - } - if (selEnd > endPos) { - selEnd = endPos; - } - var textPortion = text.substring(selStart, selEnd); - if (modeInfos[i - 1].modeName != "xml") { // Starting a CSS or JavaScript block - if (!reBlockStartsWithNewline.test(textPortion) - && selStart > 0) { // The block does not start with a line break - textPortion = "\n" + textPortion; - } - if (!reBlockEndsWithNewline.test(textPortion) - && selEnd < text.length - 1) { // The block does not end with a line break - textPortion += "\n"; - } - } - res += modeInfos[i - 1].modeExt.autoFormatLineBreaks(textPortion); - } - } - else { // Single-mode text - res = modeInfos[0].modeExt.autoFormatLineBreaks(text.substring(startPos, endPos)); - } - - return res; - } -}; diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/javascript-hint.js b/src/lib/flask_debugtoolbar/static/codemirror/util/javascript-hint.js deleted file mode 100644 index 2117e5a..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/javascript-hint.js +++ /dev/null @@ -1,134 +0,0 @@ -(function () { - function forEach(arr, f) { - for (var i = 0, e = arr.length; i < e; ++i) f(arr[i]); - } - - function arrayContains(arr, item) { - if (!Array.prototype.indexOf) { - var i = arr.length; - while (i--) { - if (arr[i] === item) { - return true; - } - } - return false; - } - return arr.indexOf(item) != -1; - } - - function scriptHint(editor, keywords, getToken) { - // Find the token at the cursor - var cur = editor.getCursor(), token = getToken(editor, cur), tprop = token; - // If it's not a 'word-style' token, ignore the token. - if (!/^[\w$_]*$/.test(token.string)) { - token = tprop = {start: cur.ch, end: cur.ch, string: "", state: token.state, - className: token.string == "." ? "property" : null}; - } - // If it is a property, find out what it is a property of. 
- while (tprop.className == "property") { - tprop = getToken(editor, {line: cur.line, ch: tprop.start}); - if (tprop.string != ".") return; - tprop = getToken(editor, {line: cur.line, ch: tprop.start}); - if (tprop.string == ')') { - var level = 1; - do { - tprop = getToken(editor, {line: cur.line, ch: tprop.start}); - switch (tprop.string) { - case ')': level++; break; - case '(': level--; break; - default: break; - } - } while (level > 0) - tprop = getToken(editor, {line: cur.line, ch: tprop.start}); - if (tprop.className == 'variable') - tprop.className = 'function'; - else return; // no clue - } - if (!context) var context = []; - context.push(tprop); - } - return {list: getCompletions(token, context, keywords), - from: {line: cur.line, ch: token.start}, - to: {line: cur.line, ch: token.end}}; - } - - CodeMirror.javascriptHint = function(editor) { - return scriptHint(editor, javascriptKeywords, - function (e, cur) {return e.getTokenAt(cur);}); - } - - function getCoffeeScriptToken(editor, cur) { - // This getToken, it is for coffeescript, imitates the behavior of - // getTokenAt method in javascript.js, that is, returning "property" - // type and treat "." as indepenent token. - var token = editor.getTokenAt(cur); - if (cur.ch == token.start + 1 && token.string.charAt(0) == '.') { - token.end = token.start; - token.string = '.'; - token.className = "property"; - } - else if (/^\.[\w$_]*$/.test(token.string)) { - token.className = "property"; - token.start++; - token.string = token.string.replace(/\./, ''); - } - return token; - } - - CodeMirror.coffeescriptHint = function(editor) { - return scriptHint(editor, coffeescriptKeywords, getCoffeeScriptToken); - } - - var stringProps = ("charAt charCodeAt indexOf lastIndexOf substring substr slice trim trimLeft trimRight " + - "toUpperCase toLowerCase split concat match replace search").split(" "); - var arrayProps = ("length concat join splice push pop shift unshift slice reverse sort indexOf " + - "lastIndexOf every some filter forEach map reduce reduceRight ").split(" "); - var funcProps = "prototype apply call bind".split(" "); - var javascriptKeywords = ("break case catch continue debugger default delete do else false finally for function " + - "if in instanceof new null return switch throw true try typeof var void while with").split(" "); - var coffeescriptKeywords = ("and break catch class continue delete do else extends false finally for " + - "if in instanceof isnt new no not null of off on or return switch then throw true try typeof until void while with yes").split(" "); - - function getCompletions(token, context, keywords) { - var found = [], start = token.string; - function maybeAdd(str) { - if (str.indexOf(start) == 0 && !arrayContains(found, str)) found.push(str); - } - function gatherCompletions(obj) { - if (typeof obj == "string") forEach(stringProps, maybeAdd); - else if (obj instanceof Array) forEach(arrayProps, maybeAdd); - else if (obj instanceof Function) forEach(funcProps, maybeAdd); - for (var name in obj) maybeAdd(name); - } - - if (context) { - // If this is a property, see if it belongs to some object we can - // find in the current environment. 
- var obj = context.pop(), base; - if (obj.className == "variable") - base = window[obj.string]; - else if (obj.className == "string") - base = ""; - else if (obj.className == "atom") - base = 1; - else if (obj.className == "function") { - if (window.jQuery != null && (obj.string == '$' || obj.string == 'jQuery') && - (typeof window.jQuery == 'function')) - base = window.jQuery(); - else if (window._ != null && (obj.string == '_') && (typeof window._ == 'function')) - base = window._(); - } - while (base != null && context.length) - base = base[context.pop().string]; - if (base != null) gatherCompletions(base); - } - else { - // If not, just look in the window object and any local scope - // (reading into JS mode internals to get at the local variables) - for (var v = token.state.localVars; v; v = v.next) maybeAdd(v.name); - gatherCompletions(window); - forEach(keywords, maybeAdd); - } - return found; - } -})(); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/match-highlighter.js b/src/lib/flask_debugtoolbar/static/codemirror/util/match-highlighter.js deleted file mode 100644 index b70cc4c..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/match-highlighter.js +++ /dev/null @@ -1,44 +0,0 @@ -// Define match-highlighter commands. Depends on searchcursor.js -// Use by attaching the following function call to the onCursorActivity event: - //myCodeMirror.matchHighlight(minChars); -// And including a special span.CodeMirror-matchhighlight css class (also optionally a separate one for .CodeMirror-focused -- see demo matchhighlighter.html) - -(function() { - var DEFAULT_MIN_CHARS = 2; - - function MatchHighlightState() { - this.marked = []; - } - function getMatchHighlightState(cm) { - return cm._matchHighlightState || (cm._matchHighlightState = new MatchHighlightState()); - } - - function clearMarks(cm) { - var state = getMatchHighlightState(cm); - for (var i = 0; i < state.marked.length; ++i) - state.marked[i].clear(); - state.marked = []; - } - - function markDocument(cm, className, minChars) { - clearMarks(cm); - minChars = (typeof minChars !== 'undefined' ? minChars : DEFAULT_MIN_CHARS); - if (cm.somethingSelected() && cm.getSelection().length >= minChars) { - var state = getMatchHighlightState(cm); - var query = cm.getSelection(); - cm.operation(function() { - if (cm.lineCount() < 2000) { // This is too expensive on big documents. - for (var cursor = cm.getSearchCursor(query); cursor.findNext();) { - //Only apply matchhighlight to the matches other than the one actually selected - if (!(cursor.from().line === cm.getCursor(true).line && cursor.from().ch === cm.getCursor(true).ch)) - state.marked.push(cm.markText(cursor.from(), cursor.to(), className)); - } - } - }); - } - } - - CodeMirror.defineExtension("matchHighlight", function(className, minChars) { - markDocument(this, className, minChars); - }); -})(); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/overlay.js b/src/lib/flask_debugtoolbar/static/codemirror/util/overlay.js deleted file mode 100644 index c4cdf9f..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/overlay.js +++ /dev/null @@ -1,51 +0,0 @@ -// Utility function that allows modes to be combined. The mode given -// as the base argument takes care of most of the normal mode -// functionality, but a second (typically simple) mode is used, which -// can override the style of text. 
Both modes get to parse all of the -// text, but when both assign a non-null style to a piece of code, the -// overlay wins, unless the combine argument was true, in which case -// the styles are combined. - -CodeMirror.overlayParser = function(base, overlay, combine) { - return { - startState: function() { - return { - base: CodeMirror.startState(base), - overlay: CodeMirror.startState(overlay), - basePos: 0, baseCur: null, - overlayPos: 0, overlayCur: null - }; - }, - copyState: function(state) { - return { - base: CodeMirror.copyState(base, state.base), - overlay: CodeMirror.copyState(overlay, state.overlay), - basePos: state.basePos, baseCur: null, - overlayPos: state.overlayPos, overlayCur: null - }; - }, - - token: function(stream, state) { - if (stream.start == state.basePos) { - state.baseCur = base.token(stream, state.base); - state.basePos = stream.pos; - } - if (stream.start == state.overlayPos) { - stream.pos = stream.start; - state.overlayCur = overlay.token(stream, state.overlay); - state.overlayPos = stream.pos; - } - stream.pos = Math.min(state.basePos, state.overlayPos); - if (stream.eol()) state.basePos = state.overlayPos = 0; - - if (state.overlayCur == null) return state.baseCur; - if (state.baseCur != null && combine) return state.baseCur + " " + state.overlayCur; - else return state.overlayCur; - }, - - indent: function(state, textAfter) { - return base.indent(state.base, textAfter); - }, - electricChars: base.electricChars - }; -}; diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/runmode.js b/src/lib/flask_debugtoolbar/static/codemirror/util/runmode.js deleted file mode 100644 index fc58d85..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/runmode.js +++ /dev/null @@ -1,49 +0,0 @@ -CodeMirror.runMode = function(string, modespec, callback, options) { - var mode = CodeMirror.getMode(CodeMirror.defaults, modespec); - var isNode = callback.nodeType == 1; - var tabSize = (options && options.tabSize) || CodeMirror.defaults.tabSize; - if (isNode) { - var node = callback, accum = [], col = 0; - callback = function(text, style) { - if (text == "\n") { - accum.push("
"); - col = 0; - return; - } - var escaped = ""; - // HTML-escape and replace tabs - for (var pos = 0;;) { - var idx = text.indexOf("\t", pos); - if (idx == -1) { - escaped += CodeMirror.htmlEscape(text.slice(pos)); - col += text.length - pos; - break; - } else { - col += idx - pos; - escaped += CodeMirror.htmlEscape(text.slice(pos, idx)); - var size = tabSize - col % tabSize; - col += size; - for (var i = 0; i < size; ++i) escaped += " "; - pos = idx + 1; - } - } - - if (style) - accum.push("" + escaped + ""); - else - accum.push(escaped); - } - } - var lines = CodeMirror.splitLines(string), state = CodeMirror.startState(mode); - for (var i = 0, e = lines.length; i < e; ++i) { - if (i) callback("\n"); - var stream = new CodeMirror.StringStream(lines[i]); - while (!stream.eol()) { - var style = mode.token(stream, state); - callback(stream.current(), style, i, stream.start); - stream.start = stream.pos; - } - } - if (isNode) - node.innerHTML = accum.join(""); -}; diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/search.js b/src/lib/flask_debugtoolbar/static/codemirror/util/search.js deleted file mode 100644 index 63ebca9..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/search.js +++ /dev/null @@ -1,114 +0,0 @@ -// Define search commands. Depends on dialog.js or another -// implementation of the openDialog method. - -// Replace works a little oddly -- it will do the replace on the next -// Ctrl-G (or whatever is bound to findNext) press. You prevent a -// replace by making sure the match is no longer selected when hitting -// Ctrl-G. - -(function() { - function SearchState() { - this.posFrom = this.posTo = this.query = null; - this.marked = []; - } - function getSearchState(cm) { - return cm._searchState || (cm._searchState = new SearchState()); - } - function dialog(cm, text, shortText, f) { - if (cm.openDialog) cm.openDialog(text, f); - else f(prompt(shortText, "")); - } - function confirmDialog(cm, text, shortText, fs) { - if (cm.openConfirm) cm.openConfirm(text, fs); - else if (confirm(shortText)) fs[0](); - } - function parseQuery(query) { - var isRE = query.match(/^\/(.*)\/$/); - return isRE ? new RegExp(isRE[1]) : query; - } - var queryDialog = - 'Search: (Use /re/ syntax for regexp search)'; - function doSearch(cm, rev) { - var state = getSearchState(cm); - if (state.query) return findNext(cm, rev); - dialog(cm, queryDialog, "Search for:", function(query) { - cm.operation(function() { - if (!query || state.query) return; - state.query = parseQuery(query); - if (cm.lineCount() < 2000) { // This is too expensive on big documents. - for (var cursor = cm.getSearchCursor(query); cursor.findNext();) - state.marked.push(cm.markText(cursor.from(), cursor.to(), "CodeMirror-searching")); - } - state.posFrom = state.posTo = cm.getCursor(); - findNext(cm, rev); - }); - }); - } - function findNext(cm, rev) {cm.operation(function() { - var state = getSearchState(cm); - var cursor = cm.getSearchCursor(state.query, rev ? state.posFrom : state.posTo); - if (!cursor.find(rev)) { - cursor = cm.getSearchCursor(state.query, rev ? 
{line: cm.lineCount() - 1} : {line: 0, ch: 0}); - if (!cursor.find(rev)) return; - } - cm.setSelection(cursor.from(), cursor.to()); - state.posFrom = cursor.from(); state.posTo = cursor.to(); - })} - function clearSearch(cm) {cm.operation(function() { - var state = getSearchState(cm); - if (!state.query) return; - state.query = null; - for (var i = 0; i < state.marked.length; ++i) state.marked[i].clear(); - state.marked.length = 0; - })} - - var replaceQueryDialog = - 'Replace: (Use /re/ syntax for regexp search)'; - var replacementQueryDialog = 'With: '; - var doReplaceConfirm = "Replace? "; - function replace(cm, all) { - dialog(cm, replaceQueryDialog, "Replace:", function(query) { - if (!query) return; - query = parseQuery(query); - dialog(cm, replacementQueryDialog, "Replace with:", function(text) { - if (all) { - cm.operation(function() { - for (var cursor = cm.getSearchCursor(query); cursor.findNext();) { - if (typeof query != "string") { - var match = cm.getRange(cursor.from(), cursor.to()).match(query); - cursor.replace(text.replace(/\$(\d)/, function(w, i) {return match[i];})); - } else cursor.replace(text); - } - }); - } else { - clearSearch(cm); - var cursor = cm.getSearchCursor(query, cm.getCursor()); - function advance() { - var start = cursor.from(), match; - if (!(match = cursor.findNext())) { - cursor = cm.getSearchCursor(query); - if (!(match = cursor.findNext()) || - (cursor.from().line == start.line && cursor.from().ch == start.ch)) return; - } - cm.setSelection(cursor.from(), cursor.to()); - confirmDialog(cm, doReplaceConfirm, "Replace?", - [function() {doReplace(match);}, advance]); - } - function doReplace(match) { - cursor.replace(typeof query == "string" ? text : - text.replace(/\$(\d)/, function(w, i) {return match[i];})); - advance(); - } - advance(); - } - }); - }); - } - - CodeMirror.commands.find = function(cm) {clearSearch(cm); doSearch(cm);}; - CodeMirror.commands.findNext = doSearch; - CodeMirror.commands.findPrev = function(cm) {doSearch(cm, true);}; - CodeMirror.commands.clearSearch = clearSearch; - CodeMirror.commands.replace = replace; - CodeMirror.commands.replaceAll = function(cm) {replace(cm, true);}; -})(); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/searchcursor.js b/src/lib/flask_debugtoolbar/static/codemirror/util/searchcursor.js deleted file mode 100644 index 3b77829..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/searchcursor.js +++ /dev/null @@ -1,117 +0,0 @@ -(function(){ - function SearchCursor(cm, query, pos, caseFold) { - this.atOccurrence = false; this.cm = cm; - if (caseFold == null) caseFold = typeof query == "string" && query == query.toLowerCase(); - - pos = pos ? cm.clipPos(pos) : {line: 0, ch: 0}; - this.pos = {from: pos, to: pos}; - - // The matches method is filled in based on the type of query. - // It takes a position and a direction, and returns an object - // describing the next occurrence of the query, or null if no - // more matches were found. 
- if (typeof query != "string") // Regexp match - this.matches = function(reverse, pos) { - if (reverse) { - var line = cm.getLine(pos.line).slice(0, pos.ch), match = line.match(query), start = 0; - while (match) { - var ind = line.indexOf(match[0]); - start += ind; - line = line.slice(ind + 1); - var newmatch = line.match(query); - if (newmatch) match = newmatch; - else break; - start++; - } - } - else { - var line = cm.getLine(pos.line).slice(pos.ch), match = line.match(query), - start = match && pos.ch + line.indexOf(match[0]); - } - if (match) - return {from: {line: pos.line, ch: start}, - to: {line: pos.line, ch: start + match[0].length}, - match: match}; - }; - else { // String query - if (caseFold) query = query.toLowerCase(); - var fold = caseFold ? function(str){return str.toLowerCase();} : function(str){return str;}; - var target = query.split("\n"); - // Different methods for single-line and multi-line queries - if (target.length == 1) - this.matches = function(reverse, pos) { - var line = fold(cm.getLine(pos.line)), len = query.length, match; - if (reverse ? (pos.ch >= len && (match = line.lastIndexOf(query, pos.ch - len)) != -1) - : (match = line.indexOf(query, pos.ch)) != -1) - return {from: {line: pos.line, ch: match}, - to: {line: pos.line, ch: match + len}}; - }; - else - this.matches = function(reverse, pos) { - var ln = pos.line, idx = (reverse ? target.length - 1 : 0), match = target[idx], line = fold(cm.getLine(ln)); - var offsetA = (reverse ? line.indexOf(match) + match.length : line.lastIndexOf(match)); - if (reverse ? offsetA >= pos.ch || offsetA != match.length - : offsetA <= pos.ch || offsetA != line.length - match.length) - return; - for (;;) { - if (reverse ? !ln : ln == cm.lineCount() - 1) return; - line = fold(cm.getLine(ln += reverse ? -1 : 1)); - match = target[reverse ? --idx : ++idx]; - if (idx > 0 && idx < target.length - 1) { - if (line != match) return; - else continue; - } - var offsetB = (reverse ? line.lastIndexOf(match) : line.indexOf(match) + match.length); - if (reverse ? offsetB != line.length - match.length : offsetB != match.length) - return; - var start = {line: pos.line, ch: offsetA}, end = {line: ln, ch: offsetB}; - return {from: reverse ? end : start, to: reverse ? start : end}; - } - }; - } - } - - SearchCursor.prototype = { - findNext: function() {return this.find(false);}, - findPrevious: function() {return this.find(true);}, - - find: function(reverse) { - var self = this, pos = this.cm.clipPos(reverse ? 
this.pos.from : this.pos.to); - function savePosAndFail(line) { - var pos = {line: line, ch: 0}; - self.pos = {from: pos, to: pos}; - self.atOccurrence = false; - return false; - } - - for (;;) { - if (this.pos = this.matches(reverse, pos)) { - this.atOccurrence = true; - return this.pos.match || true; - } - if (reverse) { - if (!pos.line) return savePosAndFail(0); - pos = {line: pos.line-1, ch: this.cm.getLine(pos.line-1).length}; - } - else { - var maxLine = this.cm.lineCount(); - if (pos.line == maxLine - 1) return savePosAndFail(maxLine); - pos = {line: pos.line+1, ch: 0}; - } - } - }, - - from: function() {if (this.atOccurrence) return this.pos.from;}, - to: function() {if (this.atOccurrence) return this.pos.to;}, - - replace: function(newText) { - var self = this; - if (this.atOccurrence) - self.pos.to = this.cm.replaceRange(newText, self.pos.from, self.pos.to); - } - }; - - CodeMirror.defineExtension("getSearchCursor", function(query, pos, caseFold) { - return new SearchCursor(this, query, pos, caseFold); - }); -})(); diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/simple-hint.css b/src/lib/flask_debugtoolbar/static/codemirror/util/simple-hint.css deleted file mode 100644 index 4387cb9..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/simple-hint.css +++ /dev/null @@ -1,16 +0,0 @@ -.CodeMirror-completions { - position: absolute; - z-index: 10; - overflow: hidden; - -webkit-box-shadow: 2px 3px 5px rgba(0,0,0,.2); - -moz-box-shadow: 2px 3px 5px rgba(0,0,0,.2); - box-shadow: 2px 3px 5px rgba(0,0,0,.2); -} -.CodeMirror-completions select { - background: #fafafa; - outline: none; - border: none; - padding: 0; - margin: 0; - font-family: monospace; -} diff --git a/src/lib/flask_debugtoolbar/static/codemirror/util/simple-hint.js b/src/lib/flask_debugtoolbar/static/codemirror/util/simple-hint.js deleted file mode 100644 index 7decd58..0000000 --- a/src/lib/flask_debugtoolbar/static/codemirror/util/simple-hint.js +++ /dev/null @@ -1,72 +0,0 @@ -(function() { - CodeMirror.simpleHint = function(editor, getHints) { - // We want a single cursor position. - if (editor.somethingSelected()) return; - var result = getHints(editor); - if (!result || !result.list.length) return; - var completions = result.list; - function insert(str) { - editor.replaceRange(str, result.from, result.to); - } - // When there is only one completion, use it directly. - if (completions.length == 1) {insert(completions[0]); return true;} - - // Build the select widget - var complete = document.createElement("div"); - complete.className = "CodeMirror-completions"; - var sel = complete.appendChild(document.createElement("select")); - // Opera doesn't move the selection when pressing up/down in a - // multi-select, but it does properly support the size property on - // single-selects, so no multi-select is necessary. - if (!window.opera) sel.multiple = true; - for (var i = 0; i < completions.length; ++i) { - var opt = sel.appendChild(document.createElement("option")); - opt.appendChild(document.createTextNode(completions[i])); - } - sel.firstChild.selected = true; - sel.size = Math.min(10, completions.length); - var pos = editor.cursorCoords(); - complete.style.left = pos.x + "px"; - complete.style.top = pos.yBot + "px"; - document.body.appendChild(complete); - // If we're at the edge of the screen, then we want the menu to appear on the left of the cursor. 
- var winW = window.innerWidth || Math.max(document.body.offsetWidth, document.documentElement.offsetWidth); - if(winW - pos.x < sel.clientWidth) - complete.style.left = (pos.x - sel.clientWidth) + "px"; - // Hack to hide the scrollbar. - if (completions.length <= 10) - complete.style.width = (sel.clientWidth - 1) + "px"; - - var done = false; - function close() { - if (done) return; - done = true; - complete.parentNode.removeChild(complete); - } - function pick() { - insert(completions[sel.selectedIndex]); - close(); - setTimeout(function(){editor.focus();}, 50); - } - CodeMirror.connect(sel, "blur", close); - CodeMirror.connect(sel, "keydown", function(event) { - var code = event.keyCode; - // Enter - if (code == 13) {CodeMirror.e_stop(event); pick();} - // Escape - else if (code == 27) {CodeMirror.e_stop(event); close(); editor.focus();} - else if (code != 38 && code != 40) { - close(); editor.focus(); - // Pass the event to the CodeMirror instance so that it can handle things like backspace properly. - editor.triggerOnKeyDown(event); - setTimeout(function(){CodeMirror.simpleHint(editor, getHints);}, 50); - } - }); - CodeMirror.connect(sel, "dblclick", pick); - - sel.focus(); - // Opera sometimes ignores focusing a freshly created node - if (window.opera) setTimeout(function(){if (!done) sel.focus();}, 100); - return true; - }; -})(); diff --git a/src/lib/flask_debugtoolbar/static/css/toolbar.css b/src/lib/flask_debugtoolbar/static/css/toolbar.css deleted file mode 100644 index c963930..0000000 --- a/src/lib/flask_debugtoolbar/static/css/toolbar.css +++ /dev/null @@ -1,437 +0,0 @@ -/* Debug Toolbar CSS Reset, adapted from Eric Meyer's CSS Reset */ -#flDebug, #flDebug * { - margin:0; - padding:0; - border:0; - outline:0; - font-size:12px; - line-height:1.5em; - color:#000; - vertical-align:baseline; - background: none; - font-family: inherit; - text-align:left; -} - -#flDebug { font-family: sans-serif; color: #000; background: #fff; } - -#flDebug tbody, #flDebug code { - font-family: Consolas, Monaco, "Bitstream Vera Sans Mono", "Lucida Console", monospace; -} - -#flDebug #flDebugToolbar { - background:#111; - width:200px; - z-index:100000000; - position:fixed; - top:0; - bottom:0; - right:0; - opacity:0.9; -} - -#flDebug #flDebugToolbar small { - color:#999; -} - -#flDebug #flDebugToolbar ul { - margin:0; - padding:0; - list-style:none; -} - -#flDebug #flDebugToolbar li { - border-bottom:1px solid #222; - color:#fff; - display:block; - font-weight:bold; - float:none; - margin:0; - padding:0; - position:relative; - width:auto; -} - -#flDebug #flDebugToolbar li>a, -#flDebug #flDebugToolbar li>div.contentless { - font-weight:normal; - font-style:normal; - text-decoration:none; - display:block; - font-size:16px; - padding:10px 10px 5px 25px; - color:#fff; -} - -#flDebug #flDebugToolbar li a:hover { - color:#111; - background-color:#ffc; -} - -#flDebug #flDebugToolbar li.active { - background-image:url(../img/indicator.png); - background-repeat:no-repeat; - background-position:left center; - background-color:#333; - padding-left:10px; -} - -#flDebug #flDebugToolbar li.active a:hover { - color:#b36a60; - background-color:transparent; -} - -#flDebug #flDebugToolbar li small { - font-size:12px; - color:#999; - font-style:normal; - text-decoration:none; - font-variant:small-caps; -} - -#flDebug #flDebugToolbar li .switch { - font-size: 10px; - position: absolute; - display: block; - color: white; - height: 16px; - width: 16px; - cursor: pointer; - top: 15px; - right: 2px; -} - -#flDebug 
#flDebugToolbar li .switch.active { - background-image: url(../img/tick.png); -} - -#flDebug #flDebugToolbar li .switch.inactive { - background-image: url(../img/tick-red.png); -} - - -#flDebug #flDebugToolbarHandle { - position:fixed; - background:#fff; - border:1px solid #111; - top:30px; - right:0; - z-index:100000000; - opacity:0.75; -} - -#flDebug a#flShowToolBarButton { - display:block; - height:75px; - width:30px; - border-right:none; - border-bottom:4px solid #fff; - border-top:4px solid #fff; - border-left:4px solid #fff; - color:#fff; - font-size:10px; - font-weight:bold; - text-decoration:none; - text-align:center; - text-indent:-999999px; - background:#000 url(../img/djdt_vertical.png) no-repeat left center; - opacity:0.5; -} - -#flDebug a#flShowToolBarButton:hover { - background-color:#111; - padding-right:6px; - border-top-color:#FFE761; - border-left-color:#FFE761; - border-bottom-color:#FFE761; - opacity:1.0; -} - -#flDebug code { - display:block; - white-space:pre; - overflow:auto; -} - -#flDebug tr.flDebugOdd { - background-color:#f5f5f5; -} - -#flDebug .panelContent { - display:none; - position:fixed; - margin:0; - top:0; - right:200px; - bottom:0; - left:0px; - background-color:#eee; - color:#666; - z-index:100000000; -} - -#flDebug .panelContent > div { - border-bottom:1px solid #ddd; -} - -#flDebug .flDebugPanelTitle { - position:absolute; - background-color:#ffc; - color:#666; - padding-left:20px; - top:0; - right:0; - left:0; - height:50px; -} - -#flDebug .flDebugPanelTitle code { - display:inline; - font-size:inherit; -} - -#flDebug .flDebugPanelContent { - position:absolute; - top:50px; - right:0; - bottom:0; - left:0; - height:auto; - padding:0 0 0 20px; -} - -#flDebug .flDebugPanelContent .scroll { - height:100%; - overflow:auto; - display:block; - padding:0 10px 0 0; -} - -#flDebug h3 { - font-size:24px; - font-weight:normal; - line-height:50px; -} - -#flDebug h4 { - font-size:20px; - font-weight:bold; - margin-top:0.8em; -} - -#flDebug .panelContent table { - border:1px solid #ccc; - border-collapse:collapse; - width:100%; - background-color:#fff; - display:block; - margin-top:0.8em; - overflow: auto; -} -#flDebug .panelContent tbody td, -#flDebug .panelContent tbody th { - vertical-align:top; - padding:2px 3px; -} -#flDebug .panelContent thead th { - padding:1px 6px 1px 3px; - text-align:left; - font-weight:bold; - font-size:14px; -} -#flDebug .panelContent tbody th { - width:12em; - text-align:right; - color:#666; - padding-right:.5em; -} - -#flDebug .flTemplateHideContextDiv { - background-color:#fff; -} - -/* -#flDebug .panelContent p a:hover, #flDebug .panelContent dd a:hover { - color:#111; - background-color:#ffc; -} - -#flDebug .panelContent p { - padding:0 5px; -} - -#flDebug .panelContent p, #flDebug .panelContent table, #flDebug .panelContent ol, #flDebug .panelContent ul, #flDebug .panelContent dl { - margin:5px 0 15px; - background-color:#fff; -} -#flDebug .panelContent table { - clear:both; - border:0; - padding:0; - margin:0; - border-collapse:collapse; - border-spacing:0; -} - -#flDebug .panelContent table a { - color:#000; - padding:2px 4px; -} -#flDebug .panelContent table a:hover { - background-color:#ffc; -} - -#flDebug .panelContent table th { - background-color:#333; - font-weight:bold; - color:#fff; - padding:3px 7px 3px; - text-align:left; - cursor:pointer; -} -#flDebug .panelContent table td { - padding:5px 10px; - font-size:14px; - background:#fff; - color:#000; - vertical-align:top; - border:0; -} -#flDebug .panelContent table 
tr.flDebugOdd td { - background:#eee; -} -*/ - -#flDebug .panelContent .flDebugClose { - text-indent:-9999999px; - display:block; - position:absolute; - top:4px; - right:15px; - height:40px; - width:40px; - background:url(../img/close.png) no-repeat center center; -} - -#flDebug .panelContent .flDebugClose:hover { - background-image:url(../img/close_hover.png); -} - -#flDebug .panelContent .flDebugClose.flDebugBack { - background-image:url(../img/back.png); -} - -#flDebug .panelContent .flDebugClose.flDebugBack:hover { - background-image:url(../img/back_hover.png); -} - -#flDebug .panelContent dt, #flDebug .panelContent dd { - display:block; -} - -#flDebug .panelContent dt { - margin-top:0.75em; -} - -#flDebug .panelContent dd { - margin-left:10px; -} - -#flDebug a.toggleTemplate { - padding:4px; - background-color:#bbb; - -moz-border-radius:3px; - -webkit-border-radius:3px; -} - -#flDebug a.toggleTemplate:hover { - padding:4px; - background-color:#444; - color:#ffe761; - -moz-border-radius:3px; - -webkit-border-radius:3px; -} - - -#flDebug a.flTemplateShowContext, #flDebug a.flTemplateShowContext span.toggleArrow { - color:#999; -} - -#flDebug a.flTemplateShowContext:hover, #flDebug a.flTemplateShowContext:hover span.toggleArrow { - color:#000; - cursor:pointer; -} - -#flDebug .flDebugSqlWrap { - position:relative; -} - -#flDebug .flDebugSql { - z-index:100000002; -} - -#flDebug .flSQLHideStacktraceDiv tbody th { - text-align: left; -} - -#flDebug .flSqlExplain td { - white-space: pre; -} - -#flDebug span.flDebugLineChart { - background-color:#777; - height:3px; - position:absolute; - bottom:0; - top:0; - left:0; - display:block; - z-index:1000000001; -} -#flDebug span.flDebugLineChartWarning { - background-color:#900; -} - -#flDebug .highlight { color:#000; } -#flDebug .highlight .err { color:#000; } /* Error */ -#flDebug .highlight .g { color:#000; } /* Generic */ -#flDebug .highlight .k { color:#000; font-weight:bold } /* Keyword */ -#flDebug .highlight .o { color:#000; } /* Operator */ -#flDebug .highlight .n { color:#000; } /* Name */ -#flDebug .highlight .mi { color:#000; font-weight:bold } /* Literal.Number.Integer */ -#flDebug .highlight .l { color:#000; } /* Literal */ -#flDebug .highlight .x { color:#000; } /* Other */ -#flDebug .highlight .p { color:#000; } /* Punctuation */ -#flDebug .highlight .m { color:#000; font-weight:bold } /* Literal.Number */ -#flDebug .highlight .s { color:#333 } /* Literal.String */ -#flDebug .highlight .w { color:#888888 } /* Text.Whitespace */ -#flDebug .highlight .il { color:#000; font-weight:bold } /* Literal.Number.Integer.Long */ -#flDebug .highlight .na { color:#333 } /* Name.Attribute */ -#flDebug .highlight .nt { color:#000; font-weight:bold } /* Name.Tag */ -#flDebug .highlight .nv { color:#333 } /* Name.Variable */ -#flDebug .highlight .s2 { color:#333 } /* Literal.String.Double */ -#flDebug .highlight .cp { color:#333 } /* Comment.Preproc */ - -/* tablesorted */ -#flDebug table.tablesorter { - width: 100%; -} -#flDebug table.tablesorter thead th, table.tablesorter tfoot th { - padding-right: 20px; -} -#flDebug table.tablesorter thead th { - background: url(../img/bg.gif) center right no-repeat; - cursor: pointer; -} -#flDebug table.tablesorter tbody tr.odd td { - background-color: #F0F0F6; -} -#flDebug table.tablesorter thead .headerSortUp { - background-image: url(../img/asc.gif); -} -#flDebug table.tablesorter thead .headerSortDown { - background-image: url(../img/desc.gif); -} -#flDebug table.tablesorter thead .headerSortDown, #flDebug 
table.tablesorter thead .headerSortUp { - background-color: #8dbdd8; -} diff --git a/src/lib/flask_debugtoolbar/static/img/asc.gif b/src/lib/flask_debugtoolbar/static/img/asc.gif deleted file mode 100644 index 7415786..0000000 Binary files a/src/lib/flask_debugtoolbar/static/img/asc.gif and /dev/null differ diff --git a/src/lib/flask_debugtoolbar/static/img/back.png b/src/lib/flask_debugtoolbar/static/img/back.png deleted file mode 100644 index 655906e..0000000 Binary files a/src/lib/flask_debugtoolbar/static/img/back.png and /dev/null differ diff --git a/src/lib/flask_debugtoolbar/static/img/back_hover.png b/src/lib/flask_debugtoolbar/static/img/back_hover.png deleted file mode 100644 index 72c88f2..0000000 Binary files a/src/lib/flask_debugtoolbar/static/img/back_hover.png and /dev/null differ diff --git a/src/lib/flask_debugtoolbar/static/img/bg.gif b/src/lib/flask_debugtoolbar/static/img/bg.gif deleted file mode 100644 index fac668f..0000000 Binary files a/src/lib/flask_debugtoolbar/static/img/bg.gif and /dev/null differ diff --git a/src/lib/flask_debugtoolbar/static/img/close.png b/src/lib/flask_debugtoolbar/static/img/close.png deleted file mode 100644 index c46416c..0000000 Binary files a/src/lib/flask_debugtoolbar/static/img/close.png and /dev/null differ diff --git a/src/lib/flask_debugtoolbar/static/img/close_hover.png b/src/lib/flask_debugtoolbar/static/img/close_hover.png deleted file mode 100644 index 33a3462..0000000 Binary files a/src/lib/flask_debugtoolbar/static/img/close_hover.png and /dev/null differ diff --git a/src/lib/flask_debugtoolbar/static/img/desc.gif b/src/lib/flask_debugtoolbar/static/img/desc.gif deleted file mode 100644 index 3b30b3c..0000000 Binary files a/src/lib/flask_debugtoolbar/static/img/desc.gif and /dev/null differ diff --git a/src/lib/flask_debugtoolbar/static/img/djdt_vertical.png b/src/lib/flask_debugtoolbar/static/img/djdt_vertical.png deleted file mode 100644 index 4378d88..0000000 Binary files a/src/lib/flask_debugtoolbar/static/img/djdt_vertical.png and /dev/null differ diff --git a/src/lib/flask_debugtoolbar/static/img/indicator.png b/src/lib/flask_debugtoolbar/static/img/indicator.png deleted file mode 100644 index 769f3ba..0000000 Binary files a/src/lib/flask_debugtoolbar/static/img/indicator.png and /dev/null differ diff --git a/src/lib/flask_debugtoolbar/static/img/panel_bg.png b/src/lib/flask_debugtoolbar/static/img/panel_bg.png deleted file mode 100644 index 89971e9..0000000 Binary files a/src/lib/flask_debugtoolbar/static/img/panel_bg.png and /dev/null differ diff --git a/src/lib/flask_debugtoolbar/static/img/tick-red.png b/src/lib/flask_debugtoolbar/static/img/tick-red.png deleted file mode 100755 index 14583c1..0000000 Binary files a/src/lib/flask_debugtoolbar/static/img/tick-red.png and /dev/null differ diff --git a/src/lib/flask_debugtoolbar/static/img/tick.png b/src/lib/flask_debugtoolbar/static/img/tick.png deleted file mode 100755 index 2429c1f..0000000 Binary files a/src/lib/flask_debugtoolbar/static/img/tick.png and /dev/null differ diff --git a/src/lib/flask_debugtoolbar/static/js/jquery.js b/src/lib/flask_debugtoolbar/static/js/jquery.js deleted file mode 100644 index 9144b8a..0000000 --- a/src/lib/flask_debugtoolbar/static/js/jquery.js +++ /dev/null @@ -1,16 +0,0 @@ -/*! - * jQuery JavaScript Library v1.5 - * http://jquery.com/ - * - * Copyright 2011, John Resig - * Dual licensed under the MIT or GPL Version 2 licenses. 
- * http://jquery.org/license
- *
- * Includes Sizzle.js
- * http://sizzlejs.com/
- * Copyright 2011, The Dojo Foundation
- * Released under the MIT, BSD, and GPL Licenses.
- *
- * Date: Mon Jan 31 08:31:29 2011 -0500
- */
-[minified jQuery 1.5 source elided; the blob's inline HTML template strings were corrupted in extraction, leaving nothing reviewable beyond the header above]
";d.extend(b.style,{position:"absolute",top:0,left:0,margin:0,border:0,width:"1px",height:"1px",visibility:"hidden"}),b.innerHTML=j,a.insertBefore(b,a.firstChild),e=b.firstChild,f=e.firstChild,h=e.nextSibling.firstChild.firstChild,this.doesNotAddBorder=f.offsetTop!==5,this.doesAddBorderForTableAndCells=h.offsetTop===5,f.style.position="fixed",f.style.top="20px",this.supportsFixedPosition=f.offsetTop===20||f.offsetTop===15,f.style.position=f.style.top="",e.style.overflow="hidden",e.style.position="relative",this.subtractsBorderForOverflowNotVisible=f.offsetTop===-5,this.doesNotIncludeMarginInBodyOffset=a.offsetTop!==i,a.removeChild(b),a=b=e=f=g=h=null,d.offset.initialize=d.noop},bodyOffset:function(a){var b=a.offsetTop,c=a.offsetLeft;d.offset.initialize(),d.offset.doesNotIncludeMarginInBodyOffset&&(b+=parseFloat(d.css(a,"marginTop"))||0,c+=parseFloat(d.css(a,"marginLeft"))||0);return{top:b,left:c}},setOffset:function(a,b,c){var e=d.css(a,"position");e==="static"&&(a.style.position="relative");var f=d(a),g=f.offset(),h=d.css(a,"top"),i=d.css(a,"left"),j=e==="absolute"&&d.inArray("auto",[h,i])>-1,k={},l={},m,n;j&&(l=f.position()),m=j?l.top:parseInt(h,10)||0,n=j?l.left:parseInt(i,10)||0,d.isFunction(b)&&(b=b.call(a,c,g)),b.top!=null&&(k.top=b.top-g.top+m),b.left!=null&&(k.left=b.left-g.left+n),"using"in b?b.using.call(a,k):f.css(k)}},d.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),c=this.offset(),e=bZ.test(b[0].nodeName)?{top:0,left:0}:b.offset();c.top-=parseFloat(d.css(a,"marginTop"))||0,c.left-=parseFloat(d.css(a,"marginLeft"))||0,e.top+=parseFloat(d.css(b[0],"borderTopWidth"))||0,e.left+=parseFloat(d.css(b[0],"borderLeftWidth"))||0;return{top:c.top-e.top,left:c.left-e.left}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||c.body;while(a&&(!bZ.test(a.nodeName)&&d.css(a,"position")==="static"))a=a.offsetParent;return a})}}),d.each(["Left","Top"],function(a,c){var e="scroll"+c;d.fn[e]=function(c){var f=this[0],g;if(!f)return null;if(c!==b)return this.each(function(){g=b$(this),g?g.scrollTo(a?d(g).scrollLeft():c,a?c:d(g).scrollTop()):this[e]=c});g=b$(f);return g?"pageXOffset"in g?g[a?"pageYOffset":"pageXOffset"]:d.support.boxModel&&g.document.documentElement[e]||g.document.body[e]:f[e]}}),d.each(["Height","Width"],function(a,c){var e=c.toLowerCase();d.fn["inner"+c]=function(){return this[0]?parseFloat(d.css(this[0],e,"padding")):null},d.fn["outer"+c]=function(a){return this[0]?parseFloat(d.css(this[0],e,a?"margin":"border")):null},d.fn[e]=function(a){var f=this[0];if(!f)return a==null?null:this;if(d.isFunction(a))return this.each(function(b){var c=d(this);c[e](a.call(this,b,c[e]()))});if(d.isWindow(f)){var g=f.document.documentElement["client"+c];return f.document.compatMode==="CSS1Compat"&&g||f.document.body["client"+c]||g}if(f.nodeType===9)return Math.max(f.documentElement["client"+c],f.body["scroll"+c],f.documentElement["scroll"+c],f.body["offset"+c],f.documentElement["offset"+c]);if(a===b){var h=d.css(f,e),i=parseFloat(h);return d.isNaN(i)?h:i}return this.css(e,typeof a==="string"?a:a+"px")}})})(window); diff --git a/src/lib/flask_debugtoolbar/static/js/jquery.tablesorter.js b/src/lib/flask_debugtoolbar/static/js/jquery.tablesorter.js deleted file mode 100644 index 9b58731..0000000 --- a/src/lib/flask_debugtoolbar/static/js/jquery.tablesorter.js +++ /dev/null @@ -1,1031 +0,0 @@ -/* - * - * TableSorter 2.0 - Client-side table sorting with ease! 
- * Version 2.0.5b - * @requires jQuery v1.2.3 - * - * Copyright (c) 2007 Christian Bach - * Examples and docs at: http://tablesorter.com - * Dual licensed under the MIT and GPL licenses: - * http://www.opensource.org/licenses/mit-license.php - * http://www.gnu.org/licenses/gpl.html - * - */ -/** - * - * @description Create a sortable table with multi-column sorting capabilities - * - * @example $('table').tablesorter(); - * @desc Create a simple tablesorter interface. - * - * @example $('table').tablesorter({ sortList:[[0,0],[1,0]] }); - * @desc Create a tablesorter interface and sort on the first and second column headers. - * - * @example $('table').tablesorter({ headers: { 0: { sorter: false}, 1: {sorter: false} } }); - * - * @desc Create a tablesorter interface and disable sorting on the first and second column headers. - * - * - * @example $('table').tablesorter({ headers: { 0: {sorter:"integer"}, 1: {sorter:"currency"} } }); - * - * @desc Create a tablesorter interface and set a column parser for the first - * and second column. - * - * - * @param Object - * settings An object literal containing key/value pairs to provide - * optional settings. - * - * - * @option String cssHeader (optional) A string of the class name to be appended - * to sortable tr elements in the thead of the table. Default value: - * "header" - * - * @option String cssAsc (optional) A string of the class name to be appended to - * sortable tr elements in the thead on an ascending sort. Default value: - * "headerSortUp" - * - * @option String cssDesc (optional) A string of the class name to be appended - * to sortable tr elements in the thead on a descending sort. Default - * value: "headerSortDown" - * - * @option String sortInitialOrder (optional) A string of the initial sorting - * order; can be "asc" or "desc". Default value: "asc" - * - * @option String sortMultiSortKey (optional) A string of the multi-column sort - * key. Default value: "shiftKey" - * - * @option String textExtraction (optional) A string of the text-extraction - * method to use. For complex html structures inside td cells set this - * option to "complex"; on large tables the complex option can be slow. - * Default value: "simple" - * - * @option Object headers (optional) An object containing per-column header - * options, such as a custom sorter or disabled sorting. Default - * value: null - * - * @option Array sortList (optional) An array containing the default sorting - * rules. This option lets you specify a default sort order. Default - * value: null - * - * @option Array sortForce (optional) An array containing forced sorting rules. - * This option lets you specify a default sorting rule, which is - * prepended to user-selected rules. Default value: null - * - * @option Boolean sortLocaleCompare (optional) Boolean flag indicating whether - * to use the String.localeCompare method or not. Default value: true. - * - * - * @option Array sortAppend (optional) An array containing forced sorting rules. - * This option lets you specify a default sorting rule, which is - * appended to user-selected rules. Default value: null - * - * @option Boolean widthFixed (optional) Boolean flag indicating if tablesorter - * should apply fixed widths to the table columns. This is useful when - * using the pager companion plugin. This option requires the dimension - * jQuery plugin. Default value: false - * - * @option Boolean cancelSelection (optional) Boolean flag indicating if - * tablesorter should cancel selection of the table headers text.
- * Default value: true - * - * @option Boolean debug (optional) Boolean flag indicating if tablesorter - * should display debuging information usefull for development. - * - * @type jQuery - * - * @name tablesorter - * - * @cat Plugins/Tablesorter - * - * @author Christian Bach/christian.bach@polyester.se - */ - -(function ($) { - $.extend({ - tablesorter: new - function () { - - var parsers = [], - widgets = []; - - this.defaults = { - cssHeader: "header", - cssAsc: "headerSortUp", - cssDesc: "headerSortDown", - cssChildRow: "expand-child", - sortInitialOrder: "asc", - sortMultiSortKey: "shiftKey", - sortForce: null, - sortAppend: null, - sortLocaleCompare: true, - textExtraction: "simple", - parsers: {}, widgets: [], - widgetZebra: { - css: ["even", "odd"] - }, headers: {}, widthFixed: false, - cancelSelection: true, - sortList: [], - headerList: [], - dateFormat: "us", - decimal: '/\.|\,/g', - onRenderHeader: null, - selectorHeaders: 'thead th', - debug: false - }; - - /* debuging utils */ - - function benchmark(s, d) { - log(s + "," + (new Date().getTime() - d.getTime()) + "ms"); - } - - this.benchmark = benchmark; - - function log(s) { - if (typeof console != "undefined" && typeof console.debug != "undefined") { - console.log(s); - } else { - alert(s); - } - } - - /* parsers utils */ - - function buildParserCache(table, $headers) { - - if (table.config.debug) { - var parsersDebug = ""; - } - - if (table.tBodies.length == 0) return; // In the case of empty tables - var rows = table.tBodies[0].rows; - - if (rows[0]) { - - var list = [], - cells = rows[0].cells, - l = cells.length; - - for (var i = 0; i < l; i++) { - - var p = false; - - if ($.metadata && ($($headers[i]).metadata() && $($headers[i]).metadata().sorter)) { - - p = getParserById($($headers[i]).metadata().sorter); - - } else if ((table.config.headers[i] && table.config.headers[i].sorter)) { - - p = getParserById(table.config.headers[i].sorter); - } - if (!p) { - - p = detectParserForColumn(table, rows, -1, i); - } - - if (table.config.debug) { - parsersDebug += "column:" + i + " parser:" + p.id + "\n"; - } - - list.push(p); - } - } - - if (table.config.debug) { - log(parsersDebug); - } - - return list; - }; - - function detectParserForColumn(table, rows, rowIndex, cellIndex) { - var l = parsers.length, - node = false, - nodeValue = false, - keepLooking = true; - while (nodeValue == '' && keepLooking) { - rowIndex++; - if (rows[rowIndex]) { - node = getNodeFromRowAndCellIndex(rows, rowIndex, cellIndex); - nodeValue = trimAndGetNodeText(table.config, node); - if (table.config.debug) { - log('Checking if value was empty on row:' + rowIndex); - } - } else { - keepLooking = false; - } - } - for (var i = 1; i < l; i++) { - if (parsers[i].is(nodeValue, table, node)) { - return parsers[i]; - } - } - // 0 is always the generic parser (text) - return parsers[0]; - } - - function getNodeFromRowAndCellIndex(rows, rowIndex, cellIndex) { - return rows[rowIndex].cells[cellIndex]; - } - - function trimAndGetNodeText(config, node) { - return $.trim(getElementText(config, node)); - } - - function getParserById(name) { - var l = parsers.length; - for (var i = 0; i < l; i++) { - if (parsers[i].id.toLowerCase() == name.toLowerCase()) { - return parsers[i]; - } - } - return false; - } - - /* utils */ - - function buildCache(table) { - - if (table.config.debug) { - var cacheTime = new Date(); - } - - var totalRows = (table.tBodies[0] && table.tBodies[0].rows.length) || 0, - totalCells = (table.tBodies[0].rows[0] && 
table.tBodies[0].rows[0].cells.length) || 0, - parsers = table.config.parsers, - cache = { - row: [], - normalized: [] - }; - - for (var i = 0; i < totalRows; ++i) { - - /** Add the table data to main data array */ - var c = $(table.tBodies[0].rows[i]), - cols = []; - - // if this is a child row, add it to the last row's children and - // continue to the next row - if (c.hasClass(table.config.cssChildRow)) { - cache.row[cache.row.length - 1] = cache.row[cache.row.length - 1].add(c); - // go to the next for loop - continue; - } - - cache.row.push(c); - - for (var j = 0; j < totalCells; ++j) { - cols.push(parsers[j].format(getElementText(table.config, c[0].cells[j]), table, c[0].cells[j])); - } - - cols.push(cache.normalized.length); // add position for rowCache - cache.normalized.push(cols); - cols = null; - }; - - if (table.config.debug) { - benchmark("Building cache for " + totalRows + " rows:", cacheTime); - } - - return cache; - }; - - function getElementText(config, node) { - - var text = ""; - - if (!node) return ""; - - if (!config.supportsTextContent) config.supportsTextContent = node.textContent || false; - - if (config.textExtraction == "simple") { - if (config.supportsTextContent) { - text = node.textContent; - } else { - if (node.childNodes[0] && node.childNodes[0].hasChildNodes()) { - text = node.childNodes[0].innerHTML; - } else { - text = node.innerHTML; - } - } - } else { - if (typeof(config.textExtraction) == "function") { - text = config.textExtraction(node); - } else { - text = $(node).text(); - } - } - return text; - } - - function appendToTable(table, cache) { - - if (table.config.debug) { - var appendTime = new Date() - } - - var c = cache, - r = c.row, - n = c.normalized, - totalRows = n.length, - checkCell = (n[0].length - 1), - tableBody = $(table.tBodies[0]), - rows = []; - - - for (var i = 0; i < totalRows; i++) { - var pos = n[i][checkCell]; - - rows.push(r[pos]); - - if (!table.config.appender) { - - //var o = ; - var l = r[pos].length; - for (var j = 0; j < l; j++) { - tableBody[0].appendChild(r[pos][j]); - } - - // - } - } - - - - if (table.config.appender) { - - table.config.appender(table, rows); - } - - rows = null; - - if (table.config.debug) { - benchmark("Rebuilt table:", appendTime); - } - - // apply table widgets - applyWidget(table); - - // trigger sortend - setTimeout(function () { - $(table).trigger("sortEnd"); - }, 0); - - }; - - function buildHeaders(table) { - - if (table.config.debug) { - var time = new Date(); - } - - var meta = ($.metadata) ? 
true : false; - - var header_index = computeTableHeaderCellIndexes(table); - - $tableHeaders = $(table.config.selectorHeaders, table).each(function (index) { - - this.column = header_index[this.parentNode.rowIndex + "-" + this.cellIndex]; - // this.column = index; - this.order = formatSortingOrder(table.config.sortInitialOrder); - - - this.count = this.order; - - if (checkHeaderMetadata(this) || checkHeaderOptions(table, index)) this.sortDisabled = true; - if (checkHeaderOptionsSortingLocked(table, index)) this.order = this.lockedOrder = checkHeaderOptionsSortingLocked(table, index); - - if (!this.sortDisabled) { - var $th = $(this).addClass(table.config.cssHeader); - if (table.config.onRenderHeader) table.config.onRenderHeader.apply($th); - } - - // add cell to headerList - table.config.headerList[index] = this; - }); - - if (table.config.debug) { - benchmark("Built headers:", time); - log($tableHeaders); - } - - return $tableHeaders; - - }; - - // from: - // http://www.javascripttoolbox.com/lib/table/examples.php - // http://www.javascripttoolbox.com/temp/table_cellindex.html - - - function computeTableHeaderCellIndexes(t) { - var matrix = []; - var lookup = {}; - var thead = t.getElementsByTagName('THEAD')[0]; - var trs = thead.getElementsByTagName('TR'); - - for (var i = 0; i < trs.length; i++) { - var cells = trs[i].cells; - for (var j = 0; j < cells.length; j++) { - var c = cells[j]; - - var rowIndex = c.parentNode.rowIndex; - var cellId = rowIndex + "-" + c.cellIndex; - var rowSpan = c.rowSpan || 1; - var colSpan = c.colSpan || 1 - var firstAvailCol; - if (typeof(matrix[rowIndex]) == "undefined") { - matrix[rowIndex] = []; - } - // Find first available column in the first row - for (var k = 0; k < matrix[rowIndex].length + 1; k++) { - if (typeof(matrix[rowIndex][k]) == "undefined") { - firstAvailCol = k; - break; - } - } - lookup[cellId] = firstAvailCol; - for (var k = rowIndex; k < rowIndex + rowSpan; k++) { - if (typeof(matrix[k]) == "undefined") { - matrix[k] = []; - } - var matrixrow = matrix[k]; - for (var l = firstAvailCol; l < firstAvailCol + colSpan; l++) { - matrixrow[l] = "x"; - } - } - } - } - return lookup; - } - - function checkCellColSpan(table, rows, row) { - var arr = [], - r = table.tHead.rows, - c = r[row].cells; - - for (var i = 0; i < c.length; i++) { - var cell = c[i]; - - if (cell.colSpan > 1) { - arr = arr.concat(checkCellColSpan(table, headerArr, row++)); - } else { - if (table.tHead.length == 1 || (cell.rowSpan > 1 || !r[row + 1])) { - arr.push(cell); - } - // headerArr[row] = (i+row); - } - } - return arr; - }; - - function checkHeaderMetadata(cell) { - if (($.metadata) && ($(cell).metadata().sorter === false)) { - return true; - }; - return false; - } - - function checkHeaderOptions(table, i) { - if ((table.config.headers[i]) && (table.config.headers[i].sorter === false)) { - return true; - }; - return false; - } - - function checkHeaderOptionsSortingLocked(table, i) { - if ((table.config.headers[i]) && (table.config.headers[i].lockedOrder)) return table.config.headers[i].lockedOrder; - return false; - } - - function applyWidget(table) { - var c = table.config.widgets; - var l = c.length; - for (var i = 0; i < l; i++) { - - getWidgetById(c[i]).format(table); - } - - } - - function getWidgetById(name) { - var l = widgets.length; - for (var i = 0; i < l; i++) { - if (widgets[i].id.toLowerCase() == name.toLowerCase()) { - return widgets[i]; - } - } - }; - - function formatSortingOrder(v) { - if (typeof(v) != "Number") { - return (v.toLowerCase() == "desc") ? 
1 : 0; - } else { - return (v == 1) ? 1 : 0; - } - } - - function isValueInArray(v, a) { - var l = a.length; - for (var i = 0; i < l; i++) { - if (a[i][0] == v) { - return true; - } - } - return false; - } - - function setHeadersCss(table, $headers, list, css) { - // remove all header information - $headers.removeClass(css[0]).removeClass(css[1]); - - var h = []; - $headers.each(function (offset) { - if (!this.sortDisabled) { - h[this.column] = $(this); - } - }); - - var l = list.length; - for (var i = 0; i < l; i++) { - h[list[i][0]].addClass(css[list[i][1]]); - } - } - - function fixColumnWidth(table, $headers) { - var c = table.config; - if (c.widthFixed) { - var colgroup = $(''); - $("tr:first td", table.tBodies[0]).each(function () { - colgroup.append($('').css('width', $(this).width())); - }); - $(table).prepend(colgroup); - }; - } - - function updateHeaderSortCount(table, sortList) { - var c = table.config, - l = sortList.length; - for (var i = 0; i < l; i++) { - var s = sortList[i], - o = c.headerList[s[0]]; - o.count = s[1]; - o.count++; - } - } - - /* sorting methods */ - - function multisort(table, sortList, cache) { - - if (table.config.debug) { - var sortTime = new Date(); - } - - var dynamicExp = "var sortWrapper = function(a,b) {", - l = sortList.length; - - // TODO: inline functions. - for (var i = 0; i < l; i++) { - - var c = sortList[i][0]; - var order = sortList[i][1]; - // var s = (getCachedSortType(table.config.parsers,c) == "text") ? - // ((order == 0) ? "sortText" : "sortTextDesc") : ((order == 0) ? - // "sortNumeric" : "sortNumericDesc"); - // var s = (table.config.parsers[c].type == "text") ? ((order == 0) - // ? makeSortText(c) : makeSortTextDesc(c)) : ((order == 0) ? - // makeSortNumeric(c) : makeSortNumericDesc(c)); - var s = (table.config.parsers[c].type == "text") ? ((order == 0) ? makeSortFunction("text", "asc", c) : makeSortFunction("text", "desc", c)) : ((order == 0) ? makeSortFunction("numeric", "asc", c) : makeSortFunction("numeric", "desc", c)); - var e = "e" + i; - - dynamicExp += "var " + e + " = " + s; // + "(a[" + c + "],b[" + c - // + "]); "; - dynamicExp += "if(" + e + ") { return " + e + "; } "; - dynamicExp += "else { "; - - } - - // if value is the same keep orignal order - var orgOrderCol = cache.normalized[0].length - 1; - dynamicExp += "return a[" + orgOrderCol + "]-b[" + orgOrderCol + "];"; - - for (var i = 0; i < l; i++) { - dynamicExp += "}; "; - } - - dynamicExp += "return 0; "; - dynamicExp += "}; "; - - if (table.config.debug) { - benchmark("Evaling expression:" + dynamicExp, new Date()); - } - - eval(dynamicExp); - - cache.normalized.sort(sortWrapper); - - if (table.config.debug) { - benchmark("Sorting on " + sortList.toString() + " and dir " + order + " time:", sortTime); - } - - return cache; - }; - - function makeSortFunction(type, direction, index) { - var a = "a[" + index + "]", - b = "b[" + index + "]"; - if (type == 'text' && direction == 'asc') { - return "(" + a + " == " + b + " ? 0 : (" + a + " === null ? Number.POSITIVE_INFINITY : (" + b + " === null ? Number.NEGATIVE_INFINITY : (" + a + " < " + b + ") ? -1 : 1 )));"; - } else if (type == 'text' && direction == 'desc') { - return "(" + a + " == " + b + " ? 0 : (" + a + " === null ? Number.POSITIVE_INFINITY : (" + b + " === null ? Number.NEGATIVE_INFINITY : (" + b + " < " + a + ") ? -1 : 1 )));"; - } else if (type == 'numeric' && direction == 'asc') { - return "(" + a + " === null && " + b + " === null) ? 0 :(" + a + " === null ? 
Number.POSITIVE_INFINITY : (" + b + " === null ? Number.NEGATIVE_INFINITY : " + a + " - " + b + "));"; - } else if (type == 'numeric' && direction == 'desc') { - return "(" + a + " === null && " + b + " === null) ? 0 :(" + a + " === null ? Number.POSITIVE_INFINITY : (" + b + " === null ? Number.NEGATIVE_INFINITY : " + b + " - " + a + "));"; - } - }; - - function makeSortText(i) { - return "((a[" + i + "] < b[" + i + "]) ? -1 : ((a[" + i + "] > b[" + i + "]) ? 1 : 0));"; - }; - - function makeSortTextDesc(i) { - return "((b[" + i + "] < a[" + i + "]) ? -1 : ((b[" + i + "] > a[" + i + "]) ? 1 : 0));"; - }; - - function makeSortNumeric(i) { - return "a[" + i + "]-b[" + i + "];"; - }; - - function makeSortNumericDesc(i) { - return "b[" + i + "]-a[" + i + "];"; - }; - - function sortText(a, b) { - if (table.config.sortLocaleCompare) return a.localeCompare(b); - return ((a < b) ? -1 : ((a > b) ? 1 : 0)); - }; - - function sortTextDesc(a, b) { - if (table.config.sortLocaleCompare) return b.localeCompare(a); - return ((b < a) ? -1 : ((b > a) ? 1 : 0)); - }; - - function sortNumeric(a, b) { - return a - b; - }; - - function sortNumericDesc(a, b) { - return b - a; - }; - - function getCachedSortType(parsers, i) { - return parsers[i].type; - }; /* public methods */ - this.construct = function (settings) { - return this.each(function () { - // if no thead or tbody quit. - if (!this.tHead || !this.tBodies) return; - // declare - var $this, $document, $headers, cache, config, shiftDown = 0, - sortOrder; - // new blank config object - this.config = {}; - // merge and extend. - config = $.extend(this.config, $.tablesorter.defaults, settings); - // store common expression for speed - $this = $(this); - // save the settings where they read - $.data(this, "tablesorter", config); - // build headers - $headers = buildHeaders(this); - // try to auto detect column type, and store in tables config - this.config.parsers = buildParserCache(this, $headers); - // build the cache for the tbody cells - cache = buildCache(this); - // get the css class names, could be done else where. - var sortCSS = [config.cssDesc, config.cssAsc]; - // fixate columns if the users supplies the fixedWidth option - fixColumnWidth(this); - // apply event handling to headers - // this is to big, perhaps break it out? - $headers.click( - - function (e) { - var totalRows = ($this[0].tBodies[0] && $this[0].tBodies[0].rows.length) || 0; - if (!this.sortDisabled && totalRows > 0) { - // Only call sortStart if sorting is - // enabled. - $this.trigger("sortStart"); - // store exp, for speed - var $cell = $(this); - // get current column index - var i = this.column; - // get current column sort order - this.order = this.count++ % 2; - // always sort on the locked order. - if(this.lockedOrder) this.order = this.lockedOrder; - - // user only whants to sort on one - // column - if (!e[config.sortMultiSortKey]) { - // flush the sort list - config.sortList = []; - if (config.sortForce != null) { - var a = config.sortForce; - for (var j = 0; j < a.length; j++) { - if (a[j][0] != i) { - config.sortList.push(a[j]); - } - } - } - // add column to sort list - config.sortList.push([i, this.order]); - // multi column sorting - } else { - // the user has clicked on an all - // ready sortet column. - if (isValueInArray(i, config.sortList)) { - // revers the sorting direction - // for all tables. 
- for (var j = 0; j < config.sortList.length; j++) { - var s = config.sortList[j], - o = config.headerList[s[0]]; - if (s[0] == i) { - o.count = s[1]; - o.count++; - s[1] = o.count % 2; - } - } - } else { - // add column to sort list array - config.sortList.push([i, this.order]); - } - }; - setTimeout(function () { - // set css for headers - setHeadersCss($this[0], $headers, config.sortList, sortCSS); - appendToTable( - $this[0], multisort( - $this[0], config.sortList, cache) - ); - }, 1); - // stop normal event by returning false - return false; - } - // cancel selection - }).mousedown(function () { - if (config.cancelSelection) { - this.onselectstart = function () { - return false - }; - return false; - } - }); - // apply easy methods that trigger binded events - $this.bind("update", function () { - var me = this; - setTimeout(function () { - // rebuild parsers. - me.config.parsers = buildParserCache( - me, $headers); - // rebuild the cache map - cache = buildCache(me); - }, 1); - }).bind("updateCell", function (e, cell) { - var config = this.config; - // get position from the dom. - var pos = [(cell.parentNode.rowIndex - 1), cell.cellIndex]; - // update cache - cache.normalized[pos[0]][pos[1]] = config.parsers[pos[1]].format( - getElementText(config, cell), cell); - }).bind("sorton", function (e, list) { - $(this).trigger("sortStart"); - config.sortList = list; - // update and store the sortlist - var sortList = config.sortList; - // update header count index - updateHeaderSortCount(this, sortList); - // set css for headers - setHeadersCss(this, $headers, sortList, sortCSS); - // sort the table and append it to the dom - appendToTable(this, multisort(this, sortList, cache)); - }).bind("appendCache", function () { - appendToTable(this, cache); - }).bind("applyWidgetId", function (e, id) { - getWidgetById(id).format(this); - }).bind("applyWidgets", function () { - // apply widgets - applyWidget(this); - }); - if ($.metadata && ($(this).metadata() && $(this).metadata().sortlist)) { - config.sortList = $(this).metadata().sortlist; - } - // if user has supplied a sort list to constructor. - if (config.sortList.length > 0) { - $this.trigger("sorton", [config.sortList]); - } - // apply widgets - applyWidget(this); - }); - }; - this.addParser = function (parser) { - var l = parsers.length, - a = true; - for (var i = 0; i < l; i++) { - if (parsers[i].id.toLowerCase() == parser.id.toLowerCase()) { - a = false; - } - } - if (a) { - parsers.push(parser); - }; - }; - this.addWidget = function (widget) { - widgets.push(widget); - }; - this.formatFloat = function (s) { - var i = parseFloat(s); - return (isNaN(i)) ? 0 : i; - }; - this.formatInt = function (s) { - var i = parseInt(s); - return (isNaN(i)) ? 0 : i; - }; - this.isDigit = function (s, config) { - // replace all an wanted chars and match. 
- return /^[-+]?\d*$/.test($.trim(s.replace(/[,.']/g, ''))); - }; - this.clearTableBody = function (table) { - if ($.browser.msie) { - function empty() { - while (this.firstChild) - this.removeChild(this.firstChild); - } - empty.apply(table.tBodies[0]); - } else { - table.tBodies[0].innerHTML = ""; - } - }; - } - }); - - // extend plugin scope - $.fn.extend({ - tablesorter: $.tablesorter.construct - }); - - // make shortcut - var ts = $.tablesorter; - - // add default parsers - ts.addParser({ - id: "text", - is: function (s) { - return true; - }, format: function (s) { - return $.trim(s.toLocaleLowerCase()); - }, type: "text" - }); - - ts.addParser({ - id: "digit", - is: function (s, table) { - var c = table.config; - return $.tablesorter.isDigit(s, c); - }, format: function (s) { - return $.tablesorter.formatFloat(s); - }, type: "numeric" - }); - - ts.addParser({ - id: "currency", - is: function (s) { - return /^[£$€?.]/.test(s); - }, format: function (s) { - return $.tablesorter.formatFloat(s.replace(new RegExp(/[£$€]/g), "")); - }, type: "numeric" - }); - - ts.addParser({ - id: "ipAddress", - is: function (s) { - return /^\d{2,3}[\.]\d{2,3}[\.]\d{2,3}[\.]\d{2,3}$/.test(s); - }, format: function (s) { - var a = s.split("."), - r = "", - l = a.length; - for (var i = 0; i < l; i++) { - var item = a[i]; - if (item.length == 2) { - r += "0" + item; - } else { - r += item; - } - } - return $.tablesorter.formatFloat(r); - }, type: "numeric" - }); - - ts.addParser({ - id: "url", - is: function (s) { - return /^(https?|ftp|file):\/\/$/.test(s); - }, format: function (s) { - return jQuery.trim(s.replace(new RegExp(/(https?|ftp|file):\/\//), '')); - }, type: "text" - }); - - ts.addParser({ - id: "isoDate", - is: function (s) { - return /^\d{4}[\/-]\d{1,2}[\/-]\d{1,2}$/.test(s); - }, format: function (s) { - return $.tablesorter.formatFloat((s != "") ? new Date(s.replace( - new RegExp(/-/g), "/")).getTime() : "0"); - }, type: "numeric" - }); - - ts.addParser({ - id: "percent", - is: function (s) { - return /\%$/.test($.trim(s)); - }, format: function (s) { - return $.tablesorter.formatFloat(s.replace(new RegExp(/%/g), "")); - }, type: "numeric" - }); - - ts.addParser({ - id: "usLongDate", - is: function (s) { - return s.match(new RegExp(/^[A-Za-z]{3,10}\.? 
[0-9]{1,2}, ([0-9]{4}|'?[0-9]{2}) (([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(AM|PM)))$/)); - }, format: function (s) { - return $.tablesorter.formatFloat(new Date(s).getTime()); - }, type: "numeric" - }); - - ts.addParser({ - id: "shortDate", - is: function (s) { - return /\d{1,2}[\/\-]\d{1,2}[\/\-]\d{2,4}/.test(s); - }, format: function (s, table) { - var c = table.config; - s = s.replace(/\-/g, "/"); - if (c.dateFormat == "us") { - // reformat the string in ISO format - s = s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/, "$3/$1/$2"); - } else if (c.dateFormat == "uk") { - // reformat the string in ISO format - s = s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/, "$3/$2/$1"); - } else if (c.dateFormat == "dd/mm/yy" || c.dateFormat == "dd-mm-yy") { - s = s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{2})/, "$1/$2/$3"); - } - return $.tablesorter.formatFloat(new Date(s).getTime()); - }, type: "numeric" - }); - ts.addParser({ - id: "time", - is: function (s) { - return /^(([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(am|pm)))$/.test(s); - }, format: function (s) { - return $.tablesorter.formatFloat(new Date("2000/01/01 " + s).getTime()); - }, type: "numeric" - }); - ts.addParser({ - id: "metadata", - is: function (s) { - return false; - }, format: function (s, table, cell) { - var c = table.config, - p = (!c.parserMetadataName) ? 'sortValue' : c.parserMetadataName; - return $(cell).metadata()[p]; - }, type: "numeric" - }); - // add default widgets - ts.addWidget({ - id: "zebra", - format: function (table) { - if (table.config.debug) { - var time = new Date(); - } - var $tr, row = -1, - odd; - // loop through the visible rows - $("tr:visible", table.tBodies[0]).each(function (i) { - $tr = $(this); - // style children rows the same way the parent - // row was styled - if (!$tr.hasClass(table.config.cssChildRow)) row++; - odd = (row % 2 == 0); - $tr.removeClass( - table.config.widgetZebra.css[odd ? 0 : 1]).addClass( - table.config.widgetZebra.css[odd ? 1 : 0]) - }); - if (table.config.debug) { - $.tablesorter.benchmark("Applying Zebra widget", time); - } - } - }); -})(jQuery); \ No newline at end of file diff --git a/src/lib/flask_debugtoolbar/static/js/toolbar.js b/src/lib/flask_debugtoolbar/static/js/toolbar.js deleted file mode 100644 index a1b80d0..0000000 --- a/src/lib/flask_debugtoolbar/static/js/toolbar.js +++ /dev/null @@ -1,193 +0,0 @@ -(function($) { - $.cookie = function(name, value, options) { if (typeof value != 'undefined') { options = options || {}; if (value === null) { value = ''; options.expires = -1; } var expires = ''; if (options.expires && (typeof options.expires == 'number' || options.expires.toUTCString)) { var date; if (typeof options.expires == 'number') { date = new Date(); date.setTime(date.getTime() + (options.expires * 24 * 60 * 60 * 1000)); } else { date = options.expires; } expires = '; expires=' + date.toUTCString(); } var path = options.path ? '; path=' + (options.path) : ''; var domain = options.domain ? '; domain=' + (options.domain) : ''; var secure = options.secure ? 
'; secure' : ''; document.cookie = [name, '=', encodeURIComponent(value), expires, path, domain, secure].join(''); } else { var cookieValue = null; if (document.cookie && document.cookie != '') { var cookies = document.cookie.split(';'); for (var i = 0; i < cookies.length; i++) { var cookie = $.trim(cookies[i]); if (cookie.substring(0, name.length + 1) == (name + '=')) { cookieValue = decodeURIComponent(cookie.substring(name.length + 1)); break; } } } return cookieValue; } }; - $('head').append(''); - var COOKIE_NAME = 'fldt'; - var COOKIE_NAME_ACTIVE = COOKIE_NAME +'_active'; - var fldt = { - init: function() { - $('#flDebug').show(); - var current = null; - $('#flDebugPanelList li a').click(function() { - if (!this.className) { - return false; - } - current = $('#flDebug #' + this.className + '-content'); - if (current.is(':visible')) { - $(document).trigger('close.flDebug'); - $(this).parent().removeClass('active'); - } else { - $('.panelContent').hide(); // Hide any that are already open - current.show(); - $('#flDebugToolbar li').removeClass('active'); - $(this).parent().addClass('active'); - } - return false; - }); - $('#flDebugPanelList li .switch').click(function() { - var $panel = $(this).parent(); - var $this = $(this); - var dom_id = $panel.attr('id'); - - // Turn cookie content into an array of active panels - var active_str = $.cookie(COOKIE_NAME_ACTIVE); - var active = (active_str) ? active_str.split(';') : []; - active = $.grep(active, function(n,i) { return n != dom_id; }); - - if ($this.hasClass('active')) { - $this.removeClass('active'); - $this.addClass('inactive'); - } - else { - active.push(dom_id); - $this.removeClass('inactive'); - $this.addClass('active'); - } - - if (active.length > 0) { - $.cookie(COOKIE_NAME_ACTIVE, active.join(';'), { - path: '/', expires: 10 - }); - } - else { - $.cookie(COOKIE_NAME_ACTIVE, null, { - path: '/', expires: -1 - }); - } - }); - $('#flDebug a.flDebugClose').click(function() { - $(document).trigger('close.flDebug'); - $('#flDebugToolbar li').removeClass('active'); - return false; - }); - $('#flDebug a.remoteCall').click(function() { - $('#flDebugWindow').load(this.href, {}, function() { - $('#flDebugWindow a.flDebugBack').click(function() { - $(this).parent().parent().hide(); - return false; - }); - }); - $('#flDebugWindow').show(); - return false; - }); - $('#flDebugTemplatePanel a.flTemplateShowContext').click(function() { - fldt.toggle_arrow($(this).children('.toggleArrow')) - fldt.toggle_content($(this).parent().next()); - return false; - }); - $('#flDebugSQLPanel a.flSQLShowStacktrace').click(function() { - fldt.toggle_content($('.flSQLHideStacktraceDiv', $(this).parents('tr'))); - return false; - }); - $('#flHideToolBarButton').click(function() { - fldt.hide_toolbar(true); - return false; - }); - $('#flShowToolBarButton').click(function() { - fldt.show_toolbar(); - return false; - }); - $(document).bind('close.flDebug', function() { - // If a sub-panel is open, close that - if ($('#flDebugWindow').is(':visible')) { - $('#flDebugWindow').hide(); - return; - } - // If a panel is open, close that - if ($('.panelContent').is(':visible')) { - $('.panelContent').hide(); - return; - } - // Otherwise, just minimize the toolbar - if ($('#flDebugToolbar').is(':visible')) { - fldt.hide_toolbar(true); - return; - } - }); - if ($.cookie(COOKIE_NAME)) { - fldt.hide_toolbar(false); - } else { - fldt.show_toolbar(false); - } - $('#flDebug table.tablesorter') - .tablesorter() - .bind('sortStart', function() { - $(this).find('tbody tr') - 
.removeClass('flDebugEven') - .removeClass('flDebugOdd'); - }) - .bind('sortEnd', function() { - $(this).find('tbody tr').each(function(idx, elem) { - var even = idx % 2 == 0; - $(elem) - .toggleClass('flDebugEven', even) - .toggleClass('flDebugOdd', !even); - }); - }); - }, - toggle_content: function(elem) { - if (elem.is(':visible')) { - elem.hide(); - } else { - elem.show(); - } - }, - close: function() { - $(document).trigger('close.flDebug'); - return false; - }, - hide_toolbar: function(setCookie) { - // close any sub panels - $('#flDebugWindow').hide(); - // close all panels - $('.panelContent').hide(); - $('#flDebugToolbar li').removeClass('active'); - // finally close toolbar - $('#flDebugToolbar').hide('fast'); - $('#flDebugToolbarHandle').show(); - // Unbind keydown - $(document).unbind('keydown.flDebug'); - if (setCookie) { - $.cookie(COOKIE_NAME, 'hide', { - path: '/', - expires: 10 - }); - } - }, - show_toolbar: function(animate) { - // Set up keybindings - $(document).bind('keydown.flDebug', function(e) { - if (e.keyCode == 27) { - fldt.close(); - } - }); - $('#flDebugToolbarHandle').hide(); - if (animate) { - $('#flDebugToolbar').show('fast'); - } else { - $('#flDebugToolbar').show(); - } - $.cookie(COOKIE_NAME, null, { - path: '/', - expires: -1 - }); - }, - toggle_arrow: function(elem) { - var uarr = String.fromCharCode(0x25b6); - var darr = String.fromCharCode(0x25bc); - elem.html(elem.html() == uarr ? darr : uarr); - }, - load_href: function(href) { - $.get(href, function(data, status, xhr) { - document.open(); - document.write(xhr.responseText); - document.close(); - }); - return false; - } - }; - $(document).ready(function() { - fldt.init(); - }); - window.fldt = fldt; - -})(jQuery.noConflict(true)); diff --git a/src/lib/flask_debugtoolbar/templates/base.html b/src/lib/flask_debugtoolbar/templates/base.html deleted file mode 100644 index 0f15c74..0000000 --- a/src/lib/flask_debugtoolbar/templates/base.html +++ /dev/null @@ -1,56 +0,0 @@ - diff --git a/src/lib/flask_debugtoolbar/templates/panels/headers.html b/src/lib/flask_debugtoolbar/templates/panels/headers.html deleted file mode 100644 index d373add..0000000 --- a/src/lib/flask_debugtoolbar/templates/panels/headers.html +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - {% for key, value in headers.iteritems() %} - - - - - {% endfor %} - -
<th>Key</th><th>Value</th>
<td>{{ key|escape }}</td><td>{{ value|escape }}</td>
diff --git a/src/lib/flask_debugtoolbar/templates/panels/logger.html b/src/lib/flask_debugtoolbar/templates/panels/logger.html deleted file mode 100644 index e4e7673..0000000 --- a/src/lib/flask_debugtoolbar/templates/panels/logger.html +++ /dev/null @@ -1,26 +0,0 @@ -{% if records %} - - - - - - - - - - - {% for record in records %} - - - - - - - {% endfor %} - -
<th>Level</th><th>Time</th><th>Message</th><th>Location</th>
<td>{{ record.level }}</td><td>{{ record.time }}</td><td>{{ record.message }}</td><td>{{ record.file }}:{{ record.line }}</td>
-{% else %} -

No messages logged.

-{% endif %} - - diff --git a/src/lib/flask_debugtoolbar/templates/panels/profiler.html b/src/lib/flask_debugtoolbar/templates/panels/profiler.html deleted file mode 100644 index 59ba34a..0000000 --- a/src/lib/flask_debugtoolbar/templates/panels/profiler.html +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - {% for row in function_calls %} - - - - - - - - - {% endfor %} - -
<th>Calls</th><th>Total Time (ms)</th><th>Per Call (ms)</th><th>Cumulative Time (ms)</th><th>Per Call (ms)</th><th>Function</th>
<td>{{ row.ncalls }}</td><td>{{ row.tottime }}</td><td>{{ '%.4f'|format(row.percall) }}</td><td>{{ row.cumtime }}</td><td>{{ '%.4f'|format(row.percall_cum) }}</td><td>{{ row.filename|escape }}</td>
- diff --git a/src/lib/flask_debugtoolbar/templates/panels/request_vars.html b/src/lib/flask_debugtoolbar/templates/panels/request_vars.html deleted file mode 100644 index dad3719..0000000 --- a/src/lib/flask_debugtoolbar/templates/panels/request_vars.html +++ /dev/null @@ -1,104 +0,0 @@ - -

View information

- - - - - - - - - - - - - - - -
<th>View Function</th><th>args</th><th>kwargs</th>
<td>{{ view_func }}</td><td>{{ view_args|default("None") }}</td><td> - {% if view_kwargs.items() %} - {% for k, v in view_kwargs.items() %} - {{ k }}={{ v }}{% if not loop.last %}, {% endif %} - {% endfor %} - {% else %} - None - {% endif %} - </td>
- -{% macro show_map(map) %} - - - - - - - - - - - - - {% for key, value in map %} - - - - - {% endfor %} - -
<th>Variable</th><th>Value</th>
<td>{{ key|printable }}</td><td>{{ value|printable }}</td>
-{% endmacro %} - - -

COOKIES Variables

-{% if cookies %} - {{ show_map(cookies) }} -{% else %} -

No COOKIE data

-{% endif %} - -

SESSION Variables

-{% if session %} - {{ show_map(session) }} -{% else %} -

No SESSION data

-{% endif %} - - -{% macro show_multi_map(map) %} - - - - - - - - - {% for key, value in map %} - - - - - {% endfor %} - -
<th>Variable</th><th>Value</th>
<td>{{ key|printable }}</td> - <td> - {%- set sep = joiner() -%} - {%- for v in value -%} - {{ sep() }}{{ v|printable }} - {%- endfor -%} - </td>
-{% endmacro %} - - -

GET Variables

-{% if get %} - {{ show_multi_map(get) }} -{% else %} -

No GET data

-{% endif %} - -

POST Variables

-{% if post %} - {{ show_multi_map(post) }} -{% else %} -

No POST data

-{% endif %} diff --git a/src/lib/flask_debugtoolbar/templates/panels/sqlalchemy.html b/src/lib/flask_debugtoolbar/templates/panels/sqlalchemy.html deleted file mode 100644 index 4e86fa6..0000000 --- a/src/lib/flask_debugtoolbar/templates/panels/sqlalchemy.html +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - {% for query in queries %} - - - - - - - {% endfor %} - -
<th>(ms)</th><th>Action</th><th>Context</th><th>Query</th>
{{ '%.4f'|format(query.duration) }} - {% if query.params %} - {% if query.is_select %} - SELECT
- EXPLAIN
- {% if is_mysql %} - PROFILE
- {% endif %} - {% endif %} - {% endif %} -
- {{ query.context }} - -
-
{{ query.sql|safe }}
- {# - {% if query.stacktrace %} - - {% endif %} - - #} -
-
diff --git a/src/lib/flask_debugtoolbar/templates/panels/sqlalchemy_explain.html b/src/lib/flask_debugtoolbar/templates/panels/sqlalchemy_explain.html deleted file mode 100644 index 05df296..0000000 --- a/src/lib/flask_debugtoolbar/templates/panels/sqlalchemy_explain.html +++ /dev/null @@ -1,33 +0,0 @@ -
- Back -

SQL Explained

-
-
-
-
-
Executed SQL
-
{{ sql|safe }}
-
Time
-
{{ '%.4f'|format(duration) }} ms
-
- - - - {% for h in headers %} - - {% endfor %} - - - - {% for row in result %} - - {% for column in row %} - - {% endfor %} - - {% endfor %} - -
<th>{{ h|upper }}</th>
<td>{{ column }}</td>
-
-
- diff --git a/src/lib/flask_debugtoolbar/templates/panels/sqlalchemy_select.html b/src/lib/flask_debugtoolbar/templates/panels/sqlalchemy_select.html deleted file mode 100644 index e7ec035..0000000 --- a/src/lib/flask_debugtoolbar/templates/panels/sqlalchemy_select.html +++ /dev/null @@ -1,37 +0,0 @@ -
- Back -

SQL Explained

-
-
-
-
-
Executed SQL
-
{{ sql|safe }}
-
Time
-
{{ '%.4f'|format(duration) }} ms
-
- {% if result %} - - - - {% for h in headers %} - - {% endfor %} - - - - {% for row in result %} - - {% for column in row %} - - {% endfor %} - - {% endfor %} - -
<th>{{ h|upper }}</th>
<td>{{ column }}</td>
- {% else %} -

Empty set

- {% endif %} -
-
- diff --git a/src/lib/flask_debugtoolbar/templates/panels/template.html b/src/lib/flask_debugtoolbar/templates/panels/template.html deleted file mode 100644 index 3505e89..0000000 --- a/src/lib/flask_debugtoolbar/templates/panels/template.html +++ /dev/null @@ -1,26 +0,0 @@ -{% if templates %} - {% if editable %} - Edit templates - {% endif %} - {% for template in templates %} -

{{ template.template.name }}

- - - - - - - - - {% for k, v in template.context|dictsort %} - - - - - {% endfor %} - -
<th>Variable</th><th>Value</th>
<td>{{ k }}</td><td>{{ v|printable }}</td>
- {% endfor %} -{% else %} -

No template rendered

-{% endif %} diff --git a/src/lib/flask_debugtoolbar/templates/panels/template_editor.html b/src/lib/flask_debugtoolbar/templates/panels/template_editor.html deleted file mode 100644 index f654aa6..0000000 --- a/src/lib/flask_debugtoolbar/templates/panels/template_editor.html +++ /dev/null @@ -1,154 +0,0 @@ - - - - - - - - - - - Editing {{ templates[0].name }} - - - - - - - -
-
-
- - -
-
- -
-
-
- - - - - - - - - - - - - diff --git a/src/lib/flask_debugtoolbar/templates/panels/timer.html b/src/lib/flask_debugtoolbar/templates/panels/timer.html deleted file mode 100644 index 8b69e67..0000000 --- a/src/lib/flask_debugtoolbar/templates/panels/timer.html +++ /dev/null @@ -1,21 +0,0 @@ - - - - - - - - - - - - - {% for key, value in rows %} - - - - - {% endfor %} - -
<th>Resource</th><th>Value</th>
<td>{{ key|escape }}</td><td>{{ value|escape }}</td>
- diff --git a/src/lib/flask_debugtoolbar/templates/redirect.html b/src/lib/flask_debugtoolbar/templates/redirect.html deleted file mode 100644 index 6f49fe5..0000000 --- a/src/lib/flask_debugtoolbar/templates/redirect.html +++ /dev/null @@ -1,15 +0,0 @@ - - - Redirect intercepted - - -

Redirect ({{ redirect_code }})

-

Location: {{ redirect_to }}

-

- The Flask Debug Toolbar has intercepted a redirect to the above URL for - debug viewing purposes. You can click the above link to continue with the - redirect as normal. If you'd like to disable this feature, you can set the - config variable DEBUG_TB_INTERCEPT_REDIRECTS to False. -
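For reference, the setting mentioned above is an ordinary Flask config key; a minimal sketch, assuming `app` is your Flask application instance:

    app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False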

- - diff --git a/src/lib/flask_debugtoolbar/toolbar.py b/src/lib/flask_debugtoolbar/toolbar.py deleted file mode 100644 index 9544b46..0000000 --- a/src/lib/flask_debugtoolbar/toolbar.py +++ /dev/null @@ -1,76 +0,0 @@ -import urllib - -from flask import url_for, current_app - - - -class DebugToolbar(object): - - # default config settings - config = { - 'DEBUG_TB_INTERCEPT_REDIRECTS': True, - 'DEBUG_TB_PANELS': ( - 'flask_debugtoolbar.panels.versions.VersionDebugPanel', - 'flask_debugtoolbar.panels.timer.TimerDebugPanel', - 'flask_debugtoolbar.panels.headers.HeaderDebugPanel', - 'flask_debugtoolbar.panels.request_vars.RequestVarsDebugPanel', - 'flask_debugtoolbar.panels.template.TemplateDebugPanel', - 'flask_debugtoolbar.panels.sqlalchemy.SQLAlchemyDebugPanel', - 'flask_debugtoolbar.panels.logger.LoggingPanel', - 'flask_debugtoolbar.panels.profiler.ProfilerDebugPanel', - ) - } - - panel_classes = [] - - def __init__(self, request, jinja_env): - self.jinja_env = jinja_env - self.request = request - self.panels = [] - - self.template_context = { - 'static_path': url_for('_debug_toolbar.static', filename='') - } - - self.create_panels() - - @classmethod - def load_panels(cls, app): - cls.config.update(app.config) - - for panel_path in cls.config['DEBUG_TB_PANELS']: - dot = panel_path.rindex('.') - panel_module, panel_classname = panel_path[:dot], panel_path[dot+1:] - - try: - mod = __import__(panel_module, {}, {}, ['']) - except ImportError, e: - app.logger.warning('Disabled %s due to ImportError: %s', panel_classname, e) - continue - panel_class = getattr(mod, panel_classname) - cls.panel_classes.append(panel_class) - - def create_panels(self): - """ - Populate debug panels - """ - activated = self.request.cookies.get('fldt_active', '') - activated = urllib.unquote(activated).split(';') - - for panel_class in self.panel_classes: - panel_instance = panel_class( - context=self.template_context, - jinja_env=self.jinja_env) - - if panel_instance.dom_id() in activated: - panel_instance.is_active = True - self.panels.append(panel_instance) - - def render_toolbar(self): - context = self.template_context.copy() - context.update({'panels': self.panels}) - - template = self.jinja_env.get_template('base.html') - return template.render(**context) - - diff --git a/src/lib/flask_debugtoolbar/utils.py b/src/lib/flask_debugtoolbar/utils.py deleted file mode 100644 index 3f42c6a..0000000 --- a/src/lib/flask_debugtoolbar/utils.py +++ /dev/null @@ -1,56 +0,0 @@ -import os.path -import sys - -try: - from pygments import highlight - from pygments.formatters import HtmlFormatter - from pygments.lexers import SqlLexer - from pygments.styles import get_style_by_name - PYGMENT_STYLE = get_style_by_name('colorful') - HAVE_PYGMENTS = True -except ImportError: - HAVE_PYGMENTS = False - - -from flask import current_app - -def format_fname(value): - # If the value is not an absolute path, the it is a builtin or - # a relative file (thus a project file). - if not os.path.isabs(value): - if value.startswith(('{', '<')): - return value - if value.startswith('.' + os.path.sep): - return value - return '.' + os.path.sep + value - - # If the file is absolute and within the project root handle it as - # a project file - if value.startswith(current_app.root_path): - return "." + value[len(current_app.root_path):] - - # Loop through sys.path to find the longest match and return - # the relative path from there. 
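- # Illustrative behavior (hypothetical paths, assuming a POSIX os.path.sep and
- # current_app.root_path == '/srv/app'; these names are not from this codebase):
- #   format_fname('<template>')         -> '<template>'  (builtin marker, kept as-is)
- #   format_fname('views.py')           -> './views.py'  (relative, treated as a project file)
- #   format_fname('/srv/app/models.py') -> './models.py' (absolute, inside the app root)
- # Any other absolute path is trimmed against its longest matching sys.path
- # prefix and wrapped in angle brackets.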
- paths = sys.path - prefix = None - prefix_len = 0 - for path in sys.path: - new_prefix = os.path.commonprefix([path, value]) - if len(new_prefix) > prefix_len: - prefix = new_prefix - prefix_len = len(prefix) - - if not prefix.endswith(os.path.sep): - prefix_len -= 1 - path = value[prefix_len:] - return '<%s>' % path - -def format_sql(query, args): - if not HAVE_PYGMENTS: - return query - - return highlight( - query, - SqlLexer(encoding='utf-8'), - HtmlFormatter(encoding='utf-8', noclasses=True, style=PYGMENT_STYLE)) - diff --git a/src/lib/flaskext/__init__.py b/src/lib/flaskext/__init__.py deleted file mode 100644 index de40ea7..0000000 --- a/src/lib/flaskext/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__import__('pkg_resources').declare_namespace(__name__) diff --git a/src/lib/flaskext/wtf/__init__.py b/src/lib/flaskext/wtf/__init__.py deleted file mode 100644 index 7fb2ece..0000000 --- a/src/lib/flaskext/wtf/__init__.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flaskext.wtf - ~~~~~~~~~~~~ - - Flask-WTF extension - - :copyright: (c) 2010 by Dan Jacob. - :license: BSD, see LICENSE for more details. -""" - -try: - import sqlalchemy - _is_sqlalchemy = True -except ImportError: - _is_sqlalchemy = False - -from wtforms import fields, widgets, validators -from wtforms.fields import * -from wtforms.validators import * -from wtforms.widgets import * -from wtforms import ValidationError - -from flaskext.wtf import html5 -from flaskext.wtf.form import Form -from flaskext.wtf import recaptcha - -from flaskext.wtf.recaptcha.fields import RecaptchaField -from flaskext.wtf.recaptcha.widgets import RecaptchaWidget -from flaskext.wtf.recaptcha.validators import Recaptcha - -fields.RecaptchaField = RecaptchaField -widgets.RecaptchaWidget = RecaptchaWidget -validators.Recaptcha = Recaptcha - -from flaskext.wtf.file import FileField -from flaskext.wtf.file import FileAllowed, FileRequired, file_allowed, \ - file_required - -fields.FileField = FileField - -validators.file_allowed = file_allowed -validators.file_required = file_required -validators.FileAllowed = FileAllowed -validators.FileRequired = FileRequired - - -__all__ = ['Form', 'ValidationError', - 'fields', 'validators', 'widgets', 'html5'] - -__all__ += validators.__all__ -__all__ += fields.__all__ if hasattr(fields, '__all__') else fields.core.__all__ -__all__ += widgets.__all__ if hasattr(widgets, '__all__') else widgets.core.__all__ -__all__ += recaptcha.__all__ - -if _is_sqlalchemy: - from wtforms.ext.sqlalchemy.fields import QuerySelectField, \ - QuerySelectMultipleField - - __all__ += ['QuerySelectField', - 'QuerySelectMultipleField'] - - for field in (QuerySelectField, - QuerySelectMultipleField): - - setattr(fields, field.__name__, field) - diff --git a/src/lib/flaskext/wtf/file.py b/src/lib/flaskext/wtf/file.py deleted file mode 100644 index 8c1910a..0000000 --- a/src/lib/flaskext/wtf/file.py +++ /dev/null @@ -1,72 +0,0 @@ -from werkzeug import FileStorage -from wtforms import FileField as _FileField -from wtforms import ValidationError - -class FileField(_FileField): - """ - Werkzeug-aware subclass of **wtforms.FileField** - - Provides a `has_file()` method to check if its data is a FileStorage - instance with an actual file. 
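- 
- Usage sketch (the form and field names here are illustrative, not part of
- this codebase):
- 
-     class PhotoForm(Form):
-         photo = FileField(u'Photo')
- 
-     # in a view, after form.validate_on_submit():
-     if form.photo.has_file():
-         form.photo.data.save('/tmp/photo.png')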
- """ - @property - def file(self): - """ - :deprecated: synonym for **data** - """ - return self.data - - def has_file(self): - '''Return True iff self.data is a FileStorage with file data''' - if not isinstance(self.data, FileStorage): - return False - # filename == None => the field was present but no file was entered - # filename == '' is for a werkzeug hack: - # large file uploads will get stored in a temporary file on disk and - # show up as an extra FileStorage with name '' - return self.data.filename not in [None, '', ''] - - -class FileRequired(object): - """ - Validates that field has a file. - - `message` : error message - - You can also use the synonym **file_required**. - """ - - def __init__(self, message=None): - self.message=message - - def __call__(self, form, field): - if not field.has_file(): - raise ValidationError, self.message - -file_required = FileRequired - - -class FileAllowed(object): - """ - Validates that the uploaded file is allowed by the given - Flask-Uploads UploadSet. - - `upload_set` : instance of **flaskext.uploads.UploadSet** - - `message` : error message - - You can also use the synonym **file_allowed**. - """ - - def __init__(self, upload_set, message=None): - self.upload_set = upload_set - self.message = message - - def __call__(self, form, field): - if not field.has_file(): - return - if not self.upload_set.file_allowed(field.data, field.data.filename): - raise ValidationError, self.message - -file_allowed = FileAllowed - diff --git a/src/lib/flaskext/wtf/form.py b/src/lib/flaskext/wtf/form.py deleted file mode 100644 index 239ffa0..0000000 --- a/src/lib/flaskext/wtf/form.py +++ /dev/null @@ -1,120 +0,0 @@ -from jinja2 import Markup -from flask import request, session, current_app -from wtforms.fields import HiddenField -from wtforms.ext.csrf.session import SessionSecureForm - -class _Auto(): - '''Placeholder for unspecified variables that should be set to defaults. - - Used when None is a valid option and should not be replaced by a default. - ''' - pass - -class Form(SessionSecureForm): - - """ - Flask-specific subclass of WTForms **SessionSecureForm** class. - - Flask-specific behaviors: - If formdata is not specified, this will use flask.request.form. Explicitly - pass formdata = None to prevent this. - - csrf_context - a session or dict-like object to use when making CSRF tokens. - Default: flask.session. - - secret_key - a secret key for building CSRF tokens. If this isn't specified, - the form will take the first of these that is defined: - * the SECRET_KEY attribute on this class - * the value of flask.current_app.config["SECRET_KEY"] - * the session's secret_key - If none of these are set, raise an exception. - - csrf_enabled - whether to use CSRF protection. If False, all csrf behavior - is suppressed. 
Default: check app.config for CSRF_ENABLED, else True - - """ - def __init__(self, formdata=_Auto, obj=None, prefix='', csrf_context=None, - secret_key=None, csrf_enabled=None, *args, **kwargs): - - if csrf_enabled is None: - csrf_enabled = current_app.config.get('CSRF_ENABLED', True) - self.csrf_enabled = csrf_enabled - - if formdata is _Auto: - if self.is_submitted: - formdata = request.form - if request.files: - formdata = formdata.copy() - formdata.update(request.files) - else: - formdata = None - if self.csrf_enabled: - if csrf_context is None: - csrf_context = session - if secret_key is None: - # It wasn't passed in, check if the class has a SECRET_KEY set - secret_key = getattr(self, "SECRET_KEY", None) - if secret_key is None: - # It wasn't on the class, check the application config - secret_key = current_app.config.get("SECRET_KEY") - if secret_key is None and session: - # It's not there either! Is there a session secret key if we can - secret_key = session.secret_key - if secret_key is None: - # It wasn't anywhere. This is an error. - raise Exception('Must provide secret_key to use csrf.') - - self.SECRET_KEY = secret_key - else: - csrf_context = {} - self.SECRET_KEY = "" - super(Form, self).__init__(formdata, obj, prefix, csrf_context=csrf_context, *args, **kwargs) - - def generate_csrf_token(self, csrf_context=None): - if not self.csrf_enabled: - return None - return super(Form, self).generate_csrf_token(csrf_context) - - def validate_csrf_token(self, field): - if not self.csrf_enabled: - return - super(Form, self).validate_csrf_token(field) - - def is_submitted(self): - """ - Checks if form has been submitted. The default case is if the HTTP - method is **PUT** or **POST**. - """ - - return request and request.method in ("PUT", "POST") - - def hidden_tag(self, *fields): - """ - Wraps hidden fields in a hidden DIV tag, in order to keep XHTML - compliance. - - .. versionadded:: 0.3 - - :param fields: list of hidden field names. If not provided will render - all hidden fields, including the CSRF field. - """ - - if not fields: - fields = [f for f in self if isinstance(f, HiddenField)] - - rv = [u'
'] - for field in fields: - if isinstance(field, basestring): - field = getattr(self, field) - rv.append(unicode(field)) - rv.append(u"
") - - return Markup(u"".join(rv)) - - def validate_on_submit(self): - """ - Checks if form has been submitted and if so runs validate. This is - a shortcut, equivalent to ``form.is_submitted() and form.validate()`` - """ - return self.is_submitted() and self.validate() - diff --git a/src/lib/flaskext/wtf/html5.py b/src/lib/flaskext/wtf/html5.py deleted file mode 100644 index 4ffd8b7..0000000 --- a/src/lib/flaskext/wtf/html5.py +++ /dev/null @@ -1,125 +0,0 @@ -from wtforms import TextField -from wtforms import IntegerField as _IntegerField -from wtforms import DecimalField as _DecimalField -from wtforms import DateField as _DateField -from wtforms.widgets import Input - -class DateInput(Input): - """ - Creates `` widget - """ - input_type = "date" - - -class NumberInput(Input): - """ - Creates `` widget - """ - input_type="number" - - -class RangeInput(Input): - """ - Creates `` widget - """ - input_type="range" - - -class URLInput(Input): - """ - Creates `` widget - """ - input_type = "url" - - -class EmailInput(Input): - """ - Creates `` widget - """ - - input_type = "email" - - -class SearchInput(Input): - """ - Creates `` widget - """ - - input_type = "search" - -class TelInput(Input): - """ - Creates `` widget - """ - - input_type = "tel" - - -class SearchField(TextField): - """ - **TextField** using **SearchInput** by default - """ - widget = SearchInput() - - -class DateField(_DateField): - """ - **DateField** using **DateInput** by default - """ - - widget = DateInput() - - -class URLField(TextField): - """ - **TextField** using **URLInput** by default - """ - - widget = URLInput() - - -class EmailField(TextField): - """ - **TextField** using **EmailInput** by default - """ - - widget = EmailInput() - -class TelField(TextField): - """ - **TextField** using **TelInput** by default - """ - - widget = TelInput() - - -class IntegerField(_IntegerField): - """ - **IntegerField** using **NumberInput** by default - """ - - widget = NumberInput() - - -class DecimalField(_DecimalField): - """ - **DecimalField** using **NumberInput** by default - """ - - widget = NumberInput() - - -class IntegerRangeField(_IntegerField): - """ - **IntegerField** using **RangeInput** by default - """ - - widget = RangeInput() - - -class DecimalRangeField(_DecimalField): - """ - **DecimalField** using **RangeInput** by default - """ - - widget = RangeInput() diff --git a/src/lib/flaskext/wtf/recaptcha/__init__.py b/src/lib/flaskext/wtf/recaptcha/__init__.py deleted file mode 100644 index 272092b..0000000 --- a/src/lib/flaskext/wtf/recaptcha/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from flaskext.wtf.recaptcha import fields -from flaskext.wtf.recaptcha import validators -from flaskext.wtf.recaptcha import widgets - -__all__ = fields.__all__ + validators.__all__ + widgets.__all__ diff --git a/src/lib/flaskext/wtf/recaptcha/fields.py b/src/lib/flaskext/wtf/recaptcha/fields.py deleted file mode 100644 index 59a392d..0000000 --- a/src/lib/flaskext/wtf/recaptcha/fields.py +++ /dev/null @@ -1,16 +0,0 @@ -from wtforms.fields import Field - -from flaskext.wtf.recaptcha import widgets -from flaskext.wtf.recaptcha.validators import Recaptcha - -__all__ = ["RecaptchaField"] - -class RecaptchaField(Field): - widget = widgets.RecaptchaWidget() - - # error message if recaptcha validation fails - recaptcha_error = None - - def __init__(self, label='', validators=None, **kwargs): - validators = validators or [Recaptcha()] - super(RecaptchaField, self).__init__(label, validators, **kwargs) diff --git 
a/src/lib/flaskext/wtf/recaptcha/validators.py b/src/lib/flaskext/wtf/recaptcha/validators.py deleted file mode 100644 index 01d3d58..0000000 --- a/src/lib/flaskext/wtf/recaptcha/validators.py +++ /dev/null @@ -1,73 +0,0 @@ -import urllib2 - -from flask import request, current_app - -from wtforms import ValidationError - -from werkzeug import url_encode - -RECAPTCHA_VERIFY_SERVER = 'http://api-verify.recaptcha.net/verify' - -__all__ = ["Recaptcha"] - -class Recaptcha(object): - """Validates a ReCaptcha.""" - _error_codes = { - 'invalid-site-public-key': 'The public key for reCAPTCHA is invalid', - 'invalid-site-private-key': 'The private key for reCAPTCHA is invalid', - 'invalid-referrer': 'The public key for reCAPTCHA is not valid for ' - 'this domainin', - 'verify-params-incorrect': 'The parameters passed to reCAPTCHA ' - 'verification are incorrect', - } - - def __init__(self, message=u'Invalid word. Please try again.'): - self.message = message - - def __call__(self, form, field): - challenge = request.form.get('recaptcha_challenge_field', '') - response = request.form.get('recaptcha_response_field', '') - remote_ip = request.remote_addr - - if not challenge or not response: - raise ValidationError(field.gettext('This field is required.')) - - if not self._validate_recaptcha(challenge, response, remote_ip): - field.recaptcha_error = 'incorrect-captcha-sol' - raise ValidationError(field.gettext(self.message)) - - def _validate_recaptcha(self, challenge, response, remote_addr): - """Performs the actual validation.""" - - if current_app.testing: - return True - - try: - private_key = current_app.config['RECAPTCHA_PRIVATE_KEY'] - except KeyError: - raise RuntimeError, "No RECAPTCHA_PRIVATE_KEY config set" - - data = url_encode({ - 'privatekey': private_key, - 'remoteip': remote_addr, - 'challenge': challenge, - 'response': response - }) - - - response = urllib2.urlopen(RECAPTCHA_VERIFY_SERVER, data) - - if response.code != 200: - return False - - rv = [l.strip() for l in response.readlines()] - - if rv and rv[0] == 'true': - return True - - if len(rv) > 1: - error = rv[1] - if error in self._error_codes: - raise RuntimeError(self._error_codes[error]) - - return False diff --git a/src/lib/flaskext/wtf/recaptcha/widgets.py b/src/lib/flaskext/wtf/recaptcha/widgets.py deleted file mode 100644 index 2d2fcfb..0000000 --- a/src/lib/flaskext/wtf/recaptcha/widgets.py +++ /dev/null @@ -1,81 +0,0 @@ -""" -Custom widgets -""" -try: - import json -except ImportError: - import simplejson as json - -from flask import current_app -from werkzeug import url_encode - -# use flaskext.babel for translations, if available - -try: - from flaskext.babel import gettext as _ -except ImportError: - _ = lambda(s) : s - -RECAPTCHA_API_SERVER = 'http://api.recaptcha.net/' -RECAPTCHA_SSL_API_SERVER = 'https://www.google.com/recaptcha/api/' -RECAPTCHA_HTML = u''' - - - -''' - -__all__ = ["RecaptchaWidget"] - -class RecaptchaWidget(object): - - def recaptcha_html(self, server, query, options): - return RECAPTCHA_HTML % dict( - script_url='%schallenge?%s' % (server, query), - frame_url='%snoscript?%s' % (server, query), - options=json.dumps(options) - ) - - def __call__(self, field, error=None, **kwargs): - """Returns the recaptcha input HTML.""" - - if current_app.config.get('RECAPTCHA_USE_SSL', False): - - server = RECAPTCHA_SSL_API_SERVER - - else: - - server = RECAPTCHA_API_SERVER - - try: - public_key = current_app.config['RECAPTCHA_PUBLIC_KEY'] - except KeyError: - raise RuntimeError, "RECAPTCHA_PUBLIC_KEY config not 
set" - query_options = dict(k=public_key) - - if field.recaptcha_error is not None: - query_options['error'] = unicode(field.recaptcha_error) - - query = url_encode(query_options) - - options = { - 'theme': 'clean', - 'custom_translations': { - 'visual_challenge': _('Get a visual challenge'), - 'audio_challenge': _('Get an audio challenge'), - 'refresh_btn': _('Get a new challenge'), - 'instructions_visual': _('Type the two words:'), - 'instructions_audio': _('Type what you hear:'), - 'help_btn': _('Help'), - 'play_again': _('Play sound again'), - 'cant_hear_this': _('Download sound as MP3'), - 'incorrect_try_again': _('Incorrect. Try again.'), - } - } - - options.update(current_app.config.get('RECAPTCHA_OPTIONS', {})) - - return self.recaptcha_html(server, query, options) diff --git a/src/lib/gae_mini_profiler/.gitignore b/src/lib/gae_mini_profiler/.gitignore deleted file mode 100755 index 0d20b64..0000000 --- a/src/lib/gae_mini_profiler/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.pyc diff --git a/src/lib/gae_mini_profiler/README.md b/src/lib/gae_mini_profiler/README.md deleted file mode 100755 index b3d383d..0000000 --- a/src/lib/gae_mini_profiler/README.md +++ /dev/null @@ -1,132 +0,0 @@ -# Google App Engine Mini Profiler - -gae_mini_profiler is a quick drop-in WSGI app that provides ubiquitous profiling of your existing GAE projects. It exposes RPC statistics and CPU profiling output for users of your choosing on your production site. Only requests coming from users of your choosing will be profiled, and others will not suffer any performance degradation, so you can use this profiler to learn about production performance without stressing about slowing users down. See screenshots and features below. - -This project is heavily inspired by the Stack Exchange team's impressive [mini-profiler](http://miniprofiler.com/). - -* See it in action -* Screenshots -* Getting Started -* Features -* Dependencies -* Bonus -* FAQ - -## See it in action - -Play around with a demo App Engine applications with gae_mini_profiler enabled at [http://mini-profiler.appspot.com](http://mini-profiler.appspot.com/). - -## Screenshots - -All profiled pages have total milliseconds in corner, which can be expanded...
-[screenshot]
-...to show more details...
-[screenshot]
-...about remote procedure call performance...
-[screenshot]
-...or CPU profiler output.
-[screenshot]
-Choose between an instrumented CPU profiler (above) or a sampling profiler (below).
-[screenshot]
-Ajax requests and redirects are also profiled and added to the corner of your page.
-[screenshot]
-Any Python logging module output is also available for easy access.
-[screenshot]
- -## Getting Started - -1. Download this repository's source and copy the `gae_mini_profiler/` folder into your App Engine project's root directory. - -2. Add the following two handler definitions to `app.yaml`: - - handlers: - - url: /gae_mini_profiler/static - static_dir: gae_mini_profiler/static - - url: /gae_mini_profiler/.* - script: gae_mini_profiler.main.application - -3. Modify the WSGI application you want to profile by wrapping it with the gae_mini_profiler WSGI application. - - import gae_mini_profiler.profiler - ... - application = webapp.WSGIApplication([...]) - application = gae_mini_profiler.profiler.ProfilerWSGIMiddleware(application) - -4. Modify your template to include our javascript and stylesheets just before your ending body tag. - - There is a profiler_includes() function in gae_mini_profiler.templatetags that spits out the right code for these scripts and stylesheets. - - Using any template engine of your choice, call this function at the end of your template: - - ... - {% profiler_includes %} - - - - Note that these resources will not be loaded on requests when the profiler is disabled, so you don't need to worry about extra HTTP requests slowing down your users. - - Using Django? - You can register a simple_tag to expose this to your templates: - - register = template.create_template_register() - @register.simple_tag - def profiler_includes(): - return gae_mini_profiler.templatetags.profiler_includes() - - Using jinja2? - You can expose this function to your templates easily: - - webapp2_extras.jinja2.default_config = { - "globals": { - "profiler_includes": gae_mini_profiler.templatetags.profiler_includes - } - } - - Using anything else to generate your HTML? - Just find some way to spit the results of profiler_includes() into your HTML. Doesn't have to be anything fancy. - -5. You're all set! Now you just need to choose when you want to enable the profiler by overriding a simple function. By default it's enabled on the dev server and disabled in production. To enable it for App Engine admins in production, add the following to appengine_config.py: - - def gae_mini_profiler_should_profile_production(): - from google.appengine.api import users - return users.is_current_user_admin() - - In appengine_config.py you can override both of the following... - - def gae_mini_profiler_should_profile_production(): pass - def gae_mini_profiler_should_profile_development(): pass - - ...with any logic you want to choose when the profiler should be enabled. - - -## Features - -* Production profiling without impacting normal users -* Easily profile all requests, including ajax calls -* Summaries of RPC call types and their performance so you can quickly figure out whether datastore, memcache, or urlfetch is your bottleneck -* Redirect chains are tracked -- quickly examine the profile of not just the currently rendered request, but any preceding request that issued a 302 redirect leading to the current page. -* Share individual profile results with others by sending link -* Duplicate RPC calls are flagged for easy spotting in case you're repeating memcache or datastore queries -* Choose from either an instrumented or sampling CPU profiler to quickly figure out where your requests are spending time - -## Dependencies - -* jQuery must be included somewhere on your page. -* (Optional) If you want the fancy slider selector for the Logs output, jQuery UI must also be included with its Slider plugin. 
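Pieced together, steps 2 and 3 above are the only code changes most apps need. A minimal sketch of the wrapped application follows; `main.py`, `MainHandler`, and the route are hypothetical placeholders, and only the `ProfilerWSGIMiddleware` wrapping comes from this library:

    # main.py: a minimal sketch of steps 2 and 3. The handler and route are
    # hypothetical; only the ProfilerWSGIMiddleware wrapping is from this repo.
    from google.appengine.ext import webapp

    import gae_mini_profiler.profiler

    class MainHandler(webapp.RequestHandler):
        def get(self):
            self.response.out.write("Hello from a profiled request!")

    application = webapp.WSGIApplication([("/", MainHandler)])

    # Wrap the WSGI app; requests are then profiled whenever
    # config.should_profile() returns True (dev server by default,
    # opt-in for production).
    application = gae_mini_profiler.profiler.ProfilerWSGIMiddleware(application)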
- -## Bonus - -gae_mini_profiler is currently in production use at [Khan Academy](http://khanacademy.org). If you make good use of it elsewhere, please lemme know. - -## FAQ - -1. What's the license? [MIT licensed](http://en.wikipedia.org/wiki/MIT_License). -2. I had my appstats_RECORD_FRACTION variable set to 0.1, which means only 10% of my queries were getting profiles generated. This meant that most of the time gae_mini_profiler was failing with a javascript error, because the appstats variable was null. - - If you are using appengine_config.py to customize Appstats behavior you should add this to the top of your "appstats_should_record" method. -
-    def appstats_should_record(env):
-        # Note: the config.should_profile() bundled in this repo takes no
-        # arguments, so don't pass env through to it.
-        from gae_mini_profiler.config import should_profile
-        if should_profile():
-            return True
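For reference, the full override that this FAQ entry implies might look like the sketch below. The 0.1 fraction and the random-sampling fallback are assumptions standing in for the questioner's existing appstats_RECORD_FRACTION setup, not code from this repo:

    # appengine_config.py: a sketch under the assumptions stated above.
    import random

    def appstats_should_record(env):
        from gae_mini_profiler.config import should_profile
        if should_profile():
            # Always record when gae_mini_profiler is active, so its
            # JavaScript never sees a null Appstats payload.
            return True
        # Otherwise keep sampling roughly 10% of requests, mirroring the
        # previous sampled-recording behavior.
        return random.random() < 0.1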
diff --git a/src/lib/gae_mini_profiler/__init__.py b/src/lib/gae_mini_profiler/__init__.py deleted file mode 100755 index e69de29..0000000 diff --git a/src/lib/gae_mini_profiler/appstats_profiler.py b/src/lib/gae_mini_profiler/appstats_profiler.py deleted file mode 100755 index 0899b43..0000000 --- a/src/lib/gae_mini_profiler/appstats_profiler.py +++ /dev/null @@ -1,150 +0,0 @@ -"""RPC profiler that uses appstats to track, time, and log all RPC events. - -This is just a simple wrapper for appstats with result formatting. See -https://developers.google.com/appengine/docs/python/tools/appstats for more. -""" - -import logging -from pprint import pformat - -from google.appengine.ext.appstats import recording - -import cleanup -import unformatter -import util - -class Profile(object): - """Profiler that wraps appstats for programmatic access and reporting.""" - def __init__(self): - # Configure AppStats output, keeping a high level of request - # content so we can detect dupe RPCs more accurately - recording.config.MAX_REPR = 750 - - # Each request has its own internal appstats recorder - self.recorder = None - - def results(self): - """Return appstats results in a dictionary for template context.""" - if not self.recorder: - # If appstats fails to initialize for any reason, return an empty - # set of results. - logging.warn("Missing recorder for appstats profiler.") - return { - "calls": [], - "total_time": 0, - } - - total_call_count = 0 - total_time = 0 - calls = [] - service_totals_dict = {} - likely_dupes = False - end_offset_last = 0 - - requests_set = set() - - appstats_key = long(self.recorder.start_timestamp * 1000) - - for trace in self.recorder.traces: - total_call_count += 1 - - total_time += trace.duration_milliseconds() - - # Don't accumulate total RPC time for traces that overlap asynchronously - if trace.start_offset_milliseconds() < end_offset_last: - total_time -= (end_offset_last - trace.start_offset_milliseconds()) - end_offset_last = trace.start_offset_milliseconds() + trace.duration_milliseconds() - - service_prefix = trace.service_call_name() - - if "." 
in service_prefix: - service_prefix = service_prefix[:service_prefix.find(".")] - - if service_prefix not in service_totals_dict: - service_totals_dict[service_prefix] = { - "total_call_count": 0, - "total_time": 0, - "total_misses": 0, - } - - service_totals_dict[service_prefix]["total_call_count"] += 1 - service_totals_dict[service_prefix]["total_time"] += trace.duration_milliseconds() - - stack_frames_desc = [] - for frame in trace.call_stack_list(): - stack_frames_desc.append("%s:%s %s" % - (util.short_rpc_file_fmt(frame.class_or_file_name()), - frame.line_number(), - frame.function_name())) - - request = trace.request_data_summary() - response = trace.response_data_summary() - - likely_dupe = request in requests_set - likely_dupes = likely_dupes or likely_dupe - requests_set.add(request) - - request_short = request_pretty = None - response_short = response_pretty = None - miss = 0 - try: - request_object = unformatter.unformat(request) - response_object = unformatter.unformat(response) - - request_short, response_short, miss = cleanup.cleanup(request_object, response_object) - - request_pretty = pformat(request_object) - response_pretty = pformat(response_object) - except Exception, e: - logging.warning("Prettifying RPC calls failed.\n%s\nRequest: %s\nResponse: %s", - e, request, response, exc_info=True) - - service_totals_dict[service_prefix]["total_misses"] += miss - - calls.append({ - "service": trace.service_call_name(), - "start_offset": util.milliseconds_fmt(trace.start_offset_milliseconds()), - "total_time": util.milliseconds_fmt(trace.duration_milliseconds()), - "request": request_pretty or request, - "response": response_pretty or response, - "request_short": request_short or cleanup.truncate(request), - "response_short": response_short or cleanup.truncate(response), - "stack_frames_desc": stack_frames_desc, - "likely_dupe": likely_dupe, - }) - - service_totals = [] - for service_prefix in service_totals_dict: - service_totals.append({ - "service_prefix": service_prefix, - "total_call_count": service_totals_dict[service_prefix]["total_call_count"], - "total_misses": service_totals_dict[service_prefix]["total_misses"], - "total_time": util.milliseconds_fmt(service_totals_dict[service_prefix]["total_time"]), - }) - service_totals = sorted(service_totals, reverse=True, key=lambda service_total: float(service_total["total_time"])) - - return { - "total_call_count": total_call_count, - "total_time": util.milliseconds_fmt(total_time), - "calls": calls, - "service_totals": service_totals, - "likely_dupes": likely_dupes, - "appstats_key": appstats_key, - } - - def wrap(self, app): - """Wrap and return a WSGI application with appstats recording enabled. - - Args: - app: existing WSGI application to be wrapped - Returns: - new WSGI application that will run the original app with appstats - enabled. 
- """ - def wrapped_appstats_app(environ, start_response): - # Use this wrapper to grab the app stats recorder for RequestStats.save() - if recording.recorder_proxy.has_recorder_for_current_request(): - self.recorder = recording.recorder_proxy.get_for_current_request() - return app(environ, start_response) - - return recording.appstats_wsgi_middleware(wrapped_appstats_app) diff --git a/src/lib/gae_mini_profiler/cleanup.py b/src/lib/gae_mini_profiler/cleanup.py deleted file mode 100755 index deab177..0000000 --- a/src/lib/gae_mini_profiler/cleanup.py +++ /dev/null @@ -1,272 +0,0 @@ -import StringIO - -def cleanup(request, response): - ''' - Convert request and response dicts to a human readable format where - possible. - ''' - request_short = None - response_short = None - miss = 0 - - if "MemcacheGetRequest" in request: - request = request["MemcacheGetRequest"] - response = response["MemcacheGetResponse"] - if request: - request_short = memcache_get(request) - if response: - response_short, miss = memcache_get_response(response) - elif "MemcacheSetRequest" in request and request["MemcacheSetRequest"]: - request_short = memcache_set(request["MemcacheSetRequest"]) - elif "Query" in request and request["Query"]: - request_short = datastore_query(request["Query"]) - elif "GetRequest" in request and request["GetRequest"]: - request_short = datastore_get(request["GetRequest"]) - elif "PutRequest" in request and request["PutRequest"]: - request_short = datastore_put(request["PutRequest"]) - # todo: - # TaskQueueBulkAddRequest - # BeginTransaction - # Transaction - - return request_short, response_short, miss - -def memcache_get_response(response): - """Pretty-format a memcache.get() response. - - Arguments: - response - The memcache.get() response object, e.g. - {'item': [{'Item': {'flags': '0L', 'key': 'memcache_key', ... - - Returns: - The tuple (value, miss) where the "value" is the value of the - memcache.get() response as a string. If there are multiple response - values, as when multiple keys are passed in, the values are separated by - newline characters. "miss" is 1 if there were no items returned from - memcache and 0 otherwise. - """ - if 'item' not in response or not response['item']: - return None, 1 - - items = response['item'] - for i, item in enumerate(items): - if type(item) == dict: - if 'MemcacheGetResponse_Item' in item: - # This key exists in dev and in the 'python' production runtime. - item = item['MemcacheGetResponse_Item']['value'] - else: - # But it's a different key in the 'python27' production runtime. - item = item['Item']['value'] - item = truncate(repr(item)) - items[i] = item - response_short = "\n".join(items) - return response_short, 0 - -def memcache_get(request): - """Pretty-format a memcache.get() request. - - Arguments: - request - The memcache.get() request object, i.e. - {'key': ['memcache_key']} - - Returns: - The keys of the memcache.get() response as a string. If there are - multiple keys, they are separated by newline characters. - """ - keys = request['key'] - request_short = "\n".join([truncate(k) for k in keys]) - namespace = '' - if 'name_space' in request: - namespace = request['name_space'] - if len(keys) > 1: - request_short += '\n' - else: - request_short += ' ' - request_short += '(ns:%s)' % truncate(namespace) - return request_short - -def memcache_set(request): - """Pretty-format a memcache.set() request. - - Arguments: - request - The memcache.set() request object, e.g., - {'item': [{'Item': {'flags': '0L', 'key': 'memcache_key' ... 
- - Returns: - The keys of the memcache.get() response as a string. If there are - multiple keys, they are separated by newline characters. - """ - keys = [] - for i in request["item"]: - if "MemcacheSetRequest_Item" in i: - # This key exists in dev and in the 'python' production runtime. - key = i["MemcacheSetRequest_Item"]["key"] - else: - # But it's a different key in the 'python27' production runtime. - key = i["Item"]["key"] - keys.append(truncate(key)) - return "\n".join(keys) - -def datastore_query(query): - kind = query.get('kind', 'UnknownKind') - count = query.get('count', '') - - filters_clean = datastore_query_filter(query) - orders_clean = datastore_query_order(query) - - s = StringIO.StringIO() - s.write("SELECT FROM %s\n" % kind) - if filters_clean: - s.write("WHERE\n") - for name, op, value in filters_clean: - s.write("%s %s %s\n" % (name, op, value)) - if orders_clean: - s.write("ORDER BY\n") - for prop, direction in orders_clean: - s.write("%s %s\n" % (prop, direction)) - if count: - s.write("LIMIT %s\n" % count) - - result = s.getvalue() - s.close() - return result - -def datastore_query_filter(query): - _Operator_NAMES = { - 0: "?", - 1: "<", - 2: "<=", - 3: ">", - 4: ">=", - 5: "=", - 6: "IN", - 7: "EXISTS", - } - filters = query.get('filter', []) - filters_clean = [] - for f in filters: - if 'Query_Filter' in f: - # This key exists in dev and in the 'python' production runtime. - f = f["Query_Filter"] - elif 'Filter' in f: - # But it's a different key in the 'python27' production runtime. - f = f["Filter"] - else: - # Filters are optional, so there might be no filter at all. - continue - op = _Operator_NAMES[int(f.get('op', 0))] - props = f["property"] - for p in props: - p = p["Property"] - name = p["name"] if "name" in p else "UnknownName" - - if 'value' in p: - - propval = p['value']['PropertyValue'] - - if 'stringvalue' in propval: - value = propval["stringvalue"] - elif 'referencevalue' in propval: - if 'PropertyValue_ReferenceValue' in propval['referencevalue']: - # This key exists in dev and in the 'python' production runtime. - ref = propval['referencevalue']['PropertyValue_ReferenceValue'] - else: - # But it's a different key in the 'python27' production runtime. - ref = propval['referencevalue']['ReferenceValue'] - els = ref['pathelement'] - paths = [] - for el in els: - if 'PropertyValue_ReferenceValuePathElement' in el: - # This key exists in dev and in the 'python' production runtime. - path = el['PropertyValue_ReferenceValuePathElement'] - else: - # But it's a different key in the 'python27' production runtime. - path = el['ReferenceValuePathElement'] - paths.append("%s(%s)" % (path['type'], id_or_name(path))) - value = "->".join(paths) - elif 'booleanvalue' in propval: - value = propval["booleanvalue"] - elif 'uservalue' in propval: - if 'PropertyValue_UserValue' in propval['uservalue']: - # This key exists in dev and in the 'python' production runtime. - email = propval['uservalue']['PropertyValue_UserValue']['email'] - else: - # But it's a different key in the 'python27' production runtime. - email = propval['uservalue']['UserValue']['email'] - value = 'User(%s)' % email - elif '...' in propval: - value = '...' 
- elif 'int64value' in propval: - value = propval["int64value"] - else: - raise Exception(propval) - else: - value = '' - filters_clean.append((name, op, value)) - return filters_clean - -def datastore_query_order(query): - orders = query.get('order', []) - _Direction_NAMES = { - 0: "?DIR", - 1: "ASC", - 2: "DESC", - } - orders_clean = [] - for order in orders: - if 'Query_Order' in order: - # This key exists in dev and in the 'python' production runtime. - order = order['Query_Order'] - else: - # But it's a different key in the 'python27' production runtime. - order = order['Order'] - direction = _Direction_NAMES[int(order.get('direction', 0))] - prop = order.get('property', 'UnknownProperty') - orders_clean.append((prop, direction)) - return orders_clean - -def id_or_name(path): - if 'name' in path: - return path['name'] - else: - return path['id'] - -def datastore_get(request): - keys = request["key"] - if len(keys) > 1: - keylist = cleanup_key(keys.pop(0)) - for key in keys: - keylist += ", " + cleanup_key(key) - return keylist - elif keys: - return cleanup_key(keys[0]) - -def cleanup_key(key): - if 'Reference' not in key: - #sometimes key is passed in as '...' - return key - els = key['Reference']['path']['Path']['element'] - paths = [] - for el in els: - if 'Path_Element' in el: - # This key exists in dev and in the 'python' production runtime. - path = el['Path_Element'] - else: - # But it's a different key in the 'python27' production runtime. - path = el['Element'] - paths.append("%s(%s)" % (path['type'] if 'type' in path - else 'UnknownType', id_or_name(path))) - return "->".join(paths) - -def datastore_put(request): - entities = request["entity"] - keys = [] - for entity in entities: - keys.append(cleanup_key(entity["EntityProto"]["key"])) - return "\n".join(keys) - -def truncate(value, limit=100): - if len(value) > limit: - return value[:limit - 3] + "..." - else: - return value diff --git a/src/lib/gae_mini_profiler/config.py b/src/lib/gae_mini_profiler/config.py deleted file mode 100755 index 1d90342..0000000 --- a/src/lib/gae_mini_profiler/config.py +++ /dev/null @@ -1,40 +0,0 @@ -import os - -from google.appengine.api import lib_config - -# These should_profile functions return true whenever a request should be -# profiled. -# -# You can override these functions in appengine_config.py. See example below -# and https://developers.google.com/appengine/docs/python/tools/appengineconfig -# -# These functions will be run once per request, so make sure they are fast. -# -# Example: -# ...in appengine_config.py: -# def gae_mini_profiler_should_profile_production(): -# from google.appengine.api import users -# return users.is_current_user_admin() - -def _should_profile_production_default(): - """Default to disabling in production if this function isn't overridden. - - Can be overridden in appengine_config.py""" - return False - -def _should_profile_development_default(): - """Default to enabling in development if this function isn't overridden. 
- - Can be overridden in appengine_config.py""" - return True - -_config = lib_config.register("gae_mini_profiler", { - "should_profile_production": _should_profile_production_default, - "should_profile_development": _should_profile_development_default}) - -def should_profile(): - """Returns true if the current request should be profiles.""" - if os.environ["SERVER_SOFTWARE"].startswith("Devel"): - return _config.should_profile_development() - else: - return _config.should_profile_production() diff --git a/src/lib/gae_mini_profiler/cookies.py b/src/lib/gae_mini_profiler/cookies.py deleted file mode 100755 index f82f1fa..0000000 --- a/src/lib/gae_mini_profiler/cookies.py +++ /dev/null @@ -1,50 +0,0 @@ -import Cookie -import logging -import os - -def get_cookie_value(key): - cookies = None - try: - cookies = Cookie.BaseCookie(os.environ.get('HTTP_COOKIE','')) - except Cookie.CookieError, error: - logging.debug("Ignoring Cookie Error, skipping get cookie: '%s'" % error) - - if not cookies: - return None - - cookie = cookies.get(key) - - if not cookie: - return None - - return cookie.value - -# Cookie handling from http://appengine-cookbook.appspot.com/recipe/a-simple-cookie-class/ -def set_cookie_value(key, value='', max_age=None, - path='/', domain=None, secure=None, httponly=False, - version=None, comment=None): - cookies = Cookie.BaseCookie() - cookies[key] = value - for var_name, var_value in [ - ('max-age', max_age), - ('path', path), - ('domain', domain), - ('secure', secure), - #('HttpOnly', httponly), Python 2.6 is required for httponly cookies - ('version', version), - ('comment', comment), - ]: - if var_value is not None and var_value is not False: - cookies[key][var_name] = str(var_value) - if max_age is not None: - cookies[key]['expires'] = max_age - - cookies_header = cookies[key].output(header='').lstrip() - - if httponly: - # We have to manually add this part of the header until GAE uses Python 2.6. - cookies_header += "; HttpOnly" - - return cookies_header - - diff --git a/src/lib/gae_mini_profiler/instrumented_profiler.py b/src/lib/gae_mini_profiler/instrumented_profiler.py deleted file mode 100755 index 6d09962..0000000 --- a/src/lib/gae_mini_profiler/instrumented_profiler.py +++ /dev/null @@ -1,70 +0,0 @@ -"""CPU profiler that works by instrumenting all function calls (uses cProfile). - -This profiler provides detailed function timings for all function calls -during a request. - -This is just a simple wrapper for cProfile with result formatting. See -http://docs.python.org/2/library/profile.html for more. - -PRO: since every function call is instrumented, you'll be sure to see -everything that goes on during a request. For code that doesn't have lots of -deeply nested function calls, this can be the easiest and most accurate way to -get an idea for which functions are taking lots of time. - -CON: overhead is added to each function call due to this instrumentation. If -you're profiling code with deeply nested function calls or tight loops going -over lots of function calls, this perf overhead will add up. 
-""" - -import cProfile -import pstats -import StringIO - -import util - -class Profile(object): - """Profiler that wraps cProfile for programmatic access and reporting.""" - def __init__(self): - self.c_profile = cProfile.Profile() - - def results(self): - """Return cProfile results in a dictionary for template context.""" - # Make sure nothing is printed to stdout - output = StringIO.StringIO() - stats = pstats.Stats(self.c_profile, stream=output) - stats.sort_stats("cumulative") - - results = { - "total_call_count": stats.total_calls, - "total_time": util.seconds_fmt(stats.total_tt), - "calls": [] - } - - width, list_func_names = stats.get_print_list([80]) - for func_name in list_func_names: - primitive_call_count, total_call_count, total_time, cumulative_time, callers = stats.stats[func_name] - - func_desc = pstats.func_std_string(func_name) - - callers_names = map(lambda func_name: pstats.func_std_string(func_name), callers.keys()) - callers_desc = map( - lambda name: {"func_desc": name, "func_desc_short": util.short_method_fmt(name)}, - callers_names) - - results["calls"].append({ - "primitive_call_count": primitive_call_count, - "total_call_count": total_call_count, - "cumulative_time": util.seconds_fmt(cumulative_time, 2), - "per_call_cumulative": util.seconds_fmt(cumulative_time / primitive_call_count, 2) if primitive_call_count else "", - "func_desc": func_desc, - "func_desc_short": util.short_method_fmt(func_desc), - "callers_desc": callers_desc, - }) - - output.close() - - return results - - def run(self, fxn): - """Run function with cProfile enabled, saving results.""" - return self.c_profile.runcall(lambda *args, **kwargs: fxn(), None, None) diff --git a/src/lib/gae_mini_profiler/main.py b/src/lib/gae_mini_profiler/main.py deleted file mode 100755 index 9716419..0000000 --- a/src/lib/gae_mini_profiler/main.py +++ /dev/null @@ -1,16 +0,0 @@ -from google.appengine.ext import webapp -from google.appengine.ext.webapp.util import run_wsgi_app - -import profiler - -application = webapp.WSGIApplication([ - ("/gae_mini_profiler/request/log", profiler.RequestLogHandler), - ("/gae_mini_profiler/request", profiler.RequestStatsHandler), - ("/gae_mini_profiler/shared", profiler.SharedStatsHandler), -]) - -def main(): - run_wsgi_app(application) - -if __name__ == "__main__": - main() diff --git a/src/lib/gae_mini_profiler/profiler.py b/src/lib/gae_mini_profiler/profiler.py deleted file mode 100755 index 9c0dd4d..0000000 --- a/src/lib/gae_mini_profiler/profiler.py +++ /dev/null @@ -1,576 +0,0 @@ -from __future__ import with_statement - -import datetime -import time -import logging -import os -import re - -try: - import threading -except ImportError: - import dummy_threading as threading - -# use json in Python 2.7, fallback to simplejson for Python 2.5 -try: - import json -except ImportError: - import simplejson as json - -import StringIO -from types import GeneratorType -import zlib - -from google.appengine.api import logservice -from google.appengine.api import memcache -from google.appengine.ext.appstats import recording -from google.appengine.ext.webapp import RequestHandler - -import cookies -import pickle -import config -import util - -dev_server = os.environ["SERVER_SOFTWARE"].startswith("Devel") - - -class CurrentRequestId(object): - """A per-request identifier accessed by other pieces of mini profiler. 
- - It is managed as part of the middleware lifecycle.""" - - # In production use threading.local() to make request ids threadsafe - _local = threading.local() - _local.request_id = None - - # On the devserver don't use threading.local b/c it's reset on Thread.start - dev_server_request_id = None - - @staticmethod - def get(): - if dev_server: - return CurrentRequestId.dev_server_request_id - else: - return CurrentRequestId._local.request_id - - @staticmethod - def set(request_id): - if dev_server: - CurrentRequestId.dev_server_request_id = request_id - else: - CurrentRequestId._local.request_id = request_id - - -class Mode(object): - """Possible profiler modes. - - TODO(kamens): switch this from an enum to a more sensible bitmask or other - alternative that supports multiple settings without an exploding number of - enums. - - TODO(kamens): when this is changed from an enum to a bitmask or other more - sensible object with multiple properties, we should pass a Mode object - around the rest of this code instead of using a simple string that this - static class is forced to examine (e.g. if self.mode.is_rpc_enabled()). - """ - - SIMPLE = "simple" # Simple start/end timing for the request as a whole - CPU_INSTRUMENTED = "instrumented" # Profile all function calls - CPU_SAMPLING = "sampling" # Sample call stacks - RPC_ONLY = "rpc" # Profile all RPC calls - RPC_AND_CPU_INSTRUMENTED = "rpc_instrumented" # RPCs and all fxn calls - RPC_AND_CPU_SAMPLING = "rpc_sampling" # RPCs and sample call stacks - - @staticmethod - def get_mode(environ): - """Get the profiler mode requested by current request's headers & - cookies.""" - if "HTTP_G_M_P_MODE" in environ: - mode = environ["HTTP_G_M_P_MODE"] - else: - mode = cookies.get_cookie_value("g-m-p-mode") - - if (mode not in [ - Mode.SIMPLE, - Mode.CPU_INSTRUMENTED, - Mode.CPU_SAMPLING, - Mode.RPC_ONLY, - Mode.RPC_AND_CPU_INSTRUMENTED, - Mode.RPC_AND_CPU_SAMPLING]): - mode = Mode.RPC_AND_CPU_INSTRUMENTED - - return mode - - @staticmethod - def is_rpc_enabled(mode): - return mode in [ - Mode.RPC_ONLY, - Mode.RPC_AND_CPU_INSTRUMENTED, - Mode.RPC_AND_CPU_SAMPLING]; - - @staticmethod - def is_sampling_enabled(mode): - return mode in [ - Mode.CPU_SAMPLING, - Mode.RPC_AND_CPU_SAMPLING]; - - @staticmethod - def is_instrumented_enabled(mode): - return mode in [ - Mode.CPU_INSTRUMENTED, - Mode.RPC_AND_CPU_INSTRUMENTED]; - - -class SharedStatsHandler(RequestHandler): - - def get(self): - path = os.path.join(os.path.dirname(__file__), "templates/shared.html") - - request_id = self.request.get("request_id") - if not RequestStats.get(request_id): - self.response.out.write("Profiler stats no longer exist for this request.") - return - - # Late-bind templatetags to avoid a circular import. - # TODO(chris): remove late-binding once templatetags has been teased - # apart and no longer contains so many broad dependencies. - - import templatetags - profiler_includes = templatetags.profiler_includes_request_id(request_id, True) - - # We are not using a templating engine here to avoid pulling in Jinja2 - # or Django. It's an admin page anyway, and all other templating lives - # in javascript right now. - - with open(path, 'rU') as f: - template = f.read() - - template = template.replace('{{profiler_includes}}', profiler_includes) - self.response.out.write(template) - - -class RequestLogHandler(RequestHandler): - """Handler for retrieving and returning a RequestLog from GAE's logs API. - - See https://developers.google.com/appengine/docs/python/logs. 
- - This GET request accepts a logging_request_id via query param that matches - the request_id from an App Engine RequestLog. - - It returns a JSON object that contains the pieces of RequestLog info we - find most interesting, such as pending_ms and loading_request. - """ - - def get(self): - - self.response.headers["Content-Type"] = "application/json" - dict_request_log = None - - # This logging_request_id should match a request_id from an App Engine - # request log. - # https://developers.google.com/appengine/docs/python/logs/functions - logging_request_id = self.request.get("logging_request_id") - - # Grab the single request log from logservice - logs = logservice.fetch(request_ids=[logging_request_id]) - - # This slightly strange query result implements __iter__ but not next, - # so we have to iterate to get our expected single result. - for log in logs: - dict_request_log = { - "pending_ms": log.pending_time, # time spent in pending queue - "loading_request": log.was_loading_request, # loading request? - "logging_request_id": logging_request_id - } - # We only expect a single result. - break - - # Log fetching doesn't work on the dev server and this data isn't - # relevant in dev server's case, so we return a simple fake response. - if dev_server: - dict_request_log = { - "pending_ms": 0, - "loading_request": False, - "logging_request_id": logging_request_id - } - - self.response.out.write(json.dumps(dict_request_log)) - - -class RequestStatsHandler(RequestHandler): - - def get(self): - - self.response.headers["Content-Type"] = "application/json" - - list_request_ids = [] - - request_ids = self.request.get("request_ids") - if request_ids: - list_request_ids = request_ids.split(",") - - list_request_stats = [] - - for request_id in list_request_ids: - - request_stats = RequestStats.get(request_id) - - if request_stats and not request_stats.disabled: - - dict_request_stats = {} - for property in RequestStats.serialized_properties: - dict_request_stats[property] = request_stats.__getattribute__(property) - - list_request_stats.append(dict_request_stats) - - # Don't show temporary redirect profiles more than once automatically, as they are - # tied to URL params and may be copied around easily. - if request_stats.temporary_redirect: - request_stats.disabled = True - request_stats.store() - - self.response.out.write(json.dumps(list_request_stats)) - -class RequestStats(object): - - serialized_properties = ["request_id", "url", "url_short", "s_dt", - "profiler_results", "appstats_results", "mode", - "temporary_redirect", "logs", - "logging_request_id"] - - def __init__(self, profiler, environ): - # unique mini profiler request id - self.request_id = profiler.request_id - - # App Engine's logservice request_id - # https://developers.google.com/appengine/docs/python/logs/ - self.logging_request_id = profiler.logging_request_id - - self.url = environ.get("PATH_INFO") - if environ.get("QUERY_STRING"): - self.url += "?%s" % environ.get("QUERY_STRING") - - self.url_short = self.url - if len(self.url_short) > 26: - self.url_short = self.url_short[:26] + "..." 
- - self.mode = profiler.mode - self.s_dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - - self.profiler_results = profiler.profiler_results() - self.appstats_results = profiler.appstats_results() - self.logs = profiler.logs - - self.temporary_redirect = profiler.temporary_redirect - self.disabled = False - - def store(self): - # Store compressed results so we stay under the memcache 1MB limit - pickled = pickle.dumps(self) - compressed_pickled = zlib.compress(pickled) - - return memcache.set(RequestStats.memcache_key(self.request_id), compressed_pickled) - - @staticmethod - def get(request_id): - if request_id: - - compressed_pickled = memcache.get(RequestStats.memcache_key(request_id)) - - if compressed_pickled: - pickled = zlib.decompress(compressed_pickled) - return pickle.loads(pickled) - - return None - - @staticmethod - def memcache_key(request_id): - if not request_id: - return None - return "__gae_mini_profiler_request_%s" % request_id - - -class RequestProfiler(object): - """Profile a single request.""" - - def __init__(self, request_id, mode): - self.request_id = request_id - self.mode = mode - self.instrumented_prof = None - self.sampling_prof = None - self.appstats_prof = None - self.temporary_redirect = False - self.handler = None - self.logs = None - self.logging_request_id = self.get_logging_request_id() - self.start = None - self.end = None - - def profiler_results(self): - """Return the CPU profiler results for this request, if any. - - This will return a dictionary containing results for either the - sampling profiler, instrumented profiler results, or a simple - start/stop timer if both profilers are disabled.""" - - total_time = util.seconds_fmt(self.end - self.start, 0) - results = {"total_time": total_time} - - if self.instrumented_prof: - results.update(self.instrumented_prof.results()) - elif self.sampling_prof: - results.update(self.sampling_prof.results()) - - return results - - def appstats_results(self): - """Return the RPC profiler (appstats) results for this request, if any. - - This will return a dictionary containing results from appstats or an - empty result set if appstats profiling is disabled.""" - - results = { - "calls": [], - "total_time": 0, - } - - if self.appstats_prof: - results.update(self.appstats_prof.results()) - - return results - - def profile_start_response(self, app, environ, start_response): - """Collect and store statistics for a single request. - - Use this method from middleware in place of the standard - request-serving pattern. Do: - - profiler = RequestProfiler(...) - return profiler(app, environ, start_response) - - Instead of: - - return app(environ, start_response) - - Depending on the mode, this method gathers timing information - and an execution profile and stores them in the datastore for - later access. - """ - - # Always track simple start/stop time. - self.start = time.time() - - if self.mode == Mode.SIMPLE: - - # Detailed recording is disabled. - result = app(environ, start_response) - for value in result: - yield value - - else: - - # Add logging handler - self.add_handler() - - if Mode.is_rpc_enabled(self.mode): - # Turn on AppStats monitoring for this request - # Note that we don't import appstats_profiler at the top of - # this file so we don't bring in a lot of imports for users who - # don't have the profiler enabled. 
- from gae_mini_profiler import appstats_profiler - self.appstats_prof = appstats_profiler.Profile() - app = self.appstats_prof.wrap(app) - - # By default, we create a placeholder wrapper function that - # simply calls whatever function it is passed as its first - # argument. - result_fxn_wrapper = lambda fxn: fxn() - - # TODO(kamens): both sampling_profiler and instrumented_profiler - # could subclass the same class. Then they'd both be guaranteed to - # implement run(), and the following if/else could be simplified. - if Mode.is_sampling_enabled(self.mode): - # Turn on sampling profiling for this request. - # Note that we don't import sampling_profiler at the top of - # this file so we don't bring in a lot of imports for users who - # don't have the profiler enabled. - from gae_mini_profiler import sampling_profiler - self.sampling_prof = sampling_profiler.Profile() - result_fxn_wrapper = self.sampling_prof.run - - elif Mode.is_instrumented_enabled(self.mode): - # Turn on cProfile instrumented profiling for this request - # Note that we don't import instrumented_profiler at the top of - # this file so we don't bring in a lot of imports for users who - # don't have the profiler enabled. - from gae_mini_profiler import instrumented_profiler - self.instrumented_prof = instrumented_profiler.Profile() - result_fxn_wrapper = self.instrumented_prof.run - - # Get wsgi result - result = result_fxn_wrapper(lambda: app(environ, start_response)) - - # If we're dealing w/ a generator, profile all of the .next calls as well - if type(result) == GeneratorType: - - while True: - try: - yield result_fxn_wrapper(result.next) - except StopIteration: - break - - else: - for value in result: - yield value - - self.logs = self.get_logs(self.handler) - logging.getLogger().removeHandler(self.handler) - self.handler.stream.close() - self.handler = None - - self.end = time.time() - - # Store stats for later access - RequestStats(self, environ).store() - - def get_logging_request_id(self): - """Return the identifier for this request used by GAE's logservice. - - This logging_request_id will match the request_id parameter of a - RequestLog object stored in App Engine's logging API: - https://developers.google.com/appengine/docs/python/logs/ - """ - return os.environ.get("REQUEST_LOG_ID", None) - - def add_handler(self): - if self.handler is None: - self.handler = RequestProfiler.create_handler() - logging.getLogger().addHandler(self.handler) - - @staticmethod - def create_handler(): - handler = logging.StreamHandler(StringIO.StringIO()) - handler.setLevel(logging.DEBUG) - formatter = logging.Formatter("\t".join([ - '%(levelno)s', - '%(asctime)s%(msecs)d', - '%(funcName)s', - '%(filename)s', - '%(lineno)d', - '%(message)s', - ]), '%M:%S.') - handler.setFormatter(formatter) - return handler - - @staticmethod - def get_logs(handler): - raw_lines = [l for l in handler.stream.getvalue().split("\n") if l] - - lines = [] - for line in raw_lines: - if "\t" in line: - fields = line.split("\t") - lines.append(fields) - else: # line is part of a multiline log message (prob a traceback) - prevline = lines[-1][-1] - if prevline: # ignore leading blank lines in the message - prevline += "\n" - prevline += line - lines[-1][-1] = prevline - - return lines - -class ProfilerWSGIMiddleware(object): - - def __init__(self, app): - self.app = app - - def __call__(self, environ, start_response): - - CurrentRequestId.set(None) - - # Never profile calls to the profiler itself to avoid endless recursion. 
- if (not config.should_profile() or - environ.get("PATH_INFO", "").startswith("/gae_mini_profiler/")): - result = self.app(environ, start_response) - for value in result: - yield value - else: - # Set a random ID for this request so we can look up stats later - import base64 - CurrentRequestId.set(base64.urlsafe_b64encode(os.urandom(5))) - - # Send request id in headers so jQuery ajax calls can pick - # up profiles. - def profiled_start_response(status, headers, exc_info = None): - - if status.startswith("302 "): - # Temporary redirect. Add request identifier to redirect location - # so next rendered page can show this request's profile. - headers = ProfilerWSGIMiddleware.headers_with_modified_redirect(environ, headers) - # Access the profiler in closure scope - profiler.temporary_redirect = True - - # Append headers used when displaying profiler results from ajax requests - headers.append(("X-MiniProfiler-Id", CurrentRequestId.get())) - headers.append(("X-MiniProfiler-QS", environ.get("QUERY_STRING"))) - - return start_response(status, headers, exc_info) - - # As a simple form of rate-limiting, appstats protects all - # its work with a memcache lock to ensure that only one - # appstats request ever runs at a time, across all - # appengine instances. (GvR confirmed this is the purpose - # of the lock). So our attempt to profile will fail if - # appstats is running on another instance. Boo-urns! We - # just turn off the lock-checking for us, which means we - # don't rate-limit quite as much with the mini-profiler as - # we would do without. - old_memcache_add = memcache.add - old_memcache_delete = memcache.delete - memcache.add = (lambda key, *args, **kwargs: - (True if key == recording.lock_key() - else old_memcache_add(key, *args, **kwargs))) - memcache.delete = (lambda key, *args, **kwargs: - (True if key == recording.lock_key() - else old_memcache_delete(key, *args, **kwargs))) - - try: - profiler = RequestProfiler(CurrentRequestId.get(), - Mode.get_mode(environ)) - result = profiler.profile_start_response(self.app, environ, profiled_start_response) - for value in result: - yield value - finally: - CurrentRequestId.set(None) - memcache.add = old_memcache_add - memcache.delete = old_memcache_delete - - @staticmethod - def headers_with_modified_redirect(environ, headers): - headers_modified = [] - - for header in headers: - if header[0] == "Location": - reg = re.compile("mp-r-id=([^&]+)") - - # Keep any chain of redirects around - request_id_chain = CurrentRequestId.get() - match = reg.search(environ.get("QUERY_STRING")) - if match: - request_id_chain = ",".join([match.groups()[0], request_id_chain]) - - # Remove any pre-existing miniprofiler redirect id - location = header[1] - location = reg.sub("", location) - - # Add current request id as miniprofiler redirect id - location += ("&" if "?" in location else "?") - location = location.replace("&&", "&") - location += "mp-r-id=%s" % request_id_chain - - headers_modified.append((header[0], location)) - else: - headers_modified.append(header) - - return headers_modified diff --git a/src/lib/gae_mini_profiler/sampling_profiler.py b/src/lib/gae_mini_profiler/sampling_profiler.py deleted file mode 100755 index c47c7f7..0000000 --- a/src/lib/gae_mini_profiler/sampling_profiler.py +++ /dev/null @@ -1,176 +0,0 @@ -"""CPU profiler that works by sampling the call stack periodically. - -This profiler provides a very simplistic view of where your request is spending -its time. 
It does this by periodically sampling your request's call stack to -figure out in which functions real time is being spent. - -PRO: since the profiler only samples the call stack occasionally, it has much -less overhead than an instrumenting profiler, and avoids biases that -instrumenting profilers have due to instrumentation overhead (which causes -instrumenting profilers to overstate how much time is spent in frequently -called functions, or functions with deep call stacks). - -CON: since the profiler only samples, it does not allow you to accurately -answer a question like, "how much time was spent in routine X?", especially if -routine X takes relatively little time. (You *can* answer questions like "what -is the ratio of time spent in routine X vs routine Y," at least if both -routines take a reasonable amount of time.) It is better suited for answering -the question, "Where is the time spent by my app?" -""" - -from collections import defaultdict -import logging -import os -import sys -import time -import threading -import traceback - -from gae_mini_profiler import util - -_is_dev_server = os.environ["SERVER_SOFTWARE"].startswith("Devel") - -class InspectingThread(threading.Thread): - """Thread that periodically triggers profiler inspections.""" - SAMPLES_PER_SECOND = 250 - - def __init__(self, profile=None): - super(InspectingThread, self).__init__() - self._stop_event = threading.Event() - self.profile = profile - - def stop(self): - """Stop this thread.""" - # http://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread-in-python - self._stop_event.set() - - def should_stop(self): - return self._stop_event.is_set() - - def run(self): - """Start periodic profiler inspections. - - This will run, periodically inspecting and then sleeping, until - manually stopped via stop().""" - # Keep sampling until this thread is explicitly stopped. - while not self.should_stop(): - - # Take a sample of the main request thread's frame stack... - self.profile.take_sample() - - # ...then sleep and let it do some more work. - time.sleep(1.0 / InspectingThread.SAMPLES_PER_SECOND) - - # Only take one sample per thread if this is running on the - # single-threaded dev server. - if _is_dev_server and len(self.profile.samples) > 0: - break - - -class ProfileSample(object): - """Single stack trace sample gathered during a periodic inspection.""" - def __init__(self, stack): - self.stack_trace = traceback.extract_stack(stack) - - -class Profile(object): - """Profiler that periodically inspects a request and logs stack traces.""" - def __init__(self): - # All saved stack trace samples - self.samples = [] - - # Thread id for the request thread currently being profiled - self.current_request_thread_id = None - - # Thread that constantly waits, inspects, waits, inspect, ... 
- self.inspecting_thread = None - - def results(self): - """Return sampling results in a dictionary for template context.""" - aggregated_calls = defaultdict(int) - total_samples = len(self.samples) - - for sample in self.samples: - for filename, line_num, function_name, src in sample.stack_trace: - aggregated_calls["%s\n\n%s:%s (%s)" % - (src, filename, line_num, function_name)] += 1 - - # Turn aggregated call samples into dictionary of results - calls = [{ - "func_desc": item[0], - "func_desc_short": util.short_method_fmt(item[0]), - "count_samples": item[1], - "per_samples": "%s%%" % util.decimal_fmt( - 100.0 * item[1] / total_samples), - } for item in aggregated_calls.items()] - - # Sort call sample results by # of times calls appeared in a sample - calls = sorted(calls, reverse=True, - key=lambda call: call["count_samples"]) - - return { - "calls": calls, - "total_samples": total_samples, - "is_dev_server": _is_dev_server, - } - - def take_sample(self): - # Look at stacks of all existing threads... - # See http://bzimmer.ziclix.com/2008/12/17/python-thread-dumps/ - for thread_id, stack in sys._current_frames().items(): - - # ...but only sample from the main request thread. - - if _is_dev_server: - # In development, current_request_thread_id won't be set - # properly. threading.current_thread().ident always returns -1 - # in dev. So instead, we just take a peek at the stack's - # current package to figure out if it is the request thread. - # Even though the dev server is single-threaded, - # sys._current_frames will return multiple threads, because - # some of them are spawned by the App Engine dev server for - # internal purposes. We don't want to sample these internal dev - # server threads -- we want to sample the thread that is - # running the current request. Since the dev server will be - # running this sampling code immediately from the run() code - # below, we can spot this thread's stack by looking at its - # global namespace (f_globals) and making sure it's currently - # in the gae_mini_profiler package. - should_sample = (stack.f_globals["__package__"] == - "gae_mini_profiler") - else: - # In production, current_request_thread_id will be set properly - # by threading.current_thread().ident. - # TODO(kamens): this profiler will need work if we ever - # actually use multiple threads in a single request and want to - # profile more than one of them. - should_sample = thread_id == self.current_request_thread_id - - if should_sample: - # Grab a sample of this thread's current stack - self.samples.append(ProfileSample(stack)) - - def run(self, fxn): - """Run function with samping profiler enabled, saving results.""" - if not hasattr(threading, "current_thread"): - # Sampling profiler is not supported in Python2.5 - logging.warn("The sampling profiler is not supported in Python2.5") - return fxn() - - # Store the thread id for the current request's thread. This lets - # the inspecting thread know which thread to inspect. - self.current_request_thread_id = threading.current_thread().ident - - # Start the thread that will be periodically inspecting the frame - # stack of this current request thread - self.inspecting_thread = InspectingThread(profile=self) - self.inspecting_thread.start() - - try: - # Run the request fxn which will be inspected by the inspecting - # thread. 
- return fxn() - finally: - # Stop and clear the inspecting thread - self.inspecting_thread.stop() - self.inspecting_thread = None diff --git a/src/lib/gae_mini_profiler/static/css/profiler.css b/src/lib/gae_mini_profiler/static/css/profiler.css deleted file mode 100755 index 3473e4d..0000000 --- a/src/lib/gae_mini_profiler/static/css/profiler.css +++ /dev/null @@ -1,324 +0,0 @@ -.g-m-p-corner { - padding: 4px; - cursor: pointer; - text-align: right; - - max-height: 205px; - overflow: auto; - overflow-x: hidden; - overflow-y: auto; -} - -.g-m-p-corner .ms, -.g-m-p .ms { - color: #777; -} - -.g-m-p-corner .entry { - height: auto; - margin: auto; - white-space: nowrap; -} - -.g-m-p-corner .entry.ajax { - border-top: 1px dotted #DDD; -} - -.g-m-p-corner .entry.expanded { - font-weight: bold; -} - -.g-m-p { - min-width: 410px; - padding: 9px; - padding-bottom: 18px; -} - -.g-m-p, .g-m-p-corner, .g-m-p-shared { - font-family: Helvetica, Arial, sans-serif; - font-size: 12px; - color: #444; - line-height: 18px; -} - -.g-m-p a, .g-m-p-corner a, .g-m-p-shared a { - color: #069; - text-decoration: none; -} - -.g-m-p a.uses_script, .g-m-p-corner a.uses_script, .g-m-p-shared a.uses_script { - border-bottom: dotted 1px #069 -} - -.g-m-p, .g-m-p-corner { - position: absolute; - left: 9px; - top: 0; - - background: #F7F7F7; - - border-top: 0; - border-left: 1px solid #CCC; - border-bottom: 1px solid #DDD; - border-right: 1px solid #DDD; - - box-shadow: 2px 2px 10px rgba(0, 0, 0, 0.25); - -moz-box-shadow: 2px 2px 10px rgba(0, 0, 0, 0.25); - -webkit-box-shadow: 2px 2px 10px rgba(0, 0, 0, 0.25); - - border-bottom-left-radius: 12px; - border-bottom-right-radius: 12px; - - z-index: 99999; -} - -.g-m-p .title { - font-size: 1.25em; - text-align: left; - font-weight: bold; - border-bottom: 1px solid #CCC; - padding: 0; -} - -.g-m-p .date_and_share { - text-align: right; - margin-bottom: 18px; -} - -.g-m-p .date_and_share .date { - font-style: italic; - color: #CCC; -} - -.g-m-p .appstats-link { - float:right; -} - -.g-m-p .url { - font-size: 125%; -} - -.g-m-p .redirect { - float: left; - color: #444; -} - -.g-m-p .total { - float: right; -} - -.g-m-p .warn { - font-weight: bold; - color: red; -} - -.g-m-p .summary { - float: right; - display: inline; -} - -.g-m-p .details { - max-width: 800px; - max-height: 500px; - margin-bottom: 18px; - overflow: auto; -} - -.g-m-p .expand.expanded { - border-bottom: 1px solid #DDD; -} - -.g-m-p .expand.expanded a { - text-decoration: none; - border-bottom: 0; - font-weight: bold; -} - -.g-m-p .details table { - margin-top: 6px; - border-spacing: 0; - font-size: 12px; -} - -.g-m-p .details table tr:nth-child(odd) { - background-color: #FFF; -} - -.g-m-p .details table tr:nth-child(even) { - background-color: #F7F7F7; -} - -.g-m-p .details table .callers-label { - color: #CCC; -} - -.g-m-p .details th, .g-m-p .details th.header { - font-size: 12px; - background-color: #F7F7F7; - font-weight: bold; - cursor: pointer; - float: none; - position: static; -} - -.g-m-p .details th.headerSortUp, -.g-m-p .details th.headerSortDown { - text-decoration: underline; -} - -.g-m-p .details th, -.g-m-p .details td { - padding-right: 18px; - vertical-align: top; -} - -.g-m-p .details .left { - text-align: left; - float: none; -} - -.g-m-p .details .right { - text-align: right; - float: none; -} - -.g-m-p-shared .shared-teaser-container { - text-align: center; -} - -.g-m-p-shared .shared-teaser { - margin-left: auto; - margin-right: auto; - height: 400px; - width: 600px; - text-align: center; 
- padding: 200px 0; - font-size: 24px; - font-weight: bold; -} - -.g-m-p span.loglevel { - display: inline-block; - height: 1.2em; - width: 1.2em; - line-height: 1.2; - text-align: center; - text-transform: capitalize; - font-weight: bold; - border-radius: 2px; -} - -.g-m-p span.loglevel span { - display: none; -} - -.g-m-p .boxes li { - display: inline; - margin: 0 10px 0 0; -} - -.g-m-p div.simple-timing { - margin-top: 18px; - text-align: center; -} - -.g-m-p .summary span.loglevel { - margin: 0 .1em 0 .6em; -} -.g-m-p span.loglevel.ll50:before { content: 'C'; } -.g-m-p span.loglevel.ll40:before { content: 'E'; } -.g-m-p span.loglevel.ll30:before { content: 'W'; } -.g-m-p span.loglevel.ll20:before { content: 'I'; } -.g-m-p span.loglevel.ll10:before { content: 'D'; } - -.g-m-p .ll50 { background-color: #F22; } -.g-m-p .ll40 { background-color: #F90; } -.g-m-p .ll30 { background-color: #Fd0; } -.g-m-p .ll20 { background-color: #3C0; } -.g-m-p .ll10 { background-color: #09F; } - -#slider { - padding: 5px 0 3px 0; -} - -#slider .container { - width: 150px; - display: inline-block; -} - -#slider .minlevel-text { - display: inline-block; - width: 4em; -} - -#slider .loglevel { - margin-left: .5em; -} - -#slider .ui-slider-horizontal { - /*height: .2em;*/ -} - -/* Borrowed from those smart guys @ Fog Creek. Props to Justin & Bobby */ -.g-m-p .fancy-scrollbar::-webkit-scrollbar { - height: 8px; - width: 8px; -} -.g-m-p .fancy-scrollbar::-webkit-scrollbar-button:start:decrement, -.g-m-p .fancy-scrollbar::-webkit-scrollbar-button:end:increment { - background: transparent; - display: none; -} -.g-m-p .fancy-scrollbar::-webkit-scrollbar-track-piece { - background: transparent; -} -.g-m-p .fancy-scrollbar::-webkit-scrollbar-track-piece:vertical:start { - -webkit-border-top-left-radius: 4px; - -webkit-border-top-right-radius: 4px; -} -.g-m-p .fancy-scrollbar::-webkit-scrollbar-track-piece:vertical:end { - -webkit-border-bottom-left-radius: 4px; - -webkit-border-bottom-right-radius: 4px; -} -.g-m-p .fancy-scrollbar::-webkit-scrollbar-track-piece:horizontal:start { - -webkit-border-top-left-radius: 4px; - -webkit-border-bottom-left-radius: 4px; -} -.g-m-p .fancy-scrollbar::-webkit-scrollbar-track-piece:horizontal:end { - -webkit-border-top-right-radius: 4px; - -webkit-border-bottom-right-radius: 4px; -} -.g-m-p .fancy-scrollbar::-webkit-scrollbar-thumb:vertical, -.g-m-p .fancy-scrollbar::-webkit-scrollbar-thumb:horizontal { - background: #ccc; - border: 1px solid #aaa; - border-radius: 4px; - display: block; - height: 50px; -} - -.g-m-p .settings { - background: #E6E6E6; - padding: 9px; - - border: 1px solid #CCC; - border-top: 0; - - border-bottom-left-radius: 4px; - border-bottom-right-radius: 4px; -} - -.g-m-p .settings table { - width: 100%; - padding: 0; -} -.g-m-p .settings table th, -.g-m-p .settings table td { - max-width: 200px; - text-align: left; - vertical-align: top; - width: 50%; -} -.g-m-p .settings table tr.tips td { - font-style: italic; -} diff --git a/src/lib/gae_mini_profiler/static/js/profiler.js b/src/lib/gae_mini_profiler/static/js/profiler.js deleted file mode 100755 index 061d997..0000000 --- a/src/lib/gae_mini_profiler/static/js/profiler.js +++ /dev/null @@ -1,475 +0,0 @@ - -var GaeMiniProfiler = { - - // Profiler modes match gae_mini_profiler.profiler.Mode's enum. - // TODO(kamens): switch this from an enum to a more sensible bitmask or - // other alternative that supports multiple settings without an exploding - // number of enums. 
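The TODO above suggests replacing the mode enum with a bitmask. Sketched in Python (to match the other examples here) rather than this file's JavaScript, and with made-up flag names, that could look like:

    # Each capability is an independent bit, so any combination of settings
    # works without adding a new enum member per combination.
    RPC = 1 << 0
    CPU_INSTRUMENTED = 1 << 1
    CPU_SAMPLING = 1 << 2

    def is_rpc_enabled(mode):
        return bool(mode & RPC)

    def is_cpu_enabled(mode):
        return bool(mode & (CPU_INSTRUMENTED | CPU_SAMPLING))

    # The current "rpc_instrumented" mode becomes a simple union:
    mode = RPC | CPU_INSTRUMENTED
    assert is_rpc_enabled(mode) and is_cpu_enabled(mode)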
- modes: { - SIMPLE: "simple", - CPU_INSTRUMENTED: "instrumented", - CPU_SAMPLING: "sampling", - RPC_ONLY: "rpc", - RPC_AND_CPU_INSTRUMENTED: "rpc_instrumented", - RPC_AND_CPU_SAMPLING: "rpc_sampling" - }, - - init: function(requestId, fShowImmediately) { - // Fetch profile results for any ajax calls - // (see http://code.google.com/p/mvc-mini-profiler/source/browse/MvcMiniProfiler/UI/Includes.js) - $(document).ajaxComplete(function (e, xhr, settings) { - if (xhr) { - var requestId = xhr.getResponseHeader('X-MiniProfiler-Id'); - if (requestId) { - var queryString = xhr.getResponseHeader('X-MiniProfiler-QS'); - GaeMiniProfiler.fetch(requestId, queryString); - } - } - }); - - GaeMiniProfiler.fetch(requestId, window.location.search, fShowImmediately); - }, - - /** - * Return the profiler mode being requested by the current browser cookie. - */ - getCookieMode: function() { - var mode = $.cookiePlugin("g-m-p-mode"); - - // Default to RPC + Instrumented - var valid = false; - for (var key in this.modes) { - if (mode == this.modes[key]) { - valid = true; - break; - } - } - if (!valid) { - mode = this.modes.RPC_AND_CPU_INSTRUMENTED; - } - - return mode; - }, - - /** - * Set browser cookie for current profiler mode according to radio inputs. - */ - setCookieMode: function(elLink) { - // Get current state of RPC/CPU profiler settings according to radios - var jel = $(elLink).closest(".g-m-p").find(".settings"), - rpcValue = jel.find("input:radio[name=rpc]:checked").val(), - cpuValue = jel.find("input:radio[name=cpu]:checked").val(); - - // Convert to format matching this.modes values. - var mode = "simple"; - if (rpcValue && cpuValue) { - mode = rpcValue + "_" + cpuValue; - } - else { - mode = rpcValue || cpuValue || "simple"; - } - - // Set mode cookie for profiler to detect on next request - $.cookiePlugin("g-m-p-mode", mode, {path: '/', expires: 365}); - }, - - /** - * True if profiler mode has enabled RPC profiling - */ - isRpcEnabled: function(mode) { - return (mode == this.modes.RPC_ONLY || - mode == this.modes.RPC_AND_CPU_INSTRUMENTED || - mode == this.modes.RPC_AND_CPU_SAMPLING); - }, - - /** - * True if profiler mode has enabled CPU instrumentation - */ - isInstrumentedEnabled: function(mode) { - return (mode == this.modes.CPU_INSTRUMENTED || - mode == this.modes.RPC_AND_CPU_INSTRUMENTED); - }, - - /** - * True if profiler mode has enabled CPU sampling - */ - isSamplingEnabled: function(mode) { - return (mode == this.modes.CPU_SAMPLING || - mode == this.modes.RPC_AND_CPU_SAMPLING); - }, - - /** - * True if either CPU instrumentation or CPU sampling is enabled - */ - isCpuEnabled: function(mode) { - return (GaeMiniProfiler.isInstrumentedEnabled(mode) || - GaeMiniProfiler.isSamplingEnabled(mode)); - }, - - appendRedirectIds: function(requestId, queryString) { - if (queryString) { - var re = /mp-r-id=([^&]+)/; - var matches = re.exec(queryString); - if (matches && matches.length) { - var sRedirectIds = matches[1]; - var list = sRedirectIds.split(","); - list[list.length] = requestId; - return list; - } - } - - return [requestId]; - }, - - fetch: function(requestId, queryString, fShowImmediately) { - var requestIds = this.appendRedirectIds(requestId, queryString); - - $.get( - "/gae_mini_profiler/request", - { "request_ids": requestIds.join(",") }, - function(data) { - GaeMiniProfilerTemplate.init(function() { GaeMiniProfiler.finishFetch(data, fShowImmediately); }); - }, - "json" - ); - }, - - finishFetch: function(data, fShowImmediately) { - if (!data || !data.length) return; - - for (var ix 
= 0; ix < data.length; ix++) { - - var jCorner = this.renderCorner(data[ix]); - - if (!jCorner.data("attached")) { - $('body') - .append(jCorner) - .click(function(e) { return GaeMiniProfiler.collapse(e); }); - jCorner - .data("attached", true); - } - - if (fShowImmediately) - jCorner.find(".entry").first().click(); - - } - }, - - /** - * Fetch the RequestLog data (pending_ms and loading_request) from App - * Engine's logservice API. - */ - fetchRequestLog: function(data, attempts) { - - // We're willing to ask the logservice API for its RequestLog data more - // than once, because it may take App Engine a while to flush its logs. - if (!attempts) { - attempts = 0; - } - - // We only try to get the request log three times. - if (attempts > 2) { - $(".g-m-p .request-log-" + data.logging_request_id) - .html("Request log data not found."); - return; - } - - $.get( - "/gae_mini_profiler/request/log", - { - "request_id": data.request_id, - "logging_request_id": data.logging_request_id - }, - function(requestLogData) { - - if (!requestLogData) { - // The request log may not be available just yet, because - // App Engine may still be writing its logs. We'll wait a - // sec and try up to three times. - setTimeout(function() { - GaeMiniProfiler.fetchRequestLog(data, attempts + 1); - }, 1000); - return; - } - - requestLogData.request_id = data.request_id; - GaeMiniProfiler.finishFetchRequestLog(requestLogData); - } - ); - }, - - /** - * Render the RequestLog information (pending_ms and loading_request). - */ - finishFetchRequestLog: function(requestLogData) { - $(".g-m-p .request-log-" + requestLogData.logging_request_id) - .empty() - .append( - $("#profilerRequestLogTemplate").tmplPlugin( - requestLogData)); - }, - - collapse: function(e) { - if ($(".g-m-p").is(":visible")) { - $(".g-m-p").slideUp("fast"); - $(".g-m-p-corner").slideDown("fast") - .find(".expanded").removeClass("expanded"); - return false; - } - - return true; - }, - - expand: function(elEntry, data) { - var jPopup = $(".g-m-p"); - - if (jPopup.length) - jPopup.remove(); - else - $(document).keyup(function(e) { if (e.which == 27) GaeMiniProfiler.collapse() }); - - jPopup = this.renderPopup(data); - $('body').append(jPopup); - - var jCorner = $(".g-m-p-corner"); - jCorner.find(".expanded").removeClass("expanded"); - $(elEntry).addClass("expanded"); - - jPopup - .find(".profile-link") - .click(function() { GaeMiniProfiler.toggleSection(this, ".profiler-details"); return false; }).end() - .find(".rpc-link") - .click(function() { GaeMiniProfiler.toggleSection(this, ".rpc-details"); return false; }).end() - .find(".logs-link") - .click(function() { GaeMiniProfiler.toggleSection(this, ".logs-details"); return false; }).end() - .find(".callers-link") - .click(function() { $(this).parents("td").find(".callers").slideToggle("fast"); return false; }).end() - .find(".request-log-link") - .click(function() { GaeMiniProfiler.showRequestLog(this); return false; }).end() - .find(".settings-link") - .click(function() { GaeMiniProfiler.toggleSettings(this); return false; }).end() - .find(".settings input") - .change(function() { GaeMiniProfiler.setCookieMode(this); return false; }).end() - .click(function(e) { e.stopPropagation(); }) - .css("left", jCorner.offset().left + jCorner.width() + 18) - .slideDown("fast"); - - var toggleLogRows = function(level) { - var names = {10:'Debug', 20:'Info', 30:'Warning', 40:'Error', 50:'Critical'}; - $('#slider .minlevel-text').text(names[level]); - $('#slider .loglevel').attr('class', 'loglevel ll'+level); - for 
(var i = 10; i<=50; i += 10) { - var rows = $('tr.ll'+i); - if (i)[^>]*$|\{\{\! /,b={},f={},e,p={key:0,data:{}},i=0,c=0,l=[];function g(g,d,h,e){var c={data:e||(e===0||e===false)?e:d?d.data:{},_wrap:d?d._wrap:null,tmplPlugin:null,parent:d||null,nodes:[],calls:u,nest:w,wrap:x,html:v,update:t};g&&a.extend(c,g,{nodes:[],parent:d});if(h){c.tmplPlugin=h;c._ctnt=c._ctnt||c.tmplPlugin(a,c);c.key=++i;(l.length?f:b)[i]=c}return c}a.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(f,d){a.fn[f]=function(n){var g=[],i=a(n),k,h,m,l,j=this.length===1&&this[0].parentNode;e=b||{};if(j&&j.nodeType===11&&j.childNodes.length===1&&i.length===1){i[d](this[0]);g=this}else{for(h=0,m=i.length;h0?this.clone(true):this).get();a(i[h])[d](k);g=g.concat(k)}c=0;g=this.pushStack(g,f,i.selector)}l=e;e=null;a.tmplPlugin.complete(l);return g}});a.fn.extend({tmplPlugin:function(d,c,b){return a.tmplPlugin(this[0],d,c,b)},tmplItem:function(){return a.tmplItem(this[0])},template:function(b){return a.template(b,this[0])},domManip:function(d,m,k){if(d[0]&&a.isArray(d[0])){var g=a.makeArray(arguments),h=d[0],j=h.length,i=0,f;while(i").join(">").split('"').join(""").split("'").join("'")}});a.extend(a.tmplPlugin,{tag:{tmplPlugin:{_default:{$2:"null"},open:"if($notnull_1){__=__.concat($item.nest($1,$2));}"},wrap:{_default:{$2:"null"},open:"$item.calls(__,$1,$2);__=[];",close:"call=$item.calls();__=call._.concat($item.wrap(call,__));"},each:{_default:{$2:"$index, $value"},open:"if($notnull_1){$.each($1a,function($2){with(this){",close:"}});}"},"if":{open:"if(($notnull_1) && $1a){",close:"}"},"else":{_default:{$1:"true"},open:"}else if(($notnull_1) && $1a){"},html:{open:"if($notnull_1){__.push($1a);}"},"=":{_default:{$1:"$data"},open:"if($notnull_1){__.push($.encode($1a));}"},"!":{open:""}},complete:function(){b={}},afterManip:function(f,b,d){var e=b.nodeType===11?a.makeArray(b.childNodes):b.nodeType===1?[b]:[];d.call(f,b);m(e);c++}});function j(e,g,f){var b,c=f?a.map(f,function(a){return typeof a==="string"?e.key?a.replace(/(<\w+)(?=[\s>])(?![^>]*_tmplitem)([^>]*)/g,"$1 "+d+'="'+e.key+'" $2'):a:j(a,e,a._ctnt)}):e;if(g)return c;c=c.join("");c.replace(/^\s*([^<\s][^<]*)?(<[\w\W]+>)([^>]*[^>\s])?\s*$/,function(f,c,e,d){b=a(e).get();m(b);if(c)b=k(c).concat(b);if(d)b=b.concat(k(d))});return b?b:k(c)}function k(c){var b=document.createElement("div");b.innerHTML=c;return a.makeArray(b.childNodes)}function o(b){return new Function("jQuery","$item","var $=jQuery,call,__=[],$data=$item.data;with($data){__.push('"+a.trim(b).replace(/([\\'])/g,"\\$1").replace(/[\r\t\n]/g," ").replace(/\$\{([^\}]*)\}/g,"{{= $1}}").replace(/\{\{(\/?)(\w+|.)(?:\(((?:[^\}]|\}(?!\}))*?)?\))?(?:\s+(.*?)?)?(\(((?:[^\}]|\}(?!\}))*?)\))?\s*\}\}/g,function(m,l,k,g,b,c,d){var j=a.tmplPlugin.tag[k],i,e,f;if(!j)throw"Unknown template tag: "+k;i=j._default||[];if(c&&!/\w$/.test(b)){b+=c;c=""}if(b){b=h(b);d=d?","+h(d)+")":c?")":"";e=c?b.indexOf(".")>-1?b+h(c):"("+b+").call($item"+d:b;f=c?e:"(typeof("+b+")==='function'?("+b+").call($item):("+b+"))"}else f=e=i.$1||"null";g=h(g);return"');"+j[l?"close":"open"].split("$notnull_1").join(b?"typeof("+b+")!=='undefined' && ("+b+")!=null":"true").split("$1a").join(f).split("$1").join(e).split("$2").join(g||i.$2||"")+"__.push('"})+"');}return __;")}function n(c,b){c._wrap=j(c,true,a.isArray(b)?b:[q.test(b)?b:a(b).html()]).join("")}function h(a){return a?a.replace(/\\'/g,"'").replace(/\\\\/g,"\\"):null}function s(b){var 
a=document.createElement("div");a.appendChild(b.cloneNode(true));return a.innerHTML}function m(o){var n="_"+c,k,j,l={},e,p,h;for(e=0,p=o.length;e=0;h--)m(j[h]);m(k)}function m(j){var p,h=j,k,e,m;if(m=j.getAttribute(d)){while(h.parentNode&&(h=h.parentNode).nodeType===1&&!(p=h.getAttribute(d)));if(p!==m){h=h.parentNode?h.nodeType===11?0:h.getAttribute(d)||0:0;if(!(e=b[m])){e=f[m];e=g(e,b[h]||f[h]);e.key=++i;b[i]=e}c&&o(m)}j.removeAttribute(d)}else if(c&&(e=a.data(j,"tmplItem"))){o(e.key);b[e.key]=e;h=a.data(j.parentNode,"tmplItem");h=h?h.key:0}if(e){k=e;while(k&&k.key!=h){k.nodes.push(j);k=k.parent}delete e._ctnt;delete e._wrap;a.data(j,"tmplItem",e)}function o(a){a=a+n;e=l[a]=l[a]||g(e,b[e.parent.key+n]||e.parent)}}}function u(a,d,c,b){if(!a)return l.pop();l.push({_:a,tmplPlugin:d,item:this,data:c,options:b})}function w(d,c,b){return a.tmplPlugin(a.template(d),c,b,this)}function x(b,d){var c=b.options||{};c.wrapped=d;return a.tmplPlugin(a.template(b.tmplPlugin),b.data,c,b.item)}function v(d,c){var b=this._wrap;return a.map(a(a.isArray(b)?b.join(""):b).filter(d||"*"),function(a){return c?a.innerText||a.textContent:a.outerHTML||s(a)})}function t(){var b=this.nodes;a.tmplPlugin(null,null,null,this).insertBefore(b[0]);a(b).remove()}})(jQuery); - -/* - * jQuery TableSorter plugin - * http://autobahn.tablesorter.com/jquery.tablesorter.min.js - */ - -(function($){$.extend({tablesorter:new -function(){var parsers=[],widgets=[];this.defaults={cssHeader:"header",cssAsc:"headerSortUp",cssDesc:"headerSortDown",cssChildRow:"expand-child",sortInitialOrder:"asc",sortMultiSortKey:"shiftKey",sortForce:null,sortAppend:null,sortLocaleCompare:true,textExtraction:"simple",parsers:{},widgets:[],widgetZebra:{css:["even","odd"]},headers:{},widthFixed:false,cancelSelection:true,sortList:[],headerList:[],dateFormat:"us",decimal:'/\.|\,/g',onRenderHeader:null,selectorHeaders:'thead th',debug:false};function benchmark(s,d){log(s+","+(new Date().getTime()-d.getTime())+"ms");}this.benchmark=benchmark;function log(s){if(typeof console!="undefined"&&typeof console.debug!="undefined"){console.log(s);}else{alert(s);}}function buildParserCache(table,$headers){if(table.config.debug){var parsersDebug="";}if(table.tBodies.length==0)return;var rows=table.tBodies[0].rows;if(rows[0]){var list=[],cells=rows[0].cells,l=cells.length;for(var i=0;i1){arr=arr.concat(checkCellColSpan(table,headerArr,row++));}else{if(table.tHead.length==1||(cell.rowSpan>1||!r[row+1])){arr.push(cell);}}}return arr;};function checkHeaderMetadata(cell){if(($.metadata)&&($(cell).metadata().sorter===false)){return true;};return false;}function checkHeaderOptions(table,i){if((table.config.headers[i])&&(table.config.headers[i].sorter===false)){return true;};return false;}function checkHeaderOptionsSortingLocked(table,i){if((table.config.headers[i])&&(table.config.headers[i].lockedOrder))return table.config.headers[i].lockedOrder;return false;}function applyWidget(table){var c=table.config.widgets;var l=c.length;for(var i=0;i');$("tr:first td",table.tBodies[0]).each(function(){colgroup.append($('').css('width',$(this).width()));});$(table).prepend(colgroup);};}function updateHeaderSortCount(table,sortList){var c=table.config,l=sortList.length;for(var i=0;i b["+i+"]) ? 1 : 0));";};function makeSortTextDesc(i){return"((b["+i+"] < a["+i+"]) ? -1 : ((b["+i+"] > a["+i+"]) ? 
1 : 0));";};function makeSortNumeric(i){return"a["+i+"]-b["+i+"];";};function makeSortNumericDesc(i){return"b["+i+"]-a["+i+"];";};function sortText(a,b){if(table.config.sortLocaleCompare)return a.localeCompare(b);return((ab)?1:0));};function sortTextDesc(a,b){if(table.config.sortLocaleCompare)return b.localeCompare(a);return((ba)?1:0));};function sortNumeric(a,b){return a-b;};function sortNumericDesc(a,b){return b-a;};function getCachedSortType(parsers,i){return parsers[i].type;};this.construct=function(settings){return this.each(function(){if(!this.tHead||!this.tBodies)return;var $this,$document,$headers,cache,config,shiftDown=0,sortOrder;this.config={};config=$.extend(this.config,$.tablesorter.defaults,settings);$this=$(this);$.data(this,"tablesorter",config);$headers=buildHeaders(this);this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);var sortCSS=[config.cssDesc,config.cssAsc];fixColumnWidth(this);$headers.click(function(e){var totalRows=($this[0].tBodies[0]&&$this[0].tBodies[0].rows.length)||0;if(!this.sortDisabled&&totalRows>0){$this.trigger("sortStart");var $cell=$(this);var i=this.column;this.order=this.count++%2;if(this.lockedOrder)this.order=this.lockedOrder;if(!e[config.sortMultiSortKey]){config.sortList=[];if(config.sortForce!=null){var a=config.sortForce;for(var j=0;j0){$this.trigger("sorton",[config.sortList]);}applyWidget(this);});};this.addParser=function(parser){var l=parsers.length,a=true;for(var i=0;i 1 && String(value) !== "[object Object]") { - options = jQuery.extend({}, options); - - if (value === null || value === undefined) { - options.expires = -1; - } - - if (typeof options.expires === 'number') { - var days = options.expires, t = options.expires = new Date(); - t.setDate(t.getDate() + days); - } - - value = String(value); - - return (document.cookie = [ - encodeURIComponent(key), '=', - options.raw ? value : encodeURIComponent(value), - options.expires ? '; expires=' + options.expires.toUTCString() : '', // use expires attribute, max-age is not supported by IE - options.path ? '; path=' + options.path : '', - options.domain ? '; domain=' + options.domain : '', - options.secure ? '; secure' : '' - ].join('')); - } - - // key and possibly options given, get cookie... - options = value || {}; - var result, decode = options.raw ? function (s) { return s; } : decodeURIComponent; - return (result = new RegExp('(?:^|; )' + encodeURIComponent(key) + '=([^;]*)').exec(document.cookie)) ? decode(result[1]) : null; -}; diff --git a/src/lib/gae_mini_profiler/static/js/template.tmpl b/src/lib/gae_mini_profiler/static/js/template.tmpl deleted file mode 100755 index ad54c69..0000000 --- a/src/lib/gae_mini_profiler/static/js/template.tmpl +++ /dev/null @@ -1,313 +0,0 @@ - - - - - - - diff --git a/src/lib/gae_mini_profiler/templates/includes.html b/src/lib/gae_mini_profiler/templates/includes.html deleted file mode 100755 index e161a97..0000000 --- a/src/lib/gae_mini_profiler/templates/includes.html +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/src/lib/gae_mini_profiler/templates/shared.html b/src/lib/gae_mini_profiler/templates/shared.html deleted file mode 100755 index 9793d86..0000000 --- a/src/lib/gae_mini_profiler/templates/shared.html +++ /dev/null @@ -1,13 +0,0 @@ - - - - - -
-<body>
-  <div class="g-m-p-shared">
-    <div class="shared-teaser">
-      Shared profiler results
-    </div>
-  </div>
- {{profiler_includes}} - - diff --git a/src/lib/gae_mini_profiler/templatetags.py b/src/lib/gae_mini_profiler/templatetags.py deleted file mode 100755 index 02e9ec3..0000000 --- a/src/lib/gae_mini_profiler/templatetags.py +++ /dev/null @@ -1,25 +0,0 @@ -# use json in Python 2.7, fallback to simplejson for Python 2.5 -try: - import json -except ImportError: - import simplejson as json - -import profiler - - -def profiler_includes_request_id(request_id, show_immediately=False): - if not request_id: - return "" - - js_path = "/gae_mini_profiler/static/js/profiler.js" - css_path = "/gae_mini_profiler/static/css/profiler.css" - - return """ - - - - """ % (css_path, js_path, request_id, json.dumps(show_immediately)) - - -def profiler_includes(): - return profiler_includes_request_id(profiler.CurrentRequestId.get()) diff --git a/src/lib/gae_mini_profiler/unformatter/__init__.py b/src/lib/gae_mini_profiler/unformatter/__init__.py deleted file mode 100755 index a3f4ae6..0000000 --- a/src/lib/gae_mini_profiler/unformatter/__init__.py +++ /dev/null @@ -1,112 +0,0 @@ -import pprint -import re -import sys - - -_STRING = re.compile(r"^\s*(['\"])") -_NUMBER = re.compile(r"^\s*(\d+L?)\s*") -_BOOLEAN = re.compile(r"^\s*(True|False)\s*") -_DETAILS_OMMITTED = re.compile(r"^\s*\.\.\.\s*") -_LIST = re.compile(r"^\s*\[") -_LIST_SEPARATOR = re.compile(r"^\s*,\s*") -_LIST_END = re.compile(r"^\s*]\s*") -_DICT = re.compile(r"^\s*([\w-]+)<") -_DICT_FIELD = re.compile(r"^\s*([\w-]+)\s*=\s*") -_DICT_SEPARATOR = _LIST_SEPARATOR -_DICT_END = re.compile(r"^\s*>\s*") - - -def _consume_re(re, text): - m = re.match(text) - if not m: - return None, text - return m, text[m.end(0):] - - -def _parse_string(text, quote_char): - end = text.index(quote_char) - while text[end-1] == "\\": - end = text.index(quote_char, end+1) - return text[:end].decode('string_escape', 'ignore'), text[end+1:] - - -def _parse_list(text): - result = [] - while True: - m, text = _consume_re(_LIST_END, text) - if m: - return result, text - element, text = _parse(text) - result.append(element) - _, text = _consume_re(_LIST_SEPARATOR, text) - - -def _parse_dict(text, name): - args = [] - kwargs = {} - while True: - m, text = _consume_re(_DICT_END, text) - if m: - if args and kwargs: - obj = { 'args': args } - obj.update(kwargs) - elif args: - obj = args if len(args) > 1 else args[0] - elif kwargs: - obj = kwargs - else: - obj = None - return {name: obj}, text - m, text = _consume_re(_DICT_SEPARATOR, text) - if m: - continue - m, text = _consume_re(_DICT_FIELD, text) - if m: - element, text = _parse(text) - kwargs[ m.group(1).strip("_") ] = element - continue - element, text = _parse(text) - args.append(element) - - -def _parse(text): - m, text = _consume_re(_STRING, text) - if m: - return _parse_string(text, m.group(1)) - m, text = _consume_re(_NUMBER, text) - if m: - value = long(m.group(1)[:-1]) if m.group(1).endswith("L") else int(m.group(1)) - return value, text - m, text = _consume_re(_BOOLEAN, text) - if m: - return m.group(1) == "True", text - m, text = _consume_re(_DETAILS_OMMITTED, text) - if m: - return "...", text - m, text = _consume_re(_LIST, text) - if m: - return _parse_list(text) - m, text = _consume_re(_DICT, text) - if m: - return _parse_dict(text, m.group(1)) - raise ValueError(text) - - -def unformat(text): - result, remainder = _parse(text) - assert remainder == "" - return result - - -def main(): - from io import StringIO - f = open('examples.txt', 'r') - for line in f: - result = unformat(line.strip()) - pprint.pprint(result) - - 
raw_input('cont?') - f.close() - -if __name__ == '__main__': - main() diff --git a/src/lib/gae_mini_profiler/unformatter/examples.txt b/src/lib/gae_mini_profiler/unformatter/examples.txt deleted file mode 100755 index 7cd39fc..0000000 --- a/src/lib/gae_mini_profiler/unformatter/examples.txt +++ /dev/null @@ -1,16 +0,0 @@ -MemcacheGetRequest> -MemcacheGetResponse -MemcacheGetRequest, name_space_='764.1'> -MemcacheGetResponse]> -MemcacheGetRequest, name_space_='764.1'> -MemcacheGetResponseThank You Khan Academy!q\tU\x03keyq\ncgoogle.appengine.api.datastore_types\nKey\nq\x0b)\x81q\x0c}q\r(U\x0f_Key__referenceq\x0e(cgoogle.appengine.datastore.entity_pb...Interior Angles of Any Polygonr`\x02\x00\x00h\nh\x0b)\x81ra\x02\x00\x00}rb\x02\x00\x00(h\x0e(h\x0forc\x02\x00\x00U"j\x10dev~khan-academyr\x0e\x0b\x12\x05Video\x18\xed\xbc\xc5v\x0cbh\x11Nubh\x12X\x0b\x00\x00\x00ilsS0IDaQ5Ard\x02\x00\x00h\x14X3\x00\x00\x00http://img.youtube.com/vi/ilsS0IDaQ5A/hqdefault.jpgre\x02\x00\x00h\x16h\x17h\x18XD\x00\x00\x00Showing a generalized way to find the sum of the interior an…rf\x02\x00\x00uae.'>]> -MemcacheGetRequest, name_space_='764.1'> -MemcacheGetResponseSurveys and Samples\n\n
\n \n
\n \n \n \n \n \n'>]> -Query, stringvalue_='http://googleid.khanacademy.org/185804764220139124118'>>]>], has_app_=1, has_compile_=1, has_count_=1, has_kind_=1, has_limit_=1, kind_='UserData', lazy_init_lock_=LockType, limit_=1, order_=[Query_Order]> -QueryResult>>], key_=Reference]>>, lazy_init_lock_=LockType, start_inclusive_=False>]>, compiled_query_=CompiledQuery, ...>, ...> -MemcacheGetRequest, name_space_='764.1'> -MemcacheGetResponse]> -MemcacheGetRequest, name_space_='764.1'> -MemcacheGetResponse]> -PutRequest, key=Reference]>>, property=[Property>], raw_property=[Property>>]>]> -MemcacheGetResponse^F\x8c8t\x88>S\naR\x99x\x05\xf0\xcf?\xb4\x17\xe0\xcc\xbf\x01\xfa$l7\x88\x86q\x03cdatetime\ndatetime\nq\x04U\n\x07\xdd\x01\x1e\x13\x14#\x05h\xce\x85Rq\x05M\xe7\x17G?\xd4\xb7\x81\x00\x00\x00\x00tb.', ...>, ...]> diff --git a/src/lib/gae_mini_profiler/util.py b/src/lib/gae_mini_profiler/util.py deleted file mode 100755 index f18369a..0000000 --- a/src/lib/gae_mini_profiler/util.py +++ /dev/null @@ -1,17 +0,0 @@ -def seconds_fmt(f, n=0): - return milliseconds_fmt(f * 1000, n) - -def milliseconds_fmt(f, n=0): - return decimal_fmt(f, n) - -def decimal_fmt(f, n=0): - format = "%." + str(n) + "f" - return format % f - -def short_method_fmt(s): - return s[s.rfind("/") + 1:] - -def short_rpc_file_fmt(s): - if not s: - return "" - return s[s.find("/"):] diff --git a/src/lib/werkzeug/__init__.py b/src/lib/werkzeug/__init__.py deleted file mode 100644 index edccb26..0000000 --- a/src/lib/werkzeug/__init__.py +++ /dev/null @@ -1,149 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug - ~~~~~~~~ - - Werkzeug is the Swiss Army knife of Python web development. - - It provides useful classes and functions for any WSGI application to make - the life of a python web developer much easier. All of the provided - classes are independent from each other so you can mix it with any other - library. - - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -from types import ModuleType -import sys - - -# the version. Usually set automatically by a script. -__version__ = '0.8.3' - - -# This import magic raises concerns quite often which is why the implementation -# and motivation is explained here in detail now. -# -# The majority of the functions and classes provided by Werkzeug work on the -# HTTP and WSGI layer. There is no useful grouping for those which is why -# they are all importable from "werkzeug" instead of the modules where they are -# implemented. The downside of that is, that now everything would be loaded at -# once, even if unused. -# -# The implementation of a lazy-loading module in this file replaces the -# werkzeug package when imported from within. Attribute access to the werkzeug -# module will then lazily import from the modules that implement the objects. 
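The lazy-loading pattern this comment block describes is easy to demonstrate in isolation. On modern Python (3.7+), a module-level __getattr__ (PEP 562) gives the same effect as the ModuleType subclass that follows below; this is a simplified sketch with a hypothetical package, not Werkzeug's actual mechanism:

    # mypkg/__init__.py -- hypothetical package with lazy attribute loading.
    import importlib

    # Map each public name to the submodule that implements it.
    _origins = {
        "Response": "mypkg.wrappers",
        "run_simple": "mypkg.serving",
    }

    def __getattr__(name):
        # Import the implementing module only on first attribute access,
        # then cache the object so later lookups bypass this hook.
        if name in _origins:
            module = importlib.import_module(_origins[name])
            value = getattr(module, name)
            globals()[name] = value
            return value
        raise AttributeError(name)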
- - -# import mapping to objects in other modules -all_by_module = { - 'werkzeug.debug': ['DebuggedApplication'], - 'werkzeug.local': ['Local', 'LocalManager', 'LocalProxy', - 'LocalStack', 'release_local'], - 'werkzeug.templates': ['Template'], - 'werkzeug.serving': ['run_simple'], - 'werkzeug.test': ['Client', 'EnvironBuilder', 'create_environ', - 'run_wsgi_app'], - 'werkzeug.testapp': ['test_app'], - 'werkzeug.exceptions': ['abort', 'Aborter'], - 'werkzeug.urls': ['url_decode', 'url_encode', 'url_quote', - 'url_quote_plus', 'url_unquote', - 'url_unquote_plus', 'url_fix', 'Href', - 'iri_to_uri', 'uri_to_iri'], - 'werkzeug.formparser': ['parse_form_data'], - 'werkzeug.utils': ['escape', 'environ_property', - 'append_slash_redirect', 'redirect', - 'cached_property', 'import_string', - 'dump_cookie', 'parse_cookie', 'unescape', - 'format_string', 'find_modules', 'header_property', - 'html', 'xhtml', 'HTMLBuilder', - 'validate_arguments', 'ArgumentValidationError', - 'bind_arguments', 'secure_filename'], - 'werkzeug.wsgi': ['get_current_url', 'get_host', 'pop_path_info', - 'peek_path_info', 'SharedDataMiddleware', - 'DispatcherMiddleware', 'ClosingIterator', - 'FileWrapper', 'make_line_iter', 'LimitedStream', - 'responder', 'wrap_file', 'extract_path_info'], - 'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers', - 'EnvironHeaders', 'ImmutableList', - 'ImmutableDict', 'ImmutableMultiDict', - 'TypeConversionDict', 'ImmutableTypeConversionDict', - 'Accept', 'MIMEAccept', 'CharsetAccept', - 'LanguageAccept', 'RequestCacheControl', - 'ResponseCacheControl', 'ETags', 'HeaderSet', - 'WWWAuthenticate', 'Authorization', - 'FileMultiDict', 'CallbackDict', 'FileStorage', - 'OrderedMultiDict', 'ImmutableOrderedMultiDict'], - 'werkzeug.useragents': ['UserAgent'], - 'werkzeug.http': ['parse_etags', 'parse_date', 'http_date', - 'cookie_date', 'parse_cache_control_header', - 'is_resource_modified', 'parse_accept_header', - 'parse_set_header', 'quote_etag', 'unquote_etag', - 'generate_etag', 'dump_header', - 'parse_list_header', 'parse_dict_header', - 'parse_authorization_header', - 'parse_www_authenticate_header', - 'remove_entity_headers', 'is_entity_header', - 'remove_hop_by_hop_headers', 'parse_options_header', - 'dump_options_header', 'is_hop_by_hop_header', - 'unquote_header_value', - 'quote_header_value', 'HTTP_STATUS_CODES'], - 'werkzeug.wrappers': ['BaseResponse', 'BaseRequest', 'Request', - 'Response', 'AcceptMixin', 'ETagRequestMixin', - 'ETagResponseMixin', 'ResponseStreamMixin', - 'CommonResponseDescriptorsMixin', - 'UserAgentMixin', 'AuthorizationMixin', - 'WWWAuthenticateMixin', - 'CommonRequestDescriptorsMixin'], - 'werkzeug.security': ['generate_password_hash', 'check_password_hash'], - # the undocumented easteregg ;-) - 'werkzeug._internal': ['_easteregg'] -} - -# modules that should be imported when accessed as attributes of werkzeug -attribute_modules = frozenset(['exceptions', 'routing', 'script']) - - -object_origins = {} -for module, items in all_by_module.iteritems(): - for item in items: - object_origins[item] = module - - -class module(ModuleType): - """Automatically import objects from the modules.""" - - def __getattr__(self, name): - if name in object_origins: - module = __import__(object_origins[name], None, None, [name]) - for extra_name in all_by_module[module.__name__]: - setattr(self, extra_name, getattr(module, extra_name)) - return getattr(module, name) - elif name in attribute_modules: - __import__('werkzeug.' 
+ name) - return ModuleType.__getattribute__(self, name) - - def __dir__(self): - """Just show what we want to show.""" - result = list(new_module.__all__) - result.extend(('__file__', '__path__', '__doc__', '__all__', - '__docformat__', '__name__', '__path__', - '__package__', '__version__')) - return result - -# keep a reference to this module so that it's not garbage collected -old_module = sys.modules['werkzeug'] - - -# setup the new module and patch it into the dict of loaded modules -new_module = sys.modules['werkzeug'] = module('werkzeug') -new_module.__dict__.update({ - '__file__': __file__, - '__package__': 'werkzeug', - '__path__': __path__, - '__doc__': __doc__, - '__version__': __version__, - '__all__': tuple(object_origins) + tuple(attribute_modules), - '__docformat__': 'restructuredtext en' -}) diff --git a/src/lib/werkzeug/_internal.py b/src/lib/werkzeug/_internal.py deleted file mode 100644 index f148f14..0000000 --- a/src/lib/werkzeug/_internal.py +++ /dev/null @@ -1,405 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug._internal - ~~~~~~~~~~~~~~~~~~ - - This module provides internally used helpers and constants. - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import inspect -from weakref import WeakKeyDictionary -from cStringIO import StringIO -from Cookie import SimpleCookie, Morsel, CookieError -from time import gmtime -from datetime import datetime, date - - -_logger = None -_empty_stream = StringIO('') -_signature_cache = WeakKeyDictionary() -_epoch_ord = date(1970, 1, 1).toordinal() - - -HTTP_STATUS_CODES = { - 100: 'Continue', - 101: 'Switching Protocols', - 102: 'Processing', - 200: 'OK', - 201: 'Created', - 202: 'Accepted', - 203: 'Non Authoritative Information', - 204: 'No Content', - 205: 'Reset Content', - 206: 'Partial Content', - 207: 'Multi Status', - 226: 'IM Used', # see RFC 3229 - 300: 'Multiple Choices', - 301: 'Moved Permanently', - 302: 'Found', - 303: 'See Other', - 304: 'Not Modified', - 305: 'Use Proxy', - 307: 'Temporary Redirect', - 400: 'Bad Request', - 401: 'Unauthorized', - 402: 'Payment Required', # unused - 403: 'Forbidden', - 404: 'Not Found', - 405: 'Method Not Allowed', - 406: 'Not Acceptable', - 407: 'Proxy Authentication Required', - 408: 'Request Timeout', - 409: 'Conflict', - 410: 'Gone', - 411: 'Length Required', - 412: 'Precondition Failed', - 413: 'Request Entity Too Large', - 414: 'Request URI Too Long', - 415: 'Unsupported Media Type', - 416: 'Requested Range Not Satisfiable', - 417: 'Expectation Failed', - 418: 'I\'m a teapot', # see RFC 2324 - 422: 'Unprocessable Entity', - 423: 'Locked', - 424: 'Failed Dependency', - 426: 'Upgrade Required', - 449: 'Retry With', # proprietary MS extension - 500: 'Internal Server Error', - 501: 'Not Implemented', - 502: 'Bad Gateway', - 503: 'Service Unavailable', - 504: 'Gateway Timeout', - 505: 'HTTP Version Not Supported', - 507: 'Insufficient Storage', - 510: 'Not Extended' -} - - -class _Missing(object): - - def __repr__(self): - return 'no value' - - def __reduce__(self): - return '_missing' - -_missing = _Missing() - - -def _proxy_repr(cls): - def proxy_repr(self): - return '%s(%s)' % (self.__class__.__name__, cls.__repr__(self)) - return proxy_repr - - -def _get_environ(obj): - env = getattr(obj, 'environ', obj) - assert isinstance(env, dict), \ - '%r is not a WSGI environment (has to be a dict)' % type(obj).__name__ - return env - - -def _log(type, message, *args, **kwargs): - """Log into the internal 
werkzeug logger.""" - global _logger - if _logger is None: - import logging - _logger = logging.getLogger('werkzeug') - # Only set up a default log handler if the - # end-user application didn't set anything up. - if not logging.root.handlers and _logger.level == logging.NOTSET: - _logger.setLevel(logging.INFO) - handler = logging.StreamHandler() - _logger.addHandler(handler) - getattr(_logger, type)(message.rstrip(), *args, **kwargs) - - -def _parse_signature(func): - """Return a signature object for the function.""" - if hasattr(func, 'im_func'): - func = func.im_func - - # if we have a cached validator for this function, return it - parse = _signature_cache.get(func) - if parse is not None: - return parse - - # inspect the function signature and collect all the information - positional, vararg_var, kwarg_var, defaults = inspect.getargspec(func) - defaults = defaults or () - arg_count = len(positional) - arguments = [] - for idx, name in enumerate(positional): - if isinstance(name, list): - raise TypeError('cannot parse functions that unpack tuples ' - 'in the function signature') - try: - default = defaults[idx - arg_count] - except IndexError: - param = (name, False, None) - else: - param = (name, True, default) - arguments.append(param) - arguments = tuple(arguments) - - def parse(args, kwargs): - new_args = [] - missing = [] - extra = {} - - # consume as many arguments as positional as possible - for idx, (name, has_default, default) in enumerate(arguments): - try: - new_args.append(args[idx]) - except IndexError: - try: - new_args.append(kwargs.pop(name)) - except KeyError: - if has_default: - new_args.append(default) - else: - missing.append(name) - else: - if name in kwargs: - extra[name] = kwargs.pop(name) - - # handle extra arguments - extra_positional = args[arg_count:] - if vararg_var is not None: - new_args.extend(extra_positional) - extra_positional = () - if kwargs and not kwarg_var is not None: - extra.update(kwargs) - kwargs = {} - - return new_args, kwargs, missing, extra, extra_positional, \ - arguments, vararg_var, kwarg_var - _signature_cache[func] = parse - return parse - - -def _patch_wrapper(old, new): - """Helper function that forwards all the function details to the - decorated function.""" - try: - new.__name__ = old.__name__ - new.__module__ = old.__module__ - new.__doc__ = old.__doc__ - new.__dict__ = old.__dict__ - except Exception: - pass - return new - - -def _decode_unicode(value, charset, errors): - """Like the regular decode function but this one raises an - `HTTPUnicodeError` if errors is `strict`.""" - fallback = None - if errors.startswith('fallback:'): - fallback = errors[9:] - errors = 'strict' - try: - return value.decode(charset, errors) - except UnicodeError, e: - if fallback is not None: - return value.decode(fallback, 'replace') - from werkzeug.exceptions import HTTPUnicodeError - raise HTTPUnicodeError(str(e)) - - -def _iter_modules(path): - """Iterate over all modules in a package.""" - import os - import pkgutil - if hasattr(pkgutil, 'iter_modules'): - for importer, modname, ispkg in pkgutil.iter_modules(path): - yield modname, ispkg - return - from inspect import getmodulename - from pydoc import ispackage - found = set() - for path in path: - for filename in os.listdir(path): - p = os.path.join(path, filename) - modname = getmodulename(filename) - if modname and modname != '__init__': - if modname not in found: - found.add(modname) - yield modname, ispackage(modname) - - -def _dump_date(d, delim): - """Used for `http_date` and 
`cookie_date`.""" - if d is None: - d = gmtime() - elif isinstance(d, datetime): - d = d.utctimetuple() - elif isinstance(d, (int, long, float)): - d = gmtime(d) - return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % ( - ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday], - d.tm_mday, delim, - ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', - 'Oct', 'Nov', 'Dec')[d.tm_mon - 1], - delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec - ) - - -def _date_to_unix(arg): - """Converts a timetuple, integer or datetime object into the seconds from - epoch in utc. - """ - if isinstance(arg, datetime): - arg = arg.utctimetuple() - elif isinstance(arg, (int, long, float)): - return int(arg) - year, month, day, hour, minute, second = arg[:6] - days = date(year, month, 1).toordinal() - _epoch_ord + day - 1 - hours = days * 24 + hour - minutes = hours * 60 + minute - seconds = minutes * 60 + second - return seconds - - -class _ExtendedMorsel(Morsel): - _reserved = {'httponly': 'HttpOnly'} - _reserved.update(Morsel._reserved) - - def __init__(self, name=None, value=None): - Morsel.__init__(self) - if name is not None: - self.set(name, value, value) - - def OutputString(self, attrs=None): - httponly = self.pop('httponly', False) - result = Morsel.OutputString(self, attrs).rstrip('\t ;') - if httponly: - result += '; HttpOnly' - return result - - -class _ExtendedCookie(SimpleCookie): - """Form of the base cookie that doesn't raise a `CookieError` for - malformed keys. This has the advantage that broken cookies submitted - by nonstandard browsers don't cause the cookie to be empty. - """ - - def _BaseCookie__set(self, key, real_value, coded_value): - morsel = self.get(key, _ExtendedMorsel()) - try: - morsel.set(key, real_value, coded_value) - except CookieError: - pass - dict.__setitem__(self, key, morsel) - - -class _DictAccessorProperty(object): - """Baseclass for `environ_property` and `header_property`.""" - read_only = False - - def __init__(self, name, default=None, load_func=None, dump_func=None, - read_only=None, doc=None): - self.name = name - self.default = default - self.load_func = load_func - self.dump_func = dump_func - if read_only is not None: - self.read_only = read_only - self.__doc__ = doc - - def __get__(self, obj, type=None): - if obj is None: - return self - storage = self.lookup(obj) - if self.name not in storage: - return self.default - rv = storage[self.name] - if self.load_func is not None: - try: - rv = self.load_func(rv) - except (ValueError, TypeError): - rv = self.default - return rv - - def __set__(self, obj, value): - if self.read_only: - raise AttributeError('read only property') - if self.dump_func is not None: - value = self.dump_func(value) - self.lookup(obj)[self.name] = value - - def __delete__(self, obj): - if self.read_only: - raise AttributeError('read only property') - self.lookup(obj).pop(self.name, None) - - def __repr__(self): - return '<%s %s>' % ( - self.__class__.__name__, - self.name - ) - - -def _easteregg(app): - """Like the name says. 
But who knows how it works?""" - gyver = '\n'.join([x + (77 - len(x)) * ' ' for x in ''' -eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m -9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz -4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY -jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc -q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5 -jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317 -8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ -v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r -XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu -LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk -iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI -tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE -1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI -GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN -Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A -QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG -8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu -jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz -DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8 -MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4 -GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i -RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi -Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c -NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS -pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ -sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm -p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ -krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/ -nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt -mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p -7f2zLkGNv8b191cD/3vs9Q833z8t'''.decode('base64').decode('zlib').splitlines()]) - def easteregged(environ, start_response): - def injecting_start_response(status, headers, exc_info=None): - headers.append(('X-Powered-By', 'Werkzeug')) - return start_response(status, headers, exc_info) - if environ.get('QUERY_STRING') != 'macgybarchakku': - return app(environ, injecting_start_response) - injecting_start_response('200 OK', [('Content-Type', 'text/html')]) - return [''' - - - -About Werkzeug - - - -

-<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
-<p>the Swiss Army knife of Python web development.</p>
-<pre>%s\n\n\n</pre>
- -''' % gyver] - return easteregged diff --git a/src/lib/werkzeug/contrib/__init__.py b/src/lib/werkzeug/contrib/__init__.py deleted file mode 100644 index ffc48c9..0000000 --- a/src/lib/werkzeug/contrib/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.contrib - ~~~~~~~~~~~~~~~~ - - Contains user-submitted code that other users may find useful, but which - is not part of the Werkzeug core. Anyone can write code for inclusion in - the `contrib` package. All modules in this package are distributed as an - add-on library and thus are not part of Werkzeug itself. - - This file itself is mostly for informational purposes and to tell the - Python interpreter that `contrib` is a package. - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" diff --git a/src/lib/werkzeug/contrib/cache.py b/src/lib/werkzeug/contrib/cache.py deleted file mode 100644 index becd85b..0000000 --- a/src/lib/werkzeug/contrib/cache.py +++ /dev/null @@ -1,672 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.contrib.cache - ~~~~~~~~~~~~~~~~~~~~~~ - - The main problem with dynamic Web sites is, well, they're dynamic. Each - time a user requests a page, the webserver executes a lot of code, queries - the database, renders templates until the visitor gets the page he sees. - - This is a lot more expensive than just loading a file from the file system - and sending it to the visitor. - - For most Web applications, this overhead isn't a big deal but once it - becomes, you will be glad to have a cache system in place. - - How Caching Works - ================= - - Caching is pretty simple. Basically you have a cache object lurking around - somewhere that is connected to a remote cache or the file system or - something else. When the request comes in you check if the current page - is already in the cache and if so, you're returning it from the cache. - Otherwise you generate the page and put it into the cache. (Or a fragment - of the page, you don't have to cache the full thing) - - Here is a simple example of how to cache a sidebar for a template:: - - def get_sidebar(user): - identifier = 'sidebar_for/user%d' % user.id - value = cache.get(identifier) - if value is not None: - return value - value = generate_sidebar_for(user=user) - cache.set(identifier, value, timeout=60 * 5) - return value - - Creating a Cache Object - ======================= - - To create a cache object you just import the cache system of your choice - from the cache module and instantiate it. Then you can start working - with that object: - - >>> from werkzeug.contrib.cache import SimpleCache - >>> c = SimpleCache() - >>> c.set("foo", "value") - >>> c.get("foo") - 'value' - >>> c.get("missing") is None - True - - Please keep in mind that you have to create the cache and put it somewhere - you have access to it (either as a module global you can import or you just - put it into your WSGI application). - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. 
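The check-generate-store pattern in the docstring above generalizes naturally to a decorator. Here is a minimal sketch against the BaseCache get/set API defined below — the naive key scheme is ours, purely for illustration:

    import functools

    def cached(cache, timeout=5 * 60):
        # Memoize a side-effect-free function through a werkzeug-style cache.
        def decorator(f):
            @functools.wraps(f)
            def wrapper(*args):
                key = "%s:%r" % (f.__name__, args)  # naive key scheme
                rv = cache.get(key)
                if rv is None:
                    rv = f(*args)
                    cache.set(key, rv, timeout=timeout)
                return rv
            return wrapper
        return decorator

One caveat: a legitimately-None result is indistinguishable from a cache miss here, so such values would be recomputed on every call.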
-""" -import os -import re -import tempfile -try: - from hashlib import md5 -except ImportError: - from md5 import new as md5 -from itertools import izip -from time import time -from werkzeug.posixemulation import rename - -try: - import cPickle as pickle -except ImportError: - import pickle - - -def _items(mappingorseq): - """Wrapper for efficient iteration over mappings represented by dicts - or sequences:: - - >>> for k, v in _items((i, i*i) for i in xrange(5)): - ... assert k*k == v - - >>> for k, v in _items(dict((i, i*i) for i in xrange(5))): - ... assert k*k == v - - """ - return mappingorseq.iteritems() if hasattr(mappingorseq, 'iteritems') \ - else mappingorseq - - -class BaseCache(object): - """Baseclass for the cache systems. All the cache systems implement this - API or a superset of it. - - :param default_timeout: the default timeout that is used if no timeout is - specified on :meth:`set`. - """ - - def __init__(self, default_timeout=300): - self.default_timeout = default_timeout - - def get(self, key): - """Looks up key in the cache and returns the value for it. - If the key does not exist `None` is returned instead. - - :param key: the key to be looked up. - """ - return None - - def delete(self, key): - """Deletes `key` from the cache. If it does not exist in the cache - nothing happens. - - :param key: the key to delete. - """ - pass - - def get_many(self, *keys): - """Returns a list of values for the given keys. - For each key a item in the list is created. Example:: - - foo, bar = cache.get_many("foo", "bar") - - If a key can't be looked up `None` is returned for that key - instead. - - :param keys: The function accepts multiple keys as positional - arguments. - """ - return map(self.get, keys) - - def get_dict(self, *keys): - """Works like :meth:`get_many` but returns a dict:: - - d = cache.get_dict("foo", "bar") - foo = d["foo"] - bar = d["bar"] - - :param keys: The function accepts multiple keys as positional - arguments. - """ - return dict(izip(keys, self.get_many(*keys))) - - def set(self, key, value, timeout=None): - """Adds a new key/value to the cache (overwrites value, if key already - exists in the cache). - - :param key: the key to set - :param value: the value for the key - :param timeout: the cache timeout for the key (if not specified, - it uses the default timeout). - """ - pass - - def add(self, key, value, timeout=None): - """Works like :meth:`set` but does not overwrite the values of already - existing keys. - - :param key: the key to set - :param value: the value for the key - :param timeout: the cache timeout for the key or the default - timeout if not specified. - """ - pass - - def set_many(self, mapping, timeout=None): - """Sets multiple keys and values from a mapping. - - :param mapping: a mapping with the keys/values to set. - :param timeout: the cache timeout for the key (if not specified, - it uses the default timeout). - """ - for key, value in _items(mapping): - self.set(key, value, timeout) - - def delete_many(self, *keys): - """Deletes multiple keys at once. - - :param keys: The function accepts multiple keys as positional - arguments. - """ - for key in keys: - self.delete(key) - - def clear(self): - """Clears the cache. Keep in mind that not all caches support - completely clearing the cache. - """ - pass - - def inc(self, key, delta=1): - """Increments the value of a key by `delta`. If the key does - not yet exist it is initialized with `delta`. - - For supporting caches this is an atomic operation. 
- - :param key: the key to increment. - :param delta: the delta to add. - """ - self.set(key, (self.get(key) or 0) + delta) - - def dec(self, key, delta=1): - """Decrements the value of a key by `delta`. If the key does - not yet exist it is initialized with `-delta`. - - For supporting caches this is an atomic operation. - - :param key: the key to increment. - :param delta: the delta to subtract. - """ - self.set(key, (self.get(key) or 0) - delta) - - -class NullCache(BaseCache): - """A cache that doesn't cache. This can be useful for unit testing. - - :param default_timeout: a dummy parameter that is ignored but exists - for API compatibility with other caches. - """ - - -class SimpleCache(BaseCache): - """Simple memory cache for single process environments. This class exists - mainly for the development server and is not 100% thread safe. It tries - to use as many atomic operations as possible and no locks for simplicity - but it could happen under heavy load that keys are added multiple times. - - :param threshold: the maximum number of items the cache stores before - it starts deleting some. - :param default_timeout: the default timeout that is used if no timeout is - specified on :meth:`~BaseCache.set`. - """ - - def __init__(self, threshold=500, default_timeout=300): - BaseCache.__init__(self, default_timeout) - self._cache = {} - self.clear = self._cache.clear - self._threshold = threshold - - def _prune(self): - if len(self._cache) > self._threshold: - now = time() - for idx, (key, (expires, _)) in enumerate(self._cache.items()): - if expires <= now or idx % 3 == 0: - self._cache.pop(key, None) - - def get(self, key): - now = time() - expires, value = self._cache.get(key, (0, None)) - if expires > time(): - return pickle.loads(value) - - def set(self, key, value, timeout=None): - if timeout is None: - timeout = self.default_timeout - self._prune() - self._cache[key] = (time() + timeout, pickle.dumps(value, - pickle.HIGHEST_PROTOCOL)) - - def add(self, key, value, timeout=None): - if timeout is None: - timeout = self.default_timeout - if len(self._cache) > self._threshold: - self._prune() - item = (time() + timeout, pickle.dumps(value, - pickle.HIGHEST_PROTOCOL)) - self._cache.setdefault(key, item) - - def delete(self, key): - self._cache.pop(key, None) - - -_test_memcached_key = re.compile(r'[^\x00-\x21\xff]{1,250}$').match - -class MemcachedCache(BaseCache): - """A cache that uses memcached as backend. - - The first argument can either be an object that resembles the API of a - :class:`memcache.Client` or a tuple/list of server addresses. In the - event that a tuple/list is passed, Werkzeug tries to import the best - available memcache library. - - Implementation notes: This cache backend works around some limitations in - memcached to simplify the interface. For example unicode keys are encoded - to utf-8 on the fly. Methods such as :meth:`~BaseCache.get_dict` return - the keys in the same format as passed. Furthermore all get methods - silently ignore key errors to not cause problems when untrusted user data - is passed to the get methods which is often the case in web applications. - - :param servers: a list or tuple of server addresses or alternatively - a :class:`memcache.Client` or a compatible client. - :param default_timeout: the default timeout that is used if no timeout is - specified on :meth:`~BaseCache.set`. - :param key_prefix: a prefix that is added before all keys. This makes it - possible to use the same memcached server for different - applications. 
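The `key_prefix` just described is what lets two applications share one memcached instance without colliding. A minimal sketch; the server address and prefix strings are placeholders::

    from werkzeug.contrib.cache import MemcachedCache

    blog_cache = MemcachedCache(['127.0.0.1:11211'], key_prefix='blog/')
    shop_cache = MemcachedCache(['127.0.0.1:11211'], key_prefix='shop/')

    blog_cache.set('config', {'theme': 'dark'})
    assert shop_cache.get('config') is None   # separate namespaces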
Keep in mind that - :meth:`~BaseCache.clear` will also clear keys with a - different prefix. - """ - - def __init__(self, servers=None, default_timeout=300, key_prefix=None): - BaseCache.__init__(self, default_timeout) - if servers is None or isinstance(servers, (list, tuple)): - if servers is None: - servers = ['127.0.0.1:11211'] - self._client = self.import_preferred_memcache_lib(servers) - if self._client is None: - raise RuntimeError('no memcache module found') - else: - # NOTE: servers is actually an already initialized memcache - # client. - self._client = servers - - self.key_prefix = key_prefix - - def get(self, key): - if isinstance(key, unicode): - key = key.encode('utf-8') - if self.key_prefix: - key = self.key_prefix + key - # memcached doesn't support keys longer than that. Because often - # checks for so long keys can occour because it's tested from user - # submitted data etc we fail silently for getting. - if _test_memcached_key(key): - return self._client.get(key) - - def get_dict(self, *keys): - key_mapping = {} - have_encoded_keys = False - for key in keys: - if isinstance(key, unicode): - encoded_key = key.encode('utf-8') - have_encoded_keys = True - else: - encoded_key = key - if self.key_prefix: - encoded_key = self.key_prefix + encoded_key - if _test_memcached_key(key): - key_mapping[encoded_key] = key - d = rv = self._client.get_multi(key_mapping.keys()) - if have_encoded_keys or self.key_prefix: - rv = {} - for key, value in d.iteritems(): - rv[key_mapping[key]] = value - if len(rv) < len(keys): - for key in keys: - if key not in rv: - rv[key] = None - return rv - - def add(self, key, value, timeout=None): - if timeout is None: - timeout = self.default_timeout - if isinstance(key, unicode): - key = key.encode('utf-8') - if self.key_prefix: - key = self.key_prefix + key - self._client.add(key, value, timeout) - - def set(self, key, value, timeout=None): - if timeout is None: - timeout = self.default_timeout - if isinstance(key, unicode): - key = key.encode('utf-8') - if self.key_prefix: - key = self.key_prefix + key - self._client.set(key, value, timeout) - - def get_many(self, *keys): - d = self.get_dict(*keys) - return [d[key] for key in keys] - - def set_many(self, mapping, timeout=None): - if timeout is None: - timeout = self.default_timeout - new_mapping = {} - for key, value in _items(mapping): - if isinstance(key, unicode): - key = key.encode('utf-8') - if self.key_prefix: - key = self.key_prefix + key - new_mapping[key] = value - self._client.set_multi(new_mapping, timeout) - - def delete(self, key): - if isinstance(key, unicode): - key = key.encode('utf-8') - if self.key_prefix: - key = self.key_prefix + key - if _test_memcached_key(key): - self._client.delete(key) - - def delete_many(self, *keys): - new_keys = [] - for key in keys: - if isinstance(key, unicode): - key = key.encode('utf-8') - if self.key_prefix: - key = self.key_prefix + key - if _test_memcached_key(key): - new_keys.append(key) - self._client.delete_multi(new_keys) - - def clear(self): - self._client.flush_all() - - def inc(self, key, delta=1): - if isinstance(key, unicode): - key = key.encode('utf-8') - if self.key_prefix: - key = self.key_prefix + key - self._client.incr(key, delta) - - def dec(self, key, delta=1): - if isinstance(key, unicode): - key = key.encode('utf-8') - if self.key_prefix: - key = self.key_prefix + key - self._client.decr(key, delta) - - def import_preferred_memcache_lib(self, servers): - """Returns an initialized memcache client. 
Used by the constructor.""" - try: - import pylibmc - except ImportError: - pass - else: - return pylibmc.Client(servers) - - try: - from google.appengine.api import memcache - except ImportError: - pass - else: - return memcache.Client() - - try: - import memcache - except ImportError: - pass - else: - return memcache.Client(servers) - - -# backwards compatibility -GAEMemcachedCache = MemcachedCache - - -class RedisCache(BaseCache): - """Uses the Redis key-value store as a cache backend. - - The first argument can be either a string denoting the address of the Redis - server or an object resembling an instance of a redis.Redis class. - - Note: the Python Redis API already takes care of encoding unicode strings on - the fly. - - .. versionadded:: 0.7 - - .. versionadded:: 0.8 - `key_prefix` was added. - - .. versionchanged:: 0.8 - This cache backend now properly serializes objects. - - :param host: address of the Redis server or an object whose API is - compatible with the official Python Redis client (redis-py). - :param port: port number on which the Redis server listens for connections - :param default_timeout: the default timeout that is used if no timeout is - specified on :meth:`~BaseCache.set`. - :param key_prefix: A prefix that should be added to all keys. - """ - - def __init__(self, host='localhost', port=6379, password=None, - default_timeout=300, key_prefix=None): - BaseCache.__init__(self, default_timeout) - if isinstance(host, basestring): - try: - import redis - except ImportError: - raise RuntimeError('no redis module found') - self._client = redis.Redis(host=host, port=port, password=password) - else: - self._client = host - self.key_prefix = key_prefix or '' - - def dump_object(self, value): - """Dumps an object into a string for redis. By default it serializes - integers as regular strings and pickles everything else. - """ - t = type(value) - if t is int or t is long: - return str(value) - return '!' + pickle.dumps(value) - - def load_object(self, value): - """The reversal of :meth:`dump_object`. This might be called with - None. - """ - if value is None: - return None - if value.startswith('!'): - return pickle.loads(value[1:]) - try: - return int(value) - except ValueError: - # before 0.8 we did not have serialization. Still support that.
- return value - - def get(self, key): - return self.load_object(self._client.get(self.key_prefix + key)) - - def get_many(self, *keys): - if self.key_prefix: - keys = [self.key_prefix + key for key in keys] - return [self.load_object(x) for x in self._client.mget(keys)] - - def set(self, key, value, timeout=None): - if timeout is None: - timeout = self.default_timeout - dump = self.dump_object(value) - self._client.setex(self.key_prefix + key, dump, timeout) - - def add(self, key, value, timeout=None): - if timeout is None: - timeout = self.default_timeout - dump = self.dump_object(value) - added = self._client.setnx(self.key_prefix + key, dump) - if added: - self._client.expire(self.key_prefix + key, timeout) - - def set_many(self, mapping, timeout=None): - if timeout is None: - timeout = self.default_timeout - pipe = self._client.pipeline() - for key, value in _items(mapping): - dump = self.dump_object(value) - pipe.setex(self.key_prefix + key, dump, timeout) - pipe.execute() - - def delete(self, key): - self._client.delete(self.key_prefix + key) - - def delete_many(self, *keys): - if not keys: - return - if self.key_prefix: - keys = [self.key_prefix + key for key in keys] - self._client.delete(*keys) - - def clear(self): - if self.key_prefix: - keys = self._client.keys(self.key_prefix + '*') - if keys: - self._client.delete(*keys) - else: - self._client.flushdb() - - def inc(self, key, delta=1): - return self._client.incr(self.key_prefix + key, delta) - - def dec(self, key, delta=1): - return self._client.decr(self.key_prefix + key, delta) - - -class FileSystemCache(BaseCache): - """A cache that stores the items on the file system. This cache depends - on being the only user of the `cache_dir`. Make absolutely sure that - nobody but this cache stores files there or otherwise the cache will - randomly delete files therein. - - :param cache_dir: the directory where cache files are stored. - :param threshold: the maximum number of items the cache stores before - it starts deleting some. - :param default_timeout: the default timeout that is used if no timeout is - specified on :meth:`~BaseCache.set`. 
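The `dump_object` / `load_object` pair above gives the Redis backend a tiny wire format: integers are stored as plain digit strings (so Redis can still `incr` and `decr` them), and everything else is pickled behind a ``!`` marker. The same scheme in isolation, slightly simplified as a sketch::

    import pickle

    def dump_object(value):
        if isinstance(value, int):
            return str(value)
        return '!' + pickle.dumps(value)

    def load_object(value):
        if value is None:
            return None
        if value.startswith('!'):
            return pickle.loads(value[1:])
        try:
            return int(value)
        except ValueError:
            return value   # pre-0.8 data was stored unserialized

    assert load_object(dump_object(42)) == 42
    assert load_object(dump_object({'a': 1})) == {'a': 1}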
- :param mode: the file mode wanted for the cache files, default 0600 - """ - - #: used for temporary files by the FileSystemCache - _fs_transaction_suffix = '.__wz_cache' - - def __init__(self, cache_dir, threshold=500, default_timeout=300, mode=0600): - BaseCache.__init__(self, default_timeout) - self._path = cache_dir - self._threshold = threshold - self._mode = mode - if not os.path.exists(self._path): - os.makedirs(self._path) - - def _list_dir(self): - """return a list of (fully qualified) cache filenames - """ - return [os.path.join(self._path, fn) for fn in os.listdir(self._path) - if not fn.endswith(self._fs_transaction_suffix)] - - def _prune(self): - entries = self._list_dir() - if len(entries) > self._threshold: - now = time() - for idx, fname in enumerate(entries): - remove = False - f = None - try: - try: - f = open(fname, 'rb') - expires = pickle.load(f) - remove = expires <= now or idx % 3 == 0 - finally: - if f is not None: - f.close() - except Exception: - pass - if remove: - try: - os.remove(fname) - except (IOError, OSError): - pass - - def clear(self): - for fname in self._list_dir(): - try: - os.remove(fname) - except (IOError, OSError): - pass - - def _get_filename(self, key): - hash = md5(key).hexdigest() - return os.path.join(self._path, hash) - - def get(self, key): - filename = self._get_filename(key) - try: - f = open(filename, 'rb') - try: - if pickle.load(f) >= time(): - return pickle.load(f) - finally: - f.close() - os.remove(filename) - except Exception: - return None - - def add(self, key, value, timeout=None): - filename = self._get_filename(key) - if not os.path.exists(filename): - self.set(key, value, timeout) - - def set(self, key, value, timeout=None): - if timeout is None: - timeout = self.default_timeout - filename = self._get_filename(key) - self._prune() - try: - fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix, - dir=self._path) - f = os.fdopen(fd, 'wb') - try: - pickle.dump(int(time() + timeout), f, 1) - pickle.dump(value, f, pickle.HIGHEST_PROTOCOL) - finally: - f.close() - rename(tmp, filename) - os.chmod(filename, self._mode) - except (IOError, OSError): - pass - - def delete(self, key): - try: - os.remove(self._get_filename(key)) - except (IOError, OSError): - pass diff --git a/src/lib/werkzeug/contrib/fixers.py b/src/lib/werkzeug/contrib/fixers.py deleted file mode 100644 index 6286e6c..0000000 --- a/src/lib/werkzeug/contrib/fixers.py +++ /dev/null @@ -1,224 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.contrib.fixers - ~~~~~~~~~~~~~~~~~~~~~~~ - - .. versionadded:: 0.5 - - This module includes various helpers that fix bugs in web servers. They may - be necessary for some versions of a buggy web server but not others. We try - to stay updated with the status of the bugs as good as possible but you have - to make sure whether they fix the problem you encounter. - - If you notice bugs in webservers not fixed in this module consider - contributing a patch. - - :copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -from urllib import unquote -from werkzeug.http import parse_options_header, parse_cache_control_header, \ - parse_set_header -from werkzeug.useragents import UserAgent -from werkzeug.datastructures import Headers, ResponseCacheControl - - -class LighttpdCGIRootFix(object): - """Wrap the application in this middleware if you are using lighttpd - with FastCGI or CGI and the application is mounted on the URL root. 
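Like every fixer in this module, the class above is applied by wrapping the WSGI callable once at startup. A sketch, assuming `application` is an existing WSGI app served from the URL root by lighttpd::

    from werkzeug.contrib.fixers import LighttpdCGIRootFix

    application = LighttpdCGIRootFix(application)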
- - :param app: the WSGI application - """ - - def __init__(self, app): - self.app = app - - def __call__(self, environ, start_response): - # only set PATH_INFO for older versions of Lighty or if no - # server software is provided. That's because the test was - # added in newer Werkzeug versions and we don't want to break - # people's code if they are using this fixer in a test that - # does not set the SERVER_SOFTWARE key. - if 'SERVER_SOFTWARE' not in environ or \ - environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28': - environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \ - environ.get('PATH_INFO', '') - environ['SCRIPT_NAME'] = '' - return self.app(environ, start_response) - - -class PathInfoFromRequestUriFix(object): - """On windows environment variables are limited to the system charset - which makes it impossible to store the `PATH_INFO` variable in the - environment without loss of information on some systems. - - This is for example a problem for CGI scripts on a Windows Apache. - - This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`, - `REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the - fix can only be applied if the webserver supports either of these - variables. - - :param app: the WSGI application - """ - - def __init__(self, app): - self.app = app - - def __call__(self, environ, start_response): - for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL': - if key not in environ: - continue - request_uri = unquote(environ[key]) - script_name = unquote(environ.get('SCRIPT_NAME', '')) - if request_uri.startswith(script_name): - environ['PATH_INFO'] = request_uri[len(script_name):] \ - .split('?', 1)[0] - break - return self.app(environ, start_response) - - -class ProxyFix(object): - """This middleware can be applied to add HTTP proxy support to an - application that was not designed with HTTP proxies in mind. It - sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers. - - Do not use this middleware in non-proxy setups for security reasons. - - The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in - the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and - `werkzeug.proxy_fix.orig_http_host`. - - :param app: the WSGI application - """ - - def __init__(self, app): - self.app = app - - def get_remote_addr(self, forwarded_for): - """Selects the new remote addr from the given list of ips in - X-Forwarded-For. By default the first one is picked. - - .. versionadded:: 0.8 - """ - if forwarded_for: - return forwarded_for[0] - - def __call__(self, environ, start_response): - getter = environ.get - forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '') - forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',') - forwarded_host = getter('HTTP_X_FORWARDED_HOST', '') - environ.update({ - 'werkzeug.proxy_fix.orig_wsgi_url_scheme': getter('wsgi.url_scheme'), - 'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'), - 'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST') - }) - forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x] - remote_addr = self.get_remote_addr(forwarded_for) - if remote_addr is not None: - environ['REMOTE_ADDR'] = remote_addr - if forwarded_host: - environ['HTTP_HOST'] = forwarded_host - if forwarded_proto: - environ['wsgi.url_scheme'] = forwarded_proto - return self.app(environ, start_response) - - -class HeaderRewriterFix(object): - """This middleware can remove response headers and add others. 
This - is for example useful to remove the `Date` header from responses if you - are using a server that adds that header, no matter if it's present or - not or to add `X-Powered-By` headers:: - - app = HeaderRewriterFix(app, remove_headers=['Date'], - add_headers=[('X-Powered-By', 'WSGI')]) - - :param app: the WSGI application - :param remove_headers: a sequence of header keys that should be - removed. - :param add_headers: a sequence of ``(key, value)`` tuples that should - be added. - """ - - def __init__(self, app, remove_headers=None, add_headers=None): - self.app = app - self.remove_headers = set(x.lower() for x in (remove_headers or ())) - self.add_headers = list(add_headers or ()) - - def __call__(self, environ, start_response): - def rewriting_start_response(status, headers, exc_info=None): - new_headers = [] - for key, value in headers: - if key.lower() not in self.remove_headers: - new_headers.append((key, value)) - new_headers += self.add_headers - return start_response(status, new_headers, exc_info) - return self.app(environ, rewriting_start_response) - - -class InternetExplorerFix(object): - """This middleware fixes a couple of bugs with Microsoft Internet - Explorer. Currently the following fixes are applied: - - - removing of `Vary` headers for unsupported mimetypes which - causes troubles with caching. Can be disabled by passing - ``fix_vary=False`` to the constructor. - see: http://support.microsoft.com/kb/824847/en-us - - - removes offending headers to work around caching bugs in - Internet Explorer if `Content-Disposition` is set. Can be - disabled by passing ``fix_attach=False`` to the constructor. - - If it does not detect affected Internet Explorer versions it won't touch - the request / response. - """ - - # This code was inspired by Django fixers for the same bugs. 
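`ProxyFix` above is the fixer most deployments behind nginx or another reverse proxy end up needing: it trusts the `X-Forwarded-*` headers and stashes the original values back into the environ. A sketch; the commented values describe what a proxied request would see, not literal output::

    from werkzeug.contrib.fixers import ProxyFix

    app = ProxyFix(app)

    # For a request that came through the proxy:
    #   environ['REMOTE_ADDR']    <- first X-Forwarded-For entry
    #   environ['HTTP_HOST']      <- X-Forwarded-Host value
    #   environ['werkzeug.proxy_fix.orig_remote_addr']
    #                             <- the proxy's own address, kept for auditing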
The - # fix_vary and fix_attach fixers were originally implemented in Django - # by Michael Axiak and is available as part of the Django project: - # http://code.djangoproject.com/ticket/4148 - - def __init__(self, app, fix_vary=True, fix_attach=True): - self.app = app - self.fix_vary = fix_vary - self.fix_attach = fix_attach - - def fix_headers(self, environ, headers, status=None): - if self.fix_vary: - header = headers.get('content-type', '') - mimetype, options = parse_options_header(header) - if mimetype not in ('text/html', 'text/plain', 'text/sgml'): - headers.pop('vary', None) - - if self.fix_attach and 'content-disposition' in headers: - pragma = parse_set_header(headers.get('pragma', '')) - pragma.discard('no-cache') - header = pragma.to_header() - if not header: - headers.pop('pragma', '') - else: - headers['Pragma'] = header - header = headers.get('cache-control', '') - if header: - cc = parse_cache_control_header(header, - cls=ResponseCacheControl) - cc.no_cache = None - cc.no_store = False - header = cc.to_header() - if not header: - headers.pop('cache-control', '') - else: - headers['Cache-Control'] = header - - def run_fixed(self, environ, start_response): - def fixing_start_response(status, headers, exc_info=None): - self.fix_headers(environ, Headers.linked(headers), status) - return start_response(status, headers, exc_info) - return self.app(environ, fixing_start_response) - - def __call__(self, environ, start_response): - ua = UserAgent(environ) - if ua.browser != 'msie': - return self.app(environ, start_response) - return self.run_fixed(environ, start_response) diff --git a/src/lib/werkzeug/contrib/iterio.py b/src/lib/werkzeug/contrib/iterio.py deleted file mode 100644 index 0718659..0000000 --- a/src/lib/werkzeug/contrib/iterio.py +++ /dev/null @@ -1,277 +0,0 @@ -# -*- coding: utf-8 -*- -r""" - werkzeug.contrib.iterio - ~~~~~~~~~~~~~~~~~~~~~~~ - - This module implements a :class:`IterIO` that converts an iterator into - a stream object and the other way round. Converting streams into - iterators requires the `greenlet`_ module. - - To convert an iterator into a stream all you have to do is to pass it - directly to the :class:`IterIO` constructor. In this example we pass it - a newly created generator:: - - def foo(): - yield "something\n" - yield "otherthings" - stream = IterIO(foo()) - print stream.read() # read the whole iterator - - The other way round works a bit different because we have to ensure that - the code execution doesn't take place yet. An :class:`IterIO` call with a - callable as first argument does two things. The function itself is passed - an :class:`IterIO` stream it can feed. The object returned by the - :class:`IterIO` constructor on the other hand is not an stream object but - an iterator:: - - def foo(stream): - stream.write("some") - stream.write("thing") - stream.flush() - stream.write("otherthing") - iterator = IterIO(foo) - print iterator.next() # prints something - print iterator.next() # prints otherthing - iterator.next() # raises StopIteration - - .. _greenlet: http://codespeak.net/py/dist/greenlet.html - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -try: - import greenlet -except ImportError: - greenlet = None - - -class IterIO(object): - """Instances of this object implement an interface compatible with the - standard Python :class:`file` object. Streams are either read-only or - write-only depending on how the object is created. 
- """ - - def __new__(cls, obj): - try: - iterator = iter(obj) - except TypeError: - return IterI(obj) - return IterO(iterator) - - def __iter__(self): - return self - - def tell(self): - if self.closed: - raise ValueError('I/O operation on closed file') - return self.pos - - def isatty(self): - if self.closed: - raise ValueError('I/O operation on closed file') - return False - - def seek(self, pos, mode=0): - if self.closed: - raise ValueError('I/O operation on closed file') - raise IOError(9, 'Bad file descriptor') - - def truncate(self, size=None): - if self.closed: - raise ValueError('I/O operation on closed file') - raise IOError(9, 'Bad file descriptor') - - def write(self, s): - if self.closed: - raise ValueError('I/O operation on closed file') - raise IOError(9, 'Bad file descriptor') - - def writelines(self, list): - if self.closed: - raise ValueError('I/O operation on closed file') - raise IOError(9, 'Bad file descriptor') - - def read(self, n=-1): - if self.closed: - raise ValueError('I/O operation on closed file') - raise IOError(9, 'Bad file descriptor') - - def readlines(self, sizehint=0): - if self.closed: - raise ValueError('I/O operation on closed file') - raise IOError(9, 'Bad file descriptor') - - def readline(self, length=None): - if self.closed: - raise ValueError('I/O operation on closed file') - raise IOError(9, 'Bad file descriptor') - - def flush(self): - if self.closed: - raise ValueError('I/O operation on closed file') - raise IOError(9, 'Bad file descriptor') - - def next(self): - if self.closed: - raise StopIteration() - line = self.readline() - if not line: - raise StopIteration() - return line - - -class IterI(IterIO): - """Convert an stream into an iterator.""" - - def __new__(cls, func): - if greenlet is None: - raise RuntimeError('IterI requires greenlet support') - stream = object.__new__(cls) - stream._parent = greenlet.getcurrent() - stream._buffer = [] - stream.closed = False - stream.pos = 0 - - def run(): - func(stream) - stream.flush() - - g = greenlet.greenlet(run, stream._parent) - while 1: - rv = g.switch() - if not rv: - return - yield rv[0] - - def close(self): - if not self.closed: - self.closed = True - - def write(self, s): - if self.closed: - raise ValueError('I/O operation on closed file') - self.pos += len(s) - self._buffer.append(s) - - def writelines(self, list): - self.write(''.join(list)) - - def flush(self): - if self.closed: - raise ValueError('I/O operation on closed file') - data = ''.join(self._buffer) - self._buffer = [] - self._parent.switch((data,)) - - -class IterO(IterIO): - """Iter output. 
Wrap an iterator and give it a stream like interface.""" - - def __new__(cls, gen): - self = object.__new__(cls) - self._gen = gen - self._buf = '' - self.closed = False - self.pos = 0 - return self - - def __iter__(self): - return self - - def close(self): - if not self.closed: - self.closed = True - if hasattr(self._gen, 'close'): - self._gen.close() - - def seek(self, pos, mode=0): - if self.closed: - raise ValueError('I/O operation on closed file') - if mode == 1: - pos += self.pos - elif mode == 2: - self.read() - self.pos = min(self.pos, self.pos + pos) - return - elif mode != 0: - raise IOError('Invalid argument') - buf = [] - try: - tmp_end_pos = len(self._buf) - while pos > tmp_end_pos: - item = self._gen.next() - tmp_end_pos += len(item) - buf.append(item) - except StopIteration: - pass - if buf: - self._buf += ''.join(buf) - self.pos = max(0, pos) - - def read(self, n=-1): - if self.closed: - raise ValueError('I/O operation on closed file') - if n < 0: - self._buf += ''.join(self._gen) - result = self._buf[self.pos:] - self.pos += len(result) - return result - new_pos = self.pos + n - buf = [] - try: - tmp_end_pos = len(self._buf) - while new_pos > tmp_end_pos: - item = self._gen.next() - tmp_end_pos += len(item) - buf.append(item) - except StopIteration: - pass - if buf: - self._buf += ''.join(buf) - new_pos = max(0, new_pos) - try: - return self._buf[self.pos:new_pos] - finally: - self.pos = min(new_pos, len(self._buf)) - - def readline(self, length=None): - if self.closed: - raise ValueError('I/O operation on closed file') - nl_pos = self._buf.find('\n', self.pos) - buf = [] - try: - pos = self.pos - while nl_pos < 0: - item = self._gen.next() - local_pos = item.find('\n') - buf.append(item) - if local_pos >= 0: - nl_pos = pos + local_pos - break - pos += len(item) - except StopIteration: - pass - if buf: - self._buf += ''.join(buf) - if nl_pos < 0: - new_pos = len(self._buf) - else: - new_pos = nl_pos + 1 - if length is not None and self.pos + length < new_pos: - new_pos = self.pos + length - try: - return self._buf[self.pos:new_pos] - finally: - self.pos = min(new_pos, len(self._buf)) - - def readlines(self, sizehint=0): - total = 0 - lines = [] - line = self.readline() - while line: - lines.append(line) - total += len(line) - if 0 < sizehint <= total: - break - line = self.readline() - return lines diff --git a/src/lib/werkzeug/contrib/kickstart.py b/src/lib/werkzeug/contrib/kickstart.py deleted file mode 100644 index 43c0e7c..0000000 --- a/src/lib/werkzeug/contrib/kickstart.py +++ /dev/null @@ -1,288 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.contrib.kickstart - ~~~~~~~~~~~~~~~~~~~~~~~~~~ - - This module provides some simple shortcuts to make using Werkzeug simpler - for small scripts. - - These improvements include predefined `Request` and `Response` objects as - well as a predefined `Application` object which can be customized in child - classes, of course. The `Request` and `Reponse` objects handle URL - generation as well as sessions via `werkzeug.contrib.sessions` and are - purely optional. - - There is also some integration of template engines. The template loaders - are, of course, not neccessary to use the template engines in Werkzeug, - but they provide a common interface. Currently supported template engines - include Werkzeug's minitmpl and Genshi_. Support for other engines can be - added in a trivial way. These loaders provide a template interface - similar to the one used by Django_. - - .. _Genshi: http://genshi.edgewall.org/ - .. 
_Django: http://www.djangoproject.com/ - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -from os import path -from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase -from werkzeug.templates import Template -from werkzeug.exceptions import HTTPException -from werkzeug.routing import RequestRedirect - -__all__ = ['Request', 'Response', 'TemplateNotFound', 'TemplateLoader', - 'GenshiTemplateLoader', 'Application'] - -from warnings import warn -warn(DeprecationWarning('werkzeug.contrib.kickstart is deprecated and ' - 'will be removed in Werkzeug 1.0')) - - -class Request(RequestBase): - """A handy subclass of the base request that adds a URL builder. - It when supplied a session store, it is also able to handle sessions. - """ - - def __init__(self, environ, url_map, - session_store=None, cookie_name=None): - # call the parent for initialization - RequestBase.__init__(self, environ) - # create an adapter - self.url_adapter = url_map.bind_to_environ(environ) - # create all stuff for sessions - self.session_store = session_store - self.cookie_name = cookie_name - - if session_store is not None and cookie_name is not None: - if cookie_name in self.cookies: - # get the session out of the storage - self.session = session_store.get(self.cookies[cookie_name]) - else: - # create a new session - self.session = session_store.new() - - def url_for(self, callback, **values): - return self.url_adapter.build(callback, values) - - -class Response(ResponseBase): - """ - A subclass of base response which sets the default mimetype to text/html. - It the `Request` that came in is using Werkzeug sessions, this class - takes care of saving that session. - """ - default_mimetype = 'text/html' - - def __call__(self, environ, start_response): - # get the request object - request = environ['werkzeug.request'] - - if request.session_store is not None: - # save the session if neccessary - request.session_store.save_if_modified(request.session) - - # set the cookie for the browser if it is not there: - if request.cookie_name not in request.cookies: - self.set_cookie(request.cookie_name, request.session.sid) - - # go on with normal response business - return ResponseBase.__call__(self, environ, start_response) - - -class Processor(object): - """A request and response processor - it is what Django calls a - middleware, but Werkzeug also includes straight-foward support for real - WSGI middlewares, so another name was chosen. - - The code of this processor is derived from the example in the Werkzeug - trac, called `Request and Response Processor - `_ - """ - - def process_request(self, request): - return request - - def process_response(self, request, response): - return response - - def process_view(self, request, view_func, view_args, view_kwargs): - """process_view() is called just before the Application calls the - function specified by view_func. - - If this returns None, the Application processes the next Processor, - and if it returns something else (like a Response instance), that - will be returned without any further processing. - """ - return None - - def process_exception(self, request, exception): - return None - - -class Application(object): - """A generic WSGI application which can be used to start with Werkzeug in - an easy, straightforward way. 
- """ - - def __init__(self, name, url_map, session=False, processors=None): - # save the name and the URL-map, as it'll be needed later on - self.name = name - self.url_map = url_map - # save the list of processors if supplied - self.processors = processors or [] - # create an instance of the storage - if session: - self.store = session - else: - self.store = None - - def __call__(self, environ, start_response): - # create a request - with or without session support - if self.store is not None: - request = Request(environ, self.url_map, - session_store=self.store, cookie_name='%s_sid' % self.name) - else: - request = Request(environ, self.url_map) - - # apply the request processors - for processor in self.processors: - request = processor.process_request(request) - - try: - # find the callback to which the URL is mapped - callback, args = request.url_adapter.match(request.path) - except (HTTPException, RequestRedirect), e: - response = e - else: - # check all view processors - for processor in self.processors: - action = processor.process_view(request, callback, (), args) - if action is not None: - # it is overriding the default behaviour, this is - # short-circuiting the processing, so it returns here - return action(environ, start_response) - - try: - response = callback(request, **args) - except Exception, exception: - # the callback raised some exception, need to process that - for processor in reversed(self.processors): - # filter it through the exception processor - action = processor.process_exception(request, exception) - if action is not None: - # the exception processor returned some action - return action(environ, start_response) - # still not handled by a exception processor, so re-raise - raise - - # apply the response processors - for processor in reversed(self.processors): - response = processor.process_response(request, response) - - # return the completely processed response - return response(environ, start_response) - - - def config_session(self, store, expiration='session'): - """ - Configures the setting for cookies. You can also disable cookies by - setting store to None. - """ - self.store = store - # expiration=session is the default anyway - # TODO: add settings to define the expiration date, the domain, the - # path any maybe the secure parameter. - - -class TemplateNotFound(IOError, LookupError): - """ - A template was not found by the template loader. - """ - - def __init__(self, name): - IOError.__init__(self, name) - self.name = name - - -class TemplateLoader(object): - """ - A simple loader interface for the werkzeug minitmpl - template language. 
- """ - - def __init__(self, search_path, encoding='utf-8'): - self.search_path = path.abspath(search_path) - self.encoding = encoding - - def get_template(self, name): - """Get a template from a given name.""" - filename = path.join(self.search_path, *[p for p in name.split('/') - if p and p[0] != '.']) - if not path.exists(filename): - raise TemplateNotFound(name) - return Template.from_file(filename, self.encoding) - - def render_to_response(self, *args, **kwargs): - """Load and render a template into a response object.""" - return Response(self.render_to_string(*args, **kwargs)) - - def render_to_string(self, *args, **kwargs): - """Load and render a template into a unicode string.""" - try: - template_name, args = args[0], args[1:] - except IndexError: - raise TypeError('name of template required') - return self.get_template(template_name).render(*args, **kwargs) - - -class GenshiTemplateLoader(TemplateLoader): - """A unified interface for loading Genshi templates. Actually a quite thin - wrapper for Genshi's TemplateLoader. - - It sets some defaults that differ from the Genshi loader, most notably - auto_reload is active. All imporant options can be passed through to - Genshi. - The default output type is 'html', but can be adjusted easily by changing - the `output_type` attribute. - """ - def __init__(self, search_path, encoding='utf-8', **kwargs): - TemplateLoader.__init__(self, search_path, encoding) - # import Genshi here, because we don't want a general Genshi - # dependency, only a local one - from genshi.template import TemplateLoader as GenshiLoader - from genshi.template.loader import TemplateNotFound - - self.not_found_exception = TemplateNotFound - # set auto_reload to True per default - reload_template = kwargs.pop('auto_reload', True) - # get rid of default_encoding as this template loaders overwrites it - # with the value of encoding - kwargs.pop('default_encoding', None) - - # now, all arguments are clean, pass them on - self.loader = GenshiLoader(search_path, default_encoding=encoding, - auto_reload=reload_template, **kwargs) - - # the default output is HTML but can be overridden easily - self.output_type = 'html' - self.encoding = encoding - - def get_template(self, template_name): - """Get the template which is at the given name""" - try: - return self.loader.load(template_name, encoding=self.encoding) - except self.not_found_exception, e: - # catch the exception raised by Genshi, convert it into a werkzeug - # exception (for the sake of consistency) - raise TemplateNotFound(template_name) - - def render_to_string(self, template_name, context=None): - """Load and render a template into an unicode string""" - # create an empty context if no context was specified - context = context or {} - tmpl = self.get_template(template_name) - # render the template into a unicode string (None means unicode) - return tmpl. \ - generate(**context). \ - render(self.output_type, encoding=None) diff --git a/src/lib/werkzeug/contrib/profiler.py b/src/lib/werkzeug/contrib/profiler.py deleted file mode 100644 index 58d1465..0000000 --- a/src/lib/werkzeug/contrib/profiler.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.contrib.profiler - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - This module provides a simple WSGI profiler middleware for finding - bottlenecks in web application. It uses the :mod:`profile` or - :mod:`cProfile` module to do the profiling and writes the stats to the - stream provided (defaults to stderr). 
- - Example usage:: - - from werkzeug.contrib.profiler import ProfilerMiddleware - app = ProfilerMiddleware(app) - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import sys -try: - try: - from cProfile import Profile - except ImportError: - from profile import Profile - from pstats import Stats - available = True -except ImportError: - available = False - - -class MergeStream(object): - """An object that redirects `write` calls to multiple streams. - Use this to log to both `sys.stdout` and a file:: - - f = open('profiler.log', 'w') - stream = MergeStream(sys.stdout, f) - profiler = ProfilerMiddleware(app, stream) - """ - - def __init__(self, *streams): - if not streams: - raise TypeError('at least one stream must be given') - self.streams = streams - - def write(self, data): - for stream in self.streams: - stream.write(data) - - -class ProfilerMiddleware(object): - """Simple profiler middleware. Wraps a WSGI application and profiles - a request. This intentionally buffers the response so that timings are - more exact. - - For the exact meaning of `sort_by` and `restrictions` consult the - :mod:`profile` documentation. - - :param app: the WSGI application to profile. - :param stream: the stream for the profiled stats. defaults to stderr. - :param sort_by: a tuple of columns to sort the result by. - :param restrictions: a tuple of profiling strictions. - """ - - def __init__(self, app, stream=None, - sort_by=('time', 'calls'), restrictions=()): - if not available: - raise RuntimeError('the profiler is not available because ' - 'profile or pstat is not installed.') - self._app = app - self._stream = stream or sys.stdout - self._sort_by = sort_by - self._restrictions = restrictions - - def __call__(self, environ, start_response): - response_body = [] - - def catching_start_response(status, headers, exc_info=None): - start_response(status, headers, exc_info) - return response_body.append - - def runapp(): - appiter = self._app(environ, catching_start_response) - response_body.extend(appiter) - if hasattr(appiter, 'close'): - appiter.close() - - p = Profile() - p.runcall(runapp) - body = ''.join(response_body) - stats = Stats(p, stream=self._stream) - stats.sort_stats(*self._sort_by) - - self._stream.write('-' * 80) - self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO')) - stats.print_stats(*self._restrictions) - self._stream.write('-' * 80 + '\n\n') - - return [body] - - -def make_action(app_factory, hostname='localhost', port=5000, - threaded=False, processes=1, stream=None, - sort_by=('time', 'calls'), restrictions=()): - """Return a new callback for :mod:`werkzeug.script` that starts a local - server with the profiler enabled. 
- - :: - - from werkzeug.contrib import profiler - action_profile = profiler.make_action(make_app) - """ - def action(hostname=('h', hostname), port=('p', port), - threaded=threaded, processes=processes): - """Start a new development server.""" - from werkzeug.serving import run_simple - app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions) - run_simple(hostname, port, app, False, None, threaded, processes) - return action diff --git a/src/lib/werkzeug/contrib/securecookie.py b/src/lib/werkzeug/contrib/securecookie.py deleted file mode 100644 index 9e6feeb..0000000 --- a/src/lib/werkzeug/contrib/securecookie.py +++ /dev/null @@ -1,332 +0,0 @@ -# -*- coding: utf-8 -*- -r""" - werkzeug.contrib.securecookie - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - This module implements a cookie that is not alterable from the client - because it adds a checksum the server checks for. You can use it as - session replacement if all you have is a user id or something to mark - a logged in user. - - Keep in mind that the data is still readable from the client as a - normal cookie is. However you don't have to store and flush the - sessions you have at the server. - - Example usage: - - >>> from werkzeug.contrib.securecookie import SecureCookie - >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef") - - Dumping into a string so that one can store it in a cookie: - - >>> value = x.serialize() - - Loading from that string again: - - >>> x = SecureCookie.unserialize(value, "deadbeef") - >>> x["baz"] - (1, 2, 3) - - If someone modifies the cookie and the checksum is wrong the unserialize - method will fail silently and return a new empty `SecureCookie` object. - - Keep in mind that the values will be visible in the cookie so do not - store data in a cookie you don't want the user to see. - - Application Integration - ======================= - - If you are using the werkzeug request objects you could integrate the - secure cookie into your application like this:: - - from werkzeug.utils import cached_property - from werkzeug.wrappers import BaseRequest - from werkzeug.contrib.securecookie import SecureCookie - - # don't use this key but a different one; you could just use - # os.urandom(20) to get something random - SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea' - - class Request(BaseRequest): - - @cached_property - def client_session(self): - data = self.cookies.get('session_data') - if not data: - return SecureCookie(secret_key=SECRET_KEY) - return SecureCookie.unserialize(data, SECRET_KEY) - - def application(environ, start_response): - request = Request(environ, start_response) - - # get a response object here - response = ... - - if request.client_session.should_save: - session_data = request.client_session.serialize() - response.set_cookie('session_data', session_data, - httponly=True) - return response(environ, start_response) - - A less verbose integration can be achieved by using shorthand methods:: - - class Request(BaseRequest): - - @cached_property - def client_session(self): - return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET) - - def application(environ, start_response): - request = Request(environ, start_response) - - # get a response object here - response = ... - - request.client_session.save_cookie(response) - return response(environ, start_response) - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. 
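The module intro above makes two points worth demonstrating: the secret key must be random and private, and the payload, while tamper-proof, stays readable by the client. A short round trip as a sketch, generating the key with `os.urandom` as the intro suggests::

    import os
    from werkzeug.contrib.securecookie import SecureCookie

    SECRET_KEY = os.urandom(20)    # must stay stable across restarts

    x = SecureCookie({'user_id': 42}, SECRET_KEY)
    value = x.serialize()

    # flipping a single character invalidates the MAC; unserialize then
    # fails silently and hands back an empty SecureCookie
    tampered = value[:-1] + ('A' if value[-1] != 'A' else 'B')
    y = SecureCookie.unserialize(tampered, SECRET_KEY)
    assert 'user_id' not in y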
-""" -import sys -import cPickle as pickle -from hmac import new as hmac -from time import time -from werkzeug.urls import url_quote_plus, url_unquote_plus -from werkzeug._internal import _date_to_unix -from werkzeug.contrib.sessions import ModificationTrackingDict -from werkzeug.security import safe_str_cmp - - -# rather ugly way to import the correct hash method. Because -# hmac either accepts modules with a new method (sha, md5 etc.) -# or a hashlib factory function we have to figure out what to -# pass to it. If we have 2.5 or higher (so not 2.4 with a -# custom hashlib) we import from hashlib and fail if it does -# not exist (have seen that in old OS X versions). -# in all other cases the now deprecated sha module is used. -_default_hash = None -if sys.version_info >= (2, 5): - try: - from hashlib import sha1 as _default_hash - except ImportError: - pass -if _default_hash is None: - import sha as _default_hash - - -class UnquoteError(Exception): - """Internal exception used to signal failures on quoting.""" - - -class SecureCookie(ModificationTrackingDict): - """Represents a secure cookie. You can subclass this class and provide - an alternative mac method. The import thing is that the mac method - is a function with a similar interface to the hashlib. Required - methods are update() and digest(). - - Example usage: - - >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef") - >>> x["foo"] - 42 - >>> x["baz"] - (1, 2, 3) - >>> x["blafasel"] = 23 - >>> x.should_save - True - - :param data: the initial data. Either a dict, list of tuples or `None`. - :param secret_key: the secret key. If not set `None` or not specified - it has to be set before :meth:`serialize` is called. - :param new: The initial value of the `new` flag. - """ - - #: The hash method to use. This has to be a module with a new function - #: or a function that creates a hashlib object. Such as `hashlib.md5` - #: Subclasses can override this attribute. The default hash is sha1. - #: Make sure to wrap this in staticmethod() if you store an arbitrary - #: function there such as hashlib.sha1 which might be implemented - #: as a function. - hash_method = staticmethod(_default_hash) - - #: the module used for serialization. Unless overriden by subclasses - #: the standard pickle module is used. - serialization_method = pickle - - #: if the contents should be base64 quoted. This can be disabled if the - #: serialization process returns cookie safe strings only. - quote_base64 = True - - def __init__(self, data=None, secret_key=None, new=True): - ModificationTrackingDict.__init__(self, data or ()) - # explicitly convert it into a bytestring because python 2.6 - # no longer performs an implicit string conversion on hmac - if secret_key is not None: - secret_key = str(secret_key) - self.secret_key = secret_key - self.new = new - - def __repr__(self): - return '<%s %s%s>' % ( - self.__class__.__name__, - dict.__repr__(self), - self.should_save and '*' or '' - ) - - @property - def should_save(self): - """True if the session should be saved. By default this is only true - for :attr:`modified` cookies, not :attr:`new`. - """ - return self.modified - - @classmethod - def quote(cls, value): - """Quote the value for the cookie. This can be any object supported - by :attr:`serialization_method`. - - :param value: the value to quote. 
- """ - if cls.serialization_method is not None: - value = cls.serialization_method.dumps(value) - if cls.quote_base64: - value = ''.join(value.encode('base64').splitlines()).strip() - return value - - @classmethod - def unquote(cls, value): - """Unquote the value for the cookie. If unquoting does not work a - :exc:`UnquoteError` is raised. - - :param value: the value to unquote. - """ - try: - if cls.quote_base64: - value = value.decode('base64') - if cls.serialization_method is not None: - value = cls.serialization_method.loads(value) - return value - except Exception: - # unfortunately pickle and other serialization modules can - # cause pretty every error here. if we get one we catch it - # and convert it into an UnquoteError - raise UnquoteError() - - def serialize(self, expires=None): - """Serialize the secure cookie into a string. - - If expires is provided, the session will be automatically invalidated - after expiration when you unseralize it. This provides better - protection against session cookie theft. - - :param expires: an optional expiration date for the cookie (a - :class:`datetime.datetime` object) - """ - if self.secret_key is None: - raise RuntimeError('no secret key defined') - if expires: - self['_expires'] = _date_to_unix(expires) - result = [] - mac = hmac(self.secret_key, None, self.hash_method) - for key, value in sorted(self.items()): - result.append('%s=%s' % ( - url_quote_plus(key), - self.quote(value) - )) - mac.update('|' + result[-1]) - return '%s?%s' % ( - mac.digest().encode('base64').strip(), - '&'.join(result) - ) - - @classmethod - def unserialize(cls, string, secret_key): - """Load the secure cookie from a serialized string. - - :param string: the cookie value to unserialize. - :param secret_key: the secret key used to serialize the cookie. - :return: a new :class:`SecureCookie`. - """ - if isinstance(string, unicode): - string = string.encode('utf-8', 'replace') - try: - base64_hash, data = string.split('?', 1) - except (ValueError, IndexError): - items = () - else: - items = {} - mac = hmac(secret_key, None, cls.hash_method) - for item in data.split('&'): - mac.update('|' + item) - if not '=' in item: - items = None - break - key, value = item.split('=', 1) - # try to make the key a string - key = url_unquote_plus(key) - try: - key = str(key) - except UnicodeError: - pass - items[key] = value - - # no parsing error and the mac looks okay, we can now - # sercurely unpickle our cookie. - try: - client_hash = base64_hash.decode('base64') - except Exception: - items = client_hash = None - if items is not None and safe_str_cmp(client_hash, mac.digest()): - try: - for key, value in items.iteritems(): - items[key] = cls.unquote(value) - except UnquoteError: - items = () - else: - if '_expires' in items: - if time() > items['_expires']: - items = () - else: - del items['_expires'] - else: - items = () - return cls(items, secret_key, False) - - @classmethod - def load_cookie(cls, request, key='session', secret_key=None): - """Loads a :class:`SecureCookie` from a cookie in request. If the - cookie is not set, a new :class:`SecureCookie` instanced is - returned. - - :param request: a request object that has a `cookies` attribute - which is a dict of all cookie values. - :param key: the name of the cookie. - :param secret_key: the secret key used to unquote the cookie. - Always provide the value even though it has - no default! 
- """ - data = request.cookies.get(key) - if not data: - return cls(secret_key=secret_key) - return cls.unserialize(data, secret_key) - - def save_cookie(self, response, key='session', expires=None, - session_expires=None, max_age=None, path='/', domain=None, - secure=None, httponly=False, force=False): - """Saves the SecureCookie in a cookie on response object. All - parameters that are not described here are forwarded directly - to :meth:`~BaseResponse.set_cookie`. - - :param response: a response object that has a - :meth:`~BaseResponse.set_cookie` method. - :param key: the name of the cookie. - :param session_expires: the expiration date of the secure cookie - stored information. If this is not provided - the cookie `expires` date is used instead. - """ - if force or self.should_save: - data = self.serialize(session_expires or expires) - response.set_cookie(key, data, expires=expires, max_age=max_age, - path=path, domain=domain, secure=secure, - httponly=httponly) diff --git a/src/lib/werkzeug/contrib/sessions.py b/src/lib/werkzeug/contrib/sessions.py deleted file mode 100644 index b81351a..0000000 --- a/src/lib/werkzeug/contrib/sessions.py +++ /dev/null @@ -1,344 +0,0 @@ -# -*- coding: utf-8 -*- -r""" - werkzeug.contrib.sessions - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - This module contains some helper classes that help one to add session - support to a python WSGI application. For full client-side session - storage see :mod:`~werkzeug.contrib.securecookie` which implements a - secure, client-side session storage. - - - Application Integration - ======================= - - :: - - from werkzeug.contrib.sessions import SessionMiddleware, \ - FilesystemSessionStore - - app = SessionMiddleware(app, FilesystemSessionStore()) - - The current session will then appear in the WSGI environment as - `werkzeug.session`. However it's recommended to not use the middleware - but the stores directly in the application. However for very simple - scripts a middleware for sessions could be sufficient. - - This module does not implement methods or ways to check if a session is - expired. That should be done by a cronjob and storage specific. For - example to prune unused filesystem sessions one could check the modified - time of the files. It sessions are stored in the database the new() - method should add an expiration timestamp for the session. - - For better flexibility it's recommended to not use the middleware but the - store and session object directly in the application dispatching:: - - session_store = FilesystemSessionStore() - - def application(environ, start_response): - request = Request(environ) - sid = request.cookies.get('cookie_name') - if sid is None: - request.session = session_store.new() - else: - request.session = session_store.get(sid) - response = get_the_response_object(request) - if request.session.should_save: - session_store.save(request.session) - response.set_cookie('cookie_name', request.session.sid) - return response(environ, start_response) - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. 
-""" -import re -import os -import sys -import tempfile -from os import path -from time import time -from random import random -try: - from hashlib import sha1 -except ImportError: - from sha import new as sha1 -from cPickle import dump, load, HIGHEST_PROTOCOL - -from werkzeug.datastructures import CallbackDict -from werkzeug.utils import dump_cookie, parse_cookie -from werkzeug.wsgi import ClosingIterator -from werkzeug.posixemulation import rename - - -_sha1_re = re.compile(r'^[a-f0-9]{40}$') - - -def _urandom(): - if hasattr(os, 'urandom'): - return os.urandom(30) - return random() - - -def generate_key(salt=None): - return sha1('%s%s%s' % (salt, time(), _urandom())).hexdigest() - - -class ModificationTrackingDict(CallbackDict): - __slots__ = ('modified',) - - def __init__(self, *args, **kwargs): - def on_update(self): - self.modified = True - self.modified = False - CallbackDict.__init__(self, on_update=on_update) - dict.update(self, *args, **kwargs) - - def copy(self): - """Create a flat copy of the dict.""" - missing = object() - result = object.__new__(self.__class__) - for name in self.__slots__: - val = getattr(self, name, missing) - if val is not missing: - setattr(result, name, val) - return result - - def __copy__(self): - return self.copy() - - -class Session(ModificationTrackingDict): - """Subclass of a dict that keeps track of direct object changes. Changes - in mutable structures are not tracked, for those you have to set - `modified` to `True` by hand. - """ - __slots__ = ModificationTrackingDict.__slots__ + ('sid', 'new') - - def __init__(self, data, sid, new=False): - ModificationTrackingDict.__init__(self, data) - self.sid = sid - self.new = new - - def __repr__(self): - return '<%s %s%s>' % ( - self.__class__.__name__, - dict.__repr__(self), - self.should_save and '*' or '' - ) - - @property - def should_save(self): - """True if the session should be saved. - - .. versionchanged:: 0.6 - By default the session is now only saved if the session is - modified, not if it is new like it was before. - """ - return self.modified - - -class SessionStore(object): - """Baseclass for all session stores. The Werkzeug contrib module does not - implement any useful stores besides the filesystem store, application - developers are encouraged to create their own stores. - - :param session_class: The session class to use. Defaults to - :class:`Session`. - """ - - def __init__(self, session_class=None): - if session_class is None: - session_class = Session - self.session_class = session_class - - def is_valid_key(self, key): - """Check if a key has the correct format.""" - return _sha1_re.match(key) is not None - - def generate_key(self, salt=None): - """Simple function that generates a new session key.""" - return generate_key(salt) - - def new(self): - """Generate a new session.""" - return self.session_class({}, self.generate_key(), True) - - def save(self, session): - """Save a session.""" - - def save_if_modified(self, session): - """Save if a session class wants an update.""" - if session.should_save: - self.save(session) - - def delete(self, session): - """Delete a session.""" - - def get(self, sid): - """Get a session for this sid or a new session object. This method - has to check if the session key is valid and create a new session if - that wasn't the case. 
- """ - return self.session_class({}, sid, True) - - -#: used for temporary files by the filesystem session store -_fs_transaction_suffix = '.__wz_sess' - - -class FilesystemSessionStore(SessionStore): - """Simple example session store that saves sessions on the filesystem. - This store works best on POSIX systems and Windows Vista / Windows - Server 2008 and newer. - - .. versionchanged:: 0.6 - `renew_missing` was added. Previously this was considered `True`, - now the default changed to `False` and it can be explicitly - deactivated. - - :param path: the path to the folder used for storing the sessions. - If not provided the default temporary directory is used. - :param filename_template: a string template used to give the session - a filename. ``%s`` is replaced with the - session id. - :param session_class: The session class to use. Defaults to - :class:`Session`. - :param renew_missing: set to `True` if you want the store to - give the user a new sid if the session was - not yet saved. - """ - - def __init__(self, path=None, filename_template='werkzeug_%s.sess', - session_class=None, renew_missing=False, mode=0644): - SessionStore.__init__(self, session_class) - if path is None: - path = tempfile.gettempdir() - self.path = path - if isinstance(filename_template, unicode): - filename_template = filename_template.encode( - sys.getfilesystemencoding() or 'utf-8') - assert not filename_template.endswith(_fs_transaction_suffix), \ - 'filename templates may not end with %s' % _fs_transaction_suffix - self.filename_template = filename_template - self.renew_missing = renew_missing - self.mode = mode - - def get_session_filename(self, sid): - # out of the box, this should be a strict ASCII subset but - # you might reconfigure the session object to have a more - # arbitrary string. - if isinstance(sid, unicode): - sid = sid.encode(sys.getfilesystemencoding() or 'utf-8') - return path.join(self.path, self.filename_template % sid) - - def save(self, session): - fn = self.get_session_filename(session.sid) - fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix, - dir=self.path) - f = os.fdopen(fd, 'wb') - try: - dump(dict(session), f, HIGHEST_PROTOCOL) - finally: - f.close() - try: - rename(tmp, fn) - os.chmod(fn, self.mode) - except (IOError, OSError): - pass - - def delete(self, session): - fn = self.get_session_filename(session.sid) - try: - os.unlink(fn) - except OSError: - pass - - def get(self, sid): - if not self.is_valid_key(sid): - return self.new() - try: - f = open(self.get_session_filename(sid), 'rb') - except IOError: - if self.renew_missing: - return self.new() - data = {} - else: - try: - try: - data = load(f) - except Exception: - data = {} - finally: - f.close() - return self.session_class(data, sid, False) - - def list(self): - """Lists all sessions in the store. - - .. versionadded:: 0.6 - """ - before, after = self.filename_template.split('%s', 1) - filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before), - re.escape(after))) - result = [] - for filename in os.listdir(self.path): - #: this is a session that is still being saved. - if filename.endswith(_fs_transaction_suffix): - continue - match = filename_re.match(filename) - if match is not None: - result.append(match.group(1)) - return result - - -class SessionMiddleware(object): - """A simple middleware that puts the session object of a store provided - into the WSGI environ. It automatically sets cookies and restores - sessions. 
- - However a middleware is not the preferred solution because it won't be as - fast as sessions managed by the application itself and will put a key into - the WSGI environment only relevant for the application which is against - the concept of WSGI. - - The cookie parameters are the same as for the :func:`~dump_cookie` - function just prefixed with ``cookie_``. Additionally `max_age` is - called `cookie_age` and not `cookie_max_age` because of backwards - compatibility. - """ - - def __init__(self, app, store, cookie_name='session_id', - cookie_age=None, cookie_expires=None, cookie_path='/', - cookie_domain=None, cookie_secure=None, - cookie_httponly=False, environ_key='werkzeug.session'): - self.app = app - self.store = store - self.cookie_name = cookie_name - self.cookie_age = cookie_age - self.cookie_expires = cookie_expires - self.cookie_path = cookie_path - self.cookie_domain = cookie_domain - self.cookie_secure = cookie_secure - self.cookie_httponly = cookie_httponly - self.environ_key = environ_key - - def __call__(self, environ, start_response): - cookie = parse_cookie(environ.get('HTTP_COOKIE', '')) - sid = cookie.get(self.cookie_name, None) - if sid is None: - session = self.store.new() - else: - session = self.store.get(sid) - environ[self.environ_key] = session - - def injecting_start_response(status, headers, exc_info=None): - if session.should_save: - self.store.save(session) - headers.append(('Set-Cookie', dump_cookie(self.cookie_name, - session.sid, self.cookie_age, - self.cookie_expires, self.cookie_path, - self.cookie_domain, self.cookie_secure, - self.cookie_httponly))) - return start_response(status, headers, exc_info) - return ClosingIterator(self.app(environ, injecting_start_response), - lambda: self.store.save_if_modified(session)) diff --git a/src/lib/werkzeug/contrib/wrappers.py b/src/lib/werkzeug/contrib/wrappers.py deleted file mode 100644 index bd6a2d4..0000000 --- a/src/lib/werkzeug/contrib/wrappers.py +++ /dev/null @@ -1,275 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.contrib.wrappers - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - Extra wrappers or mixins contributed by the community. These wrappers can - be mixed in into request objects to add extra functionality. - - Example:: - - from werkzeug.wrappers import Request as RequestBase - from werkzeug.contrib.wrappers import JSONRequestMixin - - class Request(RequestBase, JSONRequestMixin): - pass - - Afterwards this request object provides the extra functionality of the - :class:`JSONRequestMixin`. - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import codecs -from werkzeug.exceptions import BadRequest -from werkzeug.utils import cached_property -from werkzeug.http import dump_options_header, parse_options_header -from werkzeug._internal import _decode_unicode -try: - from simplejson import loads -except ImportError: - from json import loads - - -def is_known_charset(charset): - """Checks if the given charset is known to Python.""" - try: - codecs.lookup(charset) - except LookupError: - return False - return True - - -class JSONRequestMixin(object): - """Add json method to a request object. This will parse the input data - through simplejson if possible. - - :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type - is not json or if the data itself cannot be parsed as json. 
- """ - - @cached_property - def json(self): - """Get the result of simplejson.loads if possible.""" - if 'json' not in self.environ.get('CONTENT_TYPE', ''): - raise BadRequest('Not a JSON request') - try: - return loads(self.data) - except Exception: - raise BadRequest('Unable to read JSON request') - - -class ProtobufRequestMixin(object): - """Add protobuf parsing method to a request object. This will parse the - input data through `protobuf`_ if possible. - - :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type - is not protobuf or if the data itself cannot be parsed property. - - .. _protobuf: http://code.google.com/p/protobuf/ - """ - - #: by default the :class:`ProtobufRequestMixin` will raise a - #: :exc:`~werkzeug.exceptions.BadRequest` if the object is not - #: initialized. You can bypass that check by setting this - #: attribute to `False`. - protobuf_check_initialization = True - - def parse_protobuf(self, proto_type): - """Parse the data into an instance of proto_type.""" - if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''): - raise BadRequest('Not a Protobuf request') - - obj = proto_type() - try: - obj.ParseFromString(self.data) - except Exception: - raise BadRequest("Unable to parse Protobuf request") - - # Fail if not all required fields are set - if self.protobuf_check_initialization and not obj.IsInitialized(): - raise BadRequest("Partial Protobuf request") - - return obj - - -class RoutingArgsRequestMixin(object): - """This request mixin adds support for the wsgiorg routing args - `specification`_. - - .. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args - """ - - def _get_routing_args(self): - return self.environ.get('wsgiorg.routing_args', (()))[0] - - def _set_routing_args(self, value): - if self.shallow: - raise RuntimeError('A shallow request tried to modify the WSGI ' - 'environment. If you really want to do that, ' - 'set `shallow` to False.') - self.environ['wsgiorg.routing_args'] = (value, self.routing_vars) - - routing_args = property(_get_routing_args, _set_routing_args, doc=''' - The positional URL arguments as `tuple`.''') - del _get_routing_args, _set_routing_args - - def _get_routing_vars(self): - rv = self.environ.get('wsgiorg.routing_args') - if rv is not None: - return rv[1] - rv = {} - if not self.shallow: - self.routing_vars = rv - return rv - - def _set_routing_vars(self, value): - if self.shallow: - raise RuntimeError('A shallow request tried to modify the WSGI ' - 'environment. If you really want to do that, ' - 'set `shallow` to False.') - self.environ['wsgiorg.routing_args'] = (self.routing_args, value) - - routing_vars = property(_get_routing_vars, _set_routing_vars, doc=''' - The keyword URL arguments as `dict`.''') - del _get_routing_vars, _set_routing_vars - - -class ReverseSlashBehaviorRequestMixin(object): - """This mixin reverses the trailing slash behavior of :attr:`script_root` - and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin` - directly on the paths. 
- - Because it changes the behavior or :class:`Request` this class has to be - mixed in *before* the actual request class:: - - class MyRequest(ReverseSlashBehaviorRequestMixin, Request): - pass - - This example shows the differences (for an application mounted on - `/application` and the request going to `/application/foo/bar`): - - +---------------+-------------------+---------------------+ - | | normal behavior | reverse behavior | - +===============+===================+=====================+ - | `script_root` | ``/application`` | ``/application/`` | - +---------------+-------------------+---------------------+ - | `path` | ``/foo/bar`` | ``foo/bar`` | - +---------------+-------------------+---------------------+ - """ - - @cached_property - def path(self): - """Requested path as unicode. This works a bit like the regular path - info in the WSGI environment but will not include a leading slash. - """ - path = (self.environ.get('PATH_INFO') or '').lstrip('/') - return _decode_unicode(path, self.charset, self.encoding_errors) - - @cached_property - def script_root(self): - """The root path of the script includling a trailing slash.""" - path = (self.environ.get('SCRIPT_NAME') or '').rstrip('/') + '/' - return _decode_unicode(path, self.charset, self.encoding_errors) - - -class DynamicCharsetRequestMixin(object): - """"If this mixin is mixed into a request class it will provide - a dynamic `charset` attribute. This means that if the charset is - transmitted in the content type headers it's used from there. - - Because it changes the behavior or :class:`Request` this class has - to be mixed in *before* the actual request class:: - - class MyRequest(DynamicCharsetRequestMixin, Request): - pass - - By default the request object assumes that the URL charset is the - same as the data charset. If the charset varies on each request - based on the transmitted data it's not a good idea to let the URLs - change based on that. Most browsers assume either utf-8 or latin1 - for the URLs if they have troubles figuring out. It's strongly - recommended to set the URL charset to utf-8:: - - class MyRequest(DynamicCharsetRequestMixin, Request): - url_charset = 'utf-8' - - .. versionadded:: 0.6 - """ - - #: the default charset that is assumed if the content type header - #: is missing or does not contain a charset parameter. The default - #: is latin1 which is what HTTP specifies as default charset. - #: You may however want to set this to utf-8 to better support - #: browsers that do not transmit a charset for incoming data. - default_charset = 'latin1' - - def unknown_charset(self, charset): - """Called if a charset was provided but is not supported by - the Python codecs module. By default latin1 is assumed then - to not lose any information, you may override this method to - change the behavior. - - :param charset: the charset that was not found. - :return: the replacement charset. - """ - return 'latin1' - - @cached_property - def charset(self): - """The charset from the content type.""" - header = self.environ.get('CONTENT_TYPE') - if header: - ct, options = parse_options_header(header) - charset = options.get('charset') - if charset: - if is_known_charset(charset): - return charset - return self.unknown_charset(charset) - return self.default_charset - - -class DynamicCharsetResponseMixin(object): - """If this mixin is mixed into a response class it will provide - a dynamic `charset` attribute. 
This means that if the charset is - looked up and stored in the `Content-Type` header and updates - itself automatically. This also means a small performance hit but - can be useful if you're working with different charsets on - responses. - - Because the charset attribute is no a property at class-level, the - default value is stored in `default_charset`. - - Because it changes the behavior or :class:`Response` this class has - to be mixed in *before* the actual response class:: - - class MyResponse(DynamicCharsetResponseMixin, Response): - pass - - .. versionadded:: 0.6 - """ - - #: the default charset. - default_charset = 'utf-8' - - def _get_charset(self): - header = self.headers.get('content-type') - if header: - charset = parse_options_header(header)[1].get('charset') - if charset: - return charset - return self.default_charset - - def _set_charset(self, charset): - header = self.headers.get('content-type') - ct, options = parse_options_header(header) - if not ct: - raise TypeError('Cannot set charset if Content-Type ' - 'header is missing.') - options['charset'] = charset - self.headers['Content-Type'] = dump_options_header(ct, options) - - charset = property(_get_charset, _set_charset, doc=""" - The charset for the response. It's stored inside the - Content-Type header as a parameter.""") - del _get_charset, _set_charset diff --git a/src/lib/werkzeug/datastructures.py b/src/lib/werkzeug/datastructures.py deleted file mode 100644 index 09d59e8..0000000 --- a/src/lib/werkzeug/datastructures.py +++ /dev/null @@ -1,2573 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.datastructures - ~~~~~~~~~~~~~~~~~~~~~~~ - - This module provides mixins and classes with an immutable interface. - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import re -import codecs -import mimetypes -from itertools import repeat - -from werkzeug._internal import _proxy_repr, _missing, _empty_stream - - -_locale_delim_re = re.compile(r'[_-]') - - -def is_immutable(self): - raise TypeError('%r objects are immutable' % self.__class__.__name__) - - -def iter_multi_items(mapping): - """Iterates over the items of a mapping yielding keys and values - without dropping any from more complex structures. - """ - if isinstance(mapping, MultiDict): - for item in mapping.iteritems(multi=True): - yield item - elif isinstance(mapping, dict): - for key, value in mapping.iteritems(): - if isinstance(value, (tuple, list)): - for value in value: - yield key, value - else: - yield key, value - else: - for item in mapping: - yield item - - -class ImmutableListMixin(object): - """Makes a :class:`list` immutable. - - .. 
versionadded:: 0.5 - - :private: - """ - - _hash_cache = None - - def __hash__(self): - if self._hash_cache is not None: - return self._hash_cache - rv = self._hash_cache = hash(tuple(self)) - return rv - - def __reduce_ex__(self, protocol): - return type(self), (list(self),) - - def __delitem__(self, key): - is_immutable(self) - - def __delslice__(self, i, j): - is_immutable(self) - - def __iadd__(self, other): - is_immutable(self) - __imul__ = __iadd__ - - def __setitem__(self, key, value): - is_immutable(self) - - def __setslice__(self, i, j, value): - is_immutable(self) - - def append(self, item): - is_immutable(self) - remove = append - - def extend(self, iterable): - is_immutable(self) - - def insert(self, pos, value): - is_immutable(self) - - def pop(self, index=-1): - is_immutable(self) - - def reverse(self): - is_immutable(self) - - def sort(self, cmp=None, key=None, reverse=None): - is_immutable(self) - - -class ImmutableList(ImmutableListMixin, list): - """An immutable :class:`list`. - - .. versionadded:: 0.5 - - :private: - """ - - __repr__ = _proxy_repr(list) - - -class ImmutableDictMixin(object): - """Makes a :class:`dict` immutable. - - .. versionadded:: 0.5 - - :private: - """ - _hash_cache = None - - @classmethod - def fromkeys(cls, keys, value=None): - instance = super(cls, cls).__new__(cls) - instance.__init__(zip(keys, repeat(value))) - return instance - - def __reduce_ex__(self, protocol): - return type(self), (dict(self),) - - def _iter_hashitems(self): - return self.iteritems() - - def __hash__(self): - if self._hash_cache is not None: - return self._hash_cache - rv = self._hash_cache = hash(frozenset(self._iter_hashitems())) - return rv - - def setdefault(self, key, default=None): - is_immutable(self) - - def update(self, *args, **kwargs): - is_immutable(self) - - def pop(self, key, default=None): - is_immutable(self) - - def popitem(self): - is_immutable(self) - - def __setitem__(self, key, value): - is_immutable(self) - - def __delitem__(self, key): - is_immutable(self) - - def clear(self): - is_immutable(self) - - -class ImmutableMultiDictMixin(ImmutableDictMixin): - """Makes a :class:`MultiDict` immutable. - - .. versionadded:: 0.5 - - :private: - """ - - def __reduce_ex__(self, protocol): - return type(self), (self.items(multi=True),) - - def _iter_hashitems(self): - return self.iteritems(multi=True) - - def add(self, key, value): - is_immutable(self) - - def popitemlist(self): - is_immutable(self) - - def poplist(self, key): - is_immutable(self) - - def setlist(self, key, new_list): - is_immutable(self) - - def setlistdefault(self, key, default_list=None): - is_immutable(self) - - -class UpdateDictMixin(object): - """Makes dicts call `self.on_update` on modifications. - - .. versionadded:: 0.5 - - :private: - """ - - on_update = None - - def calls_update(name): - def oncall(self, *args, **kw): - rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw) - if self.on_update is not None: - self.on_update(self) - return rv - oncall.__name__ = name - return oncall - - __setitem__ = calls_update('__setitem__') - __delitem__ = calls_update('__delitem__') - clear = calls_update('clear') - pop = calls_update('pop') - popitem = calls_update('popitem') - setdefault = calls_update('setdefault') - update = calls_update('update') - del calls_update - - -class TypeConversionDict(dict): - """Works like a regular dict but the :meth:`get` method can perform - type conversions. 
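UpdateDictMixin above is the hook the tracking dicts build on: every mutating dict method is routed through on_update. A minimal sketch (ObservedDict and changed are made-up names)::

    from werkzeug.datastructures import UpdateDictMixin

    class ObservedDict(UpdateDictMixin, dict):
        pass

    def changed(d):
        print('dict is now %r' % d)   # receives the dict instance

    d = ObservedDict()
    d.on_update = changed
    d['key'] = 'value'   # fires changed(d)
    d.pop('key')         # so do pop, clear, update, setdefault, ...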
:class:`MultiDict` and :class:`CombinedMultiDict` - are subclasses of this class and provide the same feature. - - .. versionadded:: 0.5 - """ - - def get(self, key, default=None, type=None): - """Return the default value if the requested data doesn't exist. - If `type` is provided and is a callable it should convert the value, - return it or raise a :exc:`ValueError` if that is not possible. In - this case the function will return the default as if the value was not - found: - - >>> d = TypeConversionDict(foo='42', bar='blub') - >>> d.get('foo', type=int) - 42 - >>> d.get('bar', -1, type=int) - -1 - - :param key: The key to be looked up. - :param default: The default value to be returned if the key can't - be looked up. If not further specified `None` is - returned. - :param type: A callable that is used to cast the value in the - :class:`MultiDict`. If a :exc:`ValueError` is raised - by this callable the default value is returned. - """ - try: - rv = self[key] - if type is not None: - rv = type(rv) - except (KeyError, ValueError): - rv = default - return rv - - -class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict): - """Works like a :class:`TypeConversionDict` but does not support - modifications. - - .. versionadded:: 0.5 - """ - - def copy(self): - """Return a shallow mutable copy of this object. Keep in mind that - the standard library's :func:`copy` function is a no-op for this class - like for any other python immutable type (eg: :class:`tuple`). - """ - return TypeConversionDict(self) - - def __copy__(self): - return self - - -class MultiDict(TypeConversionDict): - """A :class:`MultiDict` is a dictionary subclass customized to deal with - multiple values for the same key which is for example used by the parsing - functions in the wrappers. This is necessary because some HTML form - elements pass multiple values for the same key. - - :class:`MultiDict` implements all standard dictionary methods. - Internally, it saves all values for a key as a list, but the standard dict - access methods will only return the first value for a key. If you want to - gain access to the other values, too, you have to use the `list` methods as - explained below. - - Basic Usage: - - >>> d = MultiDict([('a', 'b'), ('a', 'c')]) - >>> d - MultiDict([('a', 'b'), ('a', 'c')]) - >>> d['a'] - 'b' - >>> d.getlist('a') - ['b', 'c'] - >>> 'a' in d - True - - It behaves like a normal dict thus all dict functions will only return the - first value when multiple values for one key are found. - - From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a - subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will - render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP - exceptions. - - A :class:`MultiDict` can be constructed from an iterable of - ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2 - onwards some keyword parameters. - - :param mapping: the initial value for the :class:`MultiDict`. Either a - regular dict, an iterable of ``(key, value)`` tuples - or `None`. 
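To round out the doctest above: get and getlist share the type-conversion behaviour, and getlist silently drops values the callable rejects::

    from werkzeug.datastructures import MultiDict

    d = MultiDict([('id', '42'), ('id', 'junk'), ('id', '7')])
    d.get('id', type=int)        # -> 42, first value only
    d.getlist('id', type=int)    # -> [42, 7]; 'junk' raised ValueError, dropped
    d.to_dict(flat=False)        # -> {'id': ['42', 'junk', '7']}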
- """ - - def __init__(self, mapping=None): - if isinstance(mapping, MultiDict): - dict.__init__(self, ((k, l[:]) for k, l in mapping.iterlists())) - elif isinstance(mapping, dict): - tmp = {} - for key, value in mapping.iteritems(): - if isinstance(value, (tuple, list)): - value = list(value) - else: - value = [value] - tmp[key] = value - dict.__init__(self, tmp) - else: - tmp = {} - for key, value in mapping or (): - tmp.setdefault(key, []).append(value) - dict.__init__(self, tmp) - - def __getstate__(self): - return dict(self.lists()) - - def __setstate__(self, value): - dict.clear(self) - dict.update(self, value) - - def __iter__(self): - return self.iterkeys() - - def __getitem__(self, key): - """Return the first data value for this key; - raises KeyError if not found. - - :param key: The key to be looked up. - :raise KeyError: if the key does not exist. - """ - if key in self: - return dict.__getitem__(self, key)[0] - raise BadRequestKeyError(key) - - def __setitem__(self, key, value): - """Like :meth:`add` but removes an existing key first. - - :param key: the key for the value. - :param value: the value to set. - """ - dict.__setitem__(self, key, [value]) - - def add(self, key, value): - """Adds a new value for the key. - - .. versionadded:: 0.6 - - :param key: the key for the value. - :param value: the value to add. - """ - dict.setdefault(self, key, []).append(value) - - def getlist(self, key, type=None): - """Return the list of items for a given key. If that key is not in the - `MultiDict`, the return value will be an empty list. Just as `get` - `getlist` accepts a `type` parameter. All items will be converted - with the callable defined there. - - :param key: The key to be looked up. - :param type: A callable that is used to cast the value in the - :class:`MultiDict`. If a :exc:`ValueError` is raised - by this callable the value will be removed from the list. - :return: a :class:`list` of all the values for the key. - """ - try: - rv = dict.__getitem__(self, key) - except KeyError: - return [] - if type is None: - return list(rv) - result = [] - for item in rv: - try: - result.append(type(item)) - except ValueError: - pass - return result - - def setlist(self, key, new_list): - """Remove the old values for a key and add new ones. Note that the list - you pass the values in will be shallow-copied before it is inserted in - the dictionary. - - >>> d = MultiDict() - >>> d.setlist('foo', ['1', '2']) - >>> d['foo'] - '1' - >>> d.getlist('foo') - ['1', '2'] - - :param key: The key for which the values are set. - :param new_list: An iterable with the new values for the key. Old values - are removed first. - """ - dict.__setitem__(self, key, list(new_list)) - - def setdefault(self, key, default=None): - """Returns the value for the key if it is in the dict, otherwise it - returns `default` and sets that value for `key`. - - :param key: The key to be looked up. - :param default: The default value to be returned if the key is not - in the dict. If not further specified it's `None`. - """ - if key not in self: - self[key] = default - else: - default = self[key] - return default - - def setlistdefault(self, key, default_list=None): - """Like `setdefault` but sets multiple values. The list returned - is not a copy, but the list that is actually used internally. 
This - means that you can put new values into the dict by appending items - to the list: - - >>> d = MultiDict({"foo": 1}) - >>> d.setlistdefault("foo").extend([2, 3]) - >>> d.getlist("foo") - [1, 2, 3] - - :param key: The key to be looked up. - :param default: An iterable of default values. It is either copied - (in case it was a list) or converted into a list - before returned. - :return: a :class:`list` - """ - if key not in self: - default_list = list(default_list or ()) - dict.__setitem__(self, key, default_list) - else: - default_list = dict.__getitem__(self, key) - return default_list - - def items(self, multi=False): - """Return a list of ``(key, value)`` pairs. - - :param multi: If set to `True` the list returned will have a - pair for each value of each key. Otherwise it - will only contain pairs for the first value of - each key. - - :return: a :class:`list` - """ - return list(self.iteritems(multi)) - - def lists(self): - """Return a list of ``(key, values)`` pairs, where values is the list of - all values associated with the key. - - :return: a :class:`list` - """ - return list(self.iterlists()) - - def values(self): - """Returns a list of the first value on every key's value list. - - :return: a :class:`list`. - """ - return [self[key] for key in self.iterkeys()] - - def listvalues(self): - """Return a list of all values associated with a key. Zipping - :meth:`keys` and this is the same as calling :meth:`lists`: - - >>> d = MultiDict({"foo": [1, 2, 3]}) - >>> zip(d.keys(), d.listvalues()) == d.lists() - True - - :return: a :class:`list` - """ - return list(self.iterlistvalues()) - - def iteritems(self, multi=False): - """Like :meth:`items` but returns an iterator.""" - for key, values in dict.iteritems(self): - if multi: - for value in values: - yield key, value - else: - yield key, values[0] - - def iterlists(self): - """Like :meth:`items` but returns an iterator.""" - for key, values in dict.iteritems(self): - yield key, list(values) - - def itervalues(self): - """Like :meth:`values` but returns an iterator.""" - for values in dict.itervalues(self): - yield values[0] - - def iterlistvalues(self): - """Like :meth:`listvalues` but returns an iterator.""" - return dict.itervalues(self) - - def copy(self): - """Return a shallow copy of this object.""" - return self.__class__(self) - - def to_dict(self, flat=True): - """Return the contents as regular dict. If `flat` is `True` the - returned dict will only have the first item present, if `flat` is - `False` all values will be returned as lists. - - :param flat: If set to `False` the dict returned will have lists - with all the values in it. Otherwise it will only - contain the first value for each key. - :return: a :class:`dict` - """ - if flat: - return dict(self.iteritems()) - return dict(self.lists()) - - def update(self, other_dict): - """update() extends rather than replaces existing key lists.""" - for key, value in iter_multi_items(other_dict): - MultiDict.add(self, key, value) - - def pop(self, key, default=_missing): - """Pop the first item for a list on the dict. Afterwards the - key is removed from the dict, so additional values are discarded: - - >>> d = MultiDict({"foo": [1, 2, 3]}) - >>> d.pop("foo") - 1 - >>> "foo" in d - False - - :param key: the key to pop. - :param default: if provided the value to return if the key was - not in the dictionary. 
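One behaviour worth flagging from the deleted docstrings: MultiDict.update extends existing value lists instead of replacing them, unlike dict.update::

    d = MultiDict([('a', 'x')])
    d.update({'a': 'y'})
    d.getlist('a')    # -> ['x', 'y'], not ['y']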
- """ - try: - return dict.pop(self, key)[0] - except KeyError, e: - if default is not _missing: - return default - raise BadRequestKeyError(str(e)) - - def popitem(self): - """Pop an item from the dict.""" - try: - item = dict.popitem(self) - return (item[0], item[1][0]) - except KeyError, e: - raise BadRequestKeyError(str(e)) - - def poplist(self, key): - """Pop the list for a key from the dict. If the key is not in the dict - an empty list is returned. - - .. versionchanged:: 0.5 - If the key does no longer exist a list is returned instead of - raising an error. - """ - return dict.pop(self, key, []) - - def popitemlist(self): - """Pop a ``(key, list)`` tuple from the dict.""" - try: - return dict.popitem(self) - except KeyError, e: - raise BadRequestKeyError(str(e)) - - def __copy__(self): - return self.copy() - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self.items(multi=True)) - - -class _omd_bucket(object): - """Wraps values in the :class:`OrderedMultiDict`. This makes it - possible to keep an order over multiple different keys. It requires - a lot of extra memory and slows down access a lot, but makes it - possible to access elements in O(1) and iterate in O(n). - """ - __slots__ = ('prev', 'key', 'value', 'next') - - def __init__(self, omd, key, value): - self.prev = omd._last_bucket - self.key = key - self.value = value - self.next = None - - if omd._first_bucket is None: - omd._first_bucket = self - if omd._last_bucket is not None: - omd._last_bucket.next = self - omd._last_bucket = self - - def unlink(self, omd): - if self.prev: - self.prev.next = self.next - if self.next: - self.next.prev = self.prev - if omd._first_bucket is self: - omd._first_bucket = self.next - if omd._last_bucket is self: - omd._last_bucket = self.prev - - -class OrderedMultiDict(MultiDict): - """Works like a regular :class:`MultiDict` but preserves the - order of the fields. To convert the ordered multi dict into a - list you can use the :meth:`items` method and pass it ``multi=True``. - - In general an :class:`OrderedMultiDict` is an order of magnitude - slower than a :class:`MultiDict`. - - .. admonition:: note - - Due to a limitation in Python you cannot convert an ordered - multi dict into a regular dict by using ``dict(multidict)``. - Instead you have to use the :meth:`to_dict` method, otherwise - the internal bucket objects are exposed. 
- """ - - def __init__(self, mapping=None): - dict.__init__(self) - self._first_bucket = self._last_bucket = None - if mapping is not None: - OrderedMultiDict.update(self, mapping) - - def __eq__(self, other): - if not isinstance(other, MultiDict): - return NotImplemented - if isinstance(other, OrderedMultiDict): - iter1 = self.iteritems(multi=True) - iter2 = other.iteritems(multi=True) - try: - for k1, v1 in iter1: - k2, v2 = iter2.next() - if k1 != k2 or v1 != v2: - return False - except StopIteration: - return False - try: - iter2.next() - except StopIteration: - return True - return False - if len(self) != len(other): - return False - for key, values in self.iterlists(): - if other.getlist(key) != values: - return False - return True - - def __ne__(self, other): - return not self.__eq__(other) - - def __reduce_ex__(self, protocol): - return type(self), (self.items(multi=True),) - - def __getstate__(self): - return self.items(multi=True) - - def __setstate__(self, values): - dict.clear(self) - for key, value in values: - self.add(key, value) - - def __getitem__(self, key): - if key in self: - return dict.__getitem__(self, key)[0].value - raise BadRequestKeyError(key) - - def __setitem__(self, key, value): - self.poplist(key) - self.add(key, value) - - def __delitem__(self, key): - self.pop(key) - - def iterkeys(self): - return (key for key, value in self.iteritems()) - - def itervalues(self): - return (value for key, value in self.iteritems()) - - def iteritems(self, multi=False): - ptr = self._first_bucket - if multi: - while ptr is not None: - yield ptr.key, ptr.value - ptr = ptr.next - else: - returned_keys = set() - while ptr is not None: - if ptr.key not in returned_keys: - returned_keys.add(ptr.key) - yield ptr.key, ptr.value - ptr = ptr.next - - def iterlists(self): - returned_keys = set() - ptr = self._first_bucket - while ptr is not None: - if ptr.key not in returned_keys: - yield ptr.key, self.getlist(ptr.key) - returned_keys.add(ptr.key) - ptr = ptr.next - - def iterlistvalues(self): - for key, values in self.iterlists(): - yield values - - def add(self, key, value): - dict.setdefault(self, key, []).append(_omd_bucket(self, key, value)) - - def getlist(self, key, type=None): - try: - rv = dict.__getitem__(self, key) - except KeyError: - return [] - if type is None: - return [x.value for x in rv] - result = [] - for item in rv: - try: - result.append(type(item.value)) - except ValueError: - pass - return result - - def setlist(self, key, new_list): - self.poplist(key) - for value in new_list: - self.add(key, value) - - def setlistdefault(self, key, default_list=None): - raise TypeError('setlistdefault is unsupported for ' - 'ordered multi dicts') - - def update(self, mapping): - for key, value in iter_multi_items(mapping): - OrderedMultiDict.add(self, key, value) - - def poplist(self, key): - buckets = dict.pop(self, key, ()) - for bucket in buckets: - bucket.unlink(self) - return [x.value for x in buckets] - - def pop(self, key, default=_missing): - try: - buckets = dict.pop(self, key) - except KeyError, e: - if default is not _missing: - return default - raise BadRequestKeyError(str(e)) - for bucket in buckets: - bucket.unlink(self) - return buckets[0].value - - def popitem(self): - try: - key, buckets = dict.popitem(self) - except KeyError, e: - raise BadRequestKeyError(str(e)) - for bucket in buckets: - bucket.unlink(self) - return key, buckets[0].value - - def popitemlist(self): - try: - key, buckets = dict.popitem(self) - except KeyError, e: - raise 
BadRequestKeyError(str(e)) - for bucket in buckets: - bucket.unlink(self) - return key, [x.value for x in buckets] - - -def _options_header_vkw(value, kw): - return dump_options_header(value, dict((k.replace('_', '-'), v) - for k, v in kw.items())) - - -class Headers(object): - """An object that stores some headers. It has a dict-like interface - but is ordered and can store the same keys multiple times. - - This data structure is useful if you want a nicer way to handle WSGI - headers which are stored as tuples in a list. - - From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is - also a subclass of the :class:`~exceptions.BadRequest` HTTP exception - and will render a page for a ``400 BAD REQUEST`` if caught in a - catch-all for HTTP exceptions. - - Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers` - class, with the exception of `__getitem__`. :mod:`wsgiref` will return - `None` for ``headers['missing']``, whereas :class:`Headers` will raise - a :class:`KeyError`. - - To create a new :class:`Headers` object pass it a list or dict of headers - which are used as default values. This does not reuse the list passed - to the constructor for internal usage. To create a :class:`Headers` - object that uses as internal storage the list or list-like object you - can use the :meth:`linked` class method. - - :param defaults: The list of default values for the :class:`Headers`. - """ - - def __init__(self, defaults=None, _list=None): - if _list is None: - _list = [] - self._list = _list - if defaults is not None: - if isinstance(defaults, (list, Headers)): - self._list.extend(defaults) - else: - self.extend(defaults) - - @classmethod - def linked(cls, headerlist): - """Create a new :class:`Headers` object that uses the list of headers - passed as internal storage: - - >>> headerlist = [('Content-Length', '40')] - >>> headers = Headers.linked(headerlist) - >>> headers['Content-Type'] = 'text/html' - >>> headerlist - [('Content-Length', '40'), ('Content-Type', 'text/html')] - - :param headerlist: The list of headers the class is linked to. - :return: new linked :class:`Headers` object. - """ - return cls(_list=headerlist) - - def __getitem__(self, key, _get_mode=False): - if not _get_mode: - if isinstance(key, (int, long)): - return self._list[key] - elif isinstance(key, slice): - return self.__class__(self._list[key]) - ikey = key.lower() - for k, v in self._list: - if k.lower() == ikey: - return v - # micro optimization: if we are in get mode we will catch that - # exception one stack level down so we can raise a standard - # key error instead of our special one. - if _get_mode: - raise KeyError() - raise BadRequestKeyError(key) - - def __eq__(self, other): - return other.__class__ is self.__class__ and \ - set(other._list) == set(self._list) - - def __ne__(self, other): - return not self.__eq__(other) - - def get(self, key, default=None, type=None): - """Return the default value if the requested data doesn't exist. - If `type` is provided and is a callable it should convert the value, - return it or raise a :exc:`ValueError` if that is not possible. In - this case the function will return the default as if the value was not - found: - - >>> d = Headers([('Content-Length', '42')]) - >>> d.get('Content-Length', type=int) - 42 - - If a headers object is bound you must not add unicode strings - because no encoding takes place. - - :param key: The key to be looked up. - :param default: The default value to be returned if the key can't - be looked up. 
If not further specified `None` is - returned. - :param type: A callable that is used to cast the value in the - :class:`Headers`. If a :exc:`ValueError` is raised - by this callable the default value is returned. - """ - try: - rv = self.__getitem__(key, _get_mode=True) - except KeyError: - return default - if type is None: - return rv - try: - return type(rv) - except ValueError: - return default - - def getlist(self, key, type=None): - """Return the list of items for a given key. If that key is not in the - :class:`Headers`, the return value will be an empty list. Just as - :meth:`get` :meth:`getlist` accepts a `type` parameter. All items will - be converted with the callable defined there. - - :param key: The key to be looked up. - :param type: A callable that is used to cast the value in the - :class:`Headers`. If a :exc:`ValueError` is raised - by this callable the value will be removed from the list. - :return: a :class:`list` of all the values for the key. - """ - ikey = key.lower() - result = [] - for k, v in self: - if k.lower() == ikey: - if type is not None: - try: - v = type(v) - except ValueError: - continue - result.append(v) - return result - - def get_all(self, name): - """Return a list of all the values for the named field. - - This method is compatible with the :mod:`wsgiref` - :meth:`~wsgiref.headers.Headers.get_all` method. - """ - return self.getlist(name) - - def iteritems(self, lower=False): - for key, value in self: - if lower: - key = key.lower() - yield key, value - - def iterkeys(self, lower=False): - for key, _ in self.iteritems(lower): - yield key - - def itervalues(self): - for _, value in self.iteritems(): - yield value - - def keys(self, lower=False): - return list(self.iterkeys(lower)) - - def values(self): - return list(self.itervalues()) - - def items(self, lower=False): - return list(self.iteritems(lower)) - - def extend(self, iterable): - """Extend the headers with a dict or an iterable yielding keys and - values. - """ - if isinstance(iterable, dict): - for key, value in iterable.iteritems(): - if isinstance(value, (tuple, list)): - for v in value: - self.add(key, v) - else: - self.add(key, value) - else: - for key, value in iterable: - self.add(key, value) - - def __delitem__(self, key, _index_operation=True): - if _index_operation and isinstance(key, (int, long, slice)): - del self._list[key] - return - key = key.lower() - new = [] - for k, v in self._list: - if k.lower() != key: - new.append((k, v)) - self._list[:] = new - - def remove(self, key): - """Remove a key. - - :param key: The key to be removed. - """ - return self.__delitem__(key, _index_operation=False) - - def pop(self, key=None, default=_missing): - """Removes and returns a key or index. - - :param key: The key to be popped. If this is an integer the item at - that position is removed, if it's a string the value for - that key is. If the key is omitted or `None` the last - item is removed. - :return: an item. 
- """ - if key is None: - return self._list.pop() - if isinstance(key, (int, long)): - return self._list.pop(key) - try: - rv = self[key] - self.remove(key) - except KeyError: - if default is not _missing: - return default - raise - return rv - - def popitem(self): - """Removes a key or index and returns a (key, value) item.""" - return self.pop() - - def __contains__(self, key): - """Check if a key is present.""" - try: - self.__getitem__(key, _get_mode=True) - except KeyError: - return False - return True - - has_key = __contains__ - - def __iter__(self): - """Yield ``(key, value)`` tuples.""" - return iter(self._list) - - def __len__(self): - return len(self._list) - - def add(self, _key, _value, **kw): - """Add a new header tuple to the list. - - Keyword arguments can specify additional parameters for the header - value, with underscores converted to dashes:: - - >>> d = Headers() - >>> d.add('Content-Type', 'text/plain') - >>> d.add('Content-Disposition', 'attachment', filename='foo.png') - - The keyword argument dumping uses :func:`dump_options_header` - behind the scenes. - - .. versionadded:: 0.4.1 - keyword arguments were added for :mod:`wsgiref` compatibility. - """ - if kw: - _value = _options_header_vkw(_value, kw) - self._validate_value(_value) - self._list.append((_key, _value)) - - def _validate_value(self, value): - if isinstance(value, basestring) and ('\n' in value or '\r' in value): - raise ValueError('Detected newline in header value. This is ' - 'a potential security problem') - - def add_header(self, _key, _value, **_kw): - """Add a new header tuple to the list. - - An alias for :meth:`add` for compatibility with the :mod:`wsgiref` - :meth:`~wsgiref.headers.Headers.add_header` method. - """ - self.add(_key, _value, **_kw) - - def clear(self): - """Clears all headers.""" - del self._list[:] - - def set(self, _key, _value, **kw): - """Remove all header tuples for `key` and add a new one. The newly - added key either appears at the end of the list if there was no - entry or replaces the first one. - - Keyword arguments can specify additional parameters for the header - value, with underscores converted to dashes. See :meth:`add` for - more information. - - .. versionchanged:: 0.6.1 - :meth:`set` now accepts the same arguments as :meth:`add`. - - :param key: The key to be inserted. - :param value: The value to be inserted. - """ - if kw: - _value = _options_header_vkw(_value, kw) - self._validate_value(_value) - if not self._list: - self._list.append((_key, _value)) - return - listiter = iter(self._list) - ikey = _key.lower() - for idx, (old_key, old_value) in enumerate(listiter): - if old_key.lower() == ikey: - # replace first ocurrence - self._list[idx] = (_key, _value) - break - else: - self._list.append((_key, _value)) - return - self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey] - - def setdefault(self, key, value): - """Returns the value for the key if it is in the dict, otherwise it - returns `default` and sets that value for `key`. - - :param key: The key to be looked up. - :param default: The default value to be returned if the key is not - in the dict. If not further specified it's `None`. 
- """ - if key in self: - return self[key] - self.set(key, value) - return value - - def __setitem__(self, key, value): - """Like :meth:`set` but also supports index/slice based setting.""" - if isinstance(key, (slice, int, long)): - self._validate_value(value) - self._list[key] = value - else: - self.set(key, value) - - def to_list(self, charset='iso-8859-1'): - """Convert the headers into a list and converts the unicode header - items to the specified charset. - - :return: list - """ - return [(k, isinstance(v, unicode) and v.encode(charset) or str(v)) - for k, v in self] - - def copy(self): - return self.__class__(self._list) - - def __copy__(self): - return self.copy() - - def __str__(self, charset='iso-8859-1'): - """Returns formatted headers suitable for HTTP transmission.""" - strs = [] - for key, value in self.to_list(charset): - strs.append('%s: %s' % (key, value)) - strs.append('\r\n') - return '\r\n'.join(strs) - - def __repr__(self): - return '%s(%r)' % ( - self.__class__.__name__, - list(self) - ) - - -class ImmutableHeadersMixin(object): - """Makes a :class:`Headers` immutable. We do not mark them as - hashable though since the only usecase for this datastructure - in Werkzeug is a view on a mutable structure. - - .. versionadded:: 0.5 - - :private: - """ - - def __delitem__(self, key): - is_immutable(self) - - def __setitem__(self, key, value): - is_immutable(self) - set = __setitem__ - - def add(self, item): - is_immutable(self) - remove = add_header = add - - def extend(self, iterable): - is_immutable(self) - - def insert(self, pos, value): - is_immutable(self) - - def pop(self, index=-1): - is_immutable(self) - - def popitem(self): - is_immutable(self) - - def setdefault(self, key, default): - is_immutable(self) - - -class EnvironHeaders(ImmutableHeadersMixin, Headers): - """Read only version of the headers from a WSGI environment. This - provides the same interface as `Headers` and is constructed from - a WSGI environment. - - From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a - subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will - render a page for a ``400 BAD REQUEST`` if caught in a catch-all for - HTTP exceptions. - """ - - def __init__(self, environ): - self.environ = environ - - @classmethod - def linked(cls, environ): - raise TypeError('%r object is always linked to environment, ' - 'no separate initializer' % cls.__name__) - - def __eq__(self, other): - return self.environ is other.environ - - def __getitem__(self, key, _get_mode=False): - # _get_mode is a no-op for this class as there is no index but - # used because get() calls it. - key = key.upper().replace('-', '_') - if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): - return self.environ[key] - return self.environ['HTTP_' + key] - - def __len__(self): - # the iter is necessary because otherwise list calls our - # len which would call list again and so forth. 
- return len(list(iter(self))) - - def __iter__(self): - for key, value in self.environ.iteritems(): - if key.startswith('HTTP_') and key not in \ - ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): - yield key[5:].replace('_', '-').title(), value - elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): - yield key.replace('_', '-').title(), value - - def copy(self): - raise TypeError('cannot create %r copies' % self.__class__.__name__) - - -class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict): - """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict` - instances as sequence and it will combine the return values of all wrapped - dicts: - - >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict - >>> post = MultiDict([('foo', 'bar')]) - >>> get = MultiDict([('blub', 'blah')]) - >>> combined = CombinedMultiDict([get, post]) - >>> combined['foo'] - 'bar' - >>> combined['blub'] - 'blah' - - This works for all read operations and will raise a `TypeError` for - methods that usually change data which isn't possible. - - From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a - subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will - render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP - exceptions. - """ - - def __reduce_ex__(self, protocol): - return type(self), (self.dicts,) - - def __init__(self, dicts=None): - self.dicts = dicts or [] - - @classmethod - def fromkeys(cls): - raise TypeError('cannot create %r instances by fromkeys' % - cls.__name__) - - def __getitem__(self, key): - for d in self.dicts: - if key in d: - return d[key] - raise BadRequestKeyError(key) - - def get(self, key, default=None, type=None): - for d in self.dicts: - if key in d: - if type is not None: - try: - return type(d[key]) - except ValueError: - continue - return d[key] - return default - - def getlist(self, key, type=None): - rv = [] - for d in self.dicts: - rv.extend(d.getlist(key, type)) - return rv - - def keys(self): - rv = set() - for d in self.dicts: - rv.update(d.keys()) - return list(rv) - - def iteritems(self, multi=False): - found = set() - for d in self.dicts: - for key, value in d.iteritems(multi): - if multi: - yield key, value - elif key not in found: - found.add(key) - yield key, value - - def itervalues(self): - for key, value in self.iteritems(): - yield value - - def values(self): - return list(self.itervalues()) - - def items(self, multi=False): - return list(self.iteritems(multi)) - - def iterlists(self): - rv = {} - for d in self.dicts: - for key, values in d.iterlists(): - rv.setdefault(key, []).extend(values) - return rv.iteritems() - - def lists(self): - return list(self.iterlists()) - - def iterlistvalues(self): - return (x[0] for x in self.lists()) - - def listvalues(self): - return list(self.iterlistvalues()) - - def iterkeys(self): - return iter(self.keys()) - - __iter__ = iterkeys - - def copy(self): - """Return a shallow copy of this object.""" - return self.__class__(self.dicts[:]) - - def to_dict(self, flat=True): - """Return the contents as regular dict. If `flat` is `True` the - returned dict will only have the first item present, if `flat` is - `False` all values will be returned as lists. - - :param flat: If set to `False` the dict returned will have lists - with all the values in it. Otherwise it will only - contain the first item for each key. 
- :return: a :class:`dict` - """ - rv = {} - for d in reversed(self.dicts): - rv.update(d.to_dict(flat)) - return rv - - def __len__(self): - return len(self.keys()) - - def __contains__(self, key): - for d in self.dicts: - if key in d: - return True - return False - - has_key = __contains__ - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self.dicts) - - -class FileMultiDict(MultiDict): - """A special :class:`MultiDict` that has convenience methods to add - files to it. This is used for :class:`EnvironBuilder` and generally - useful for unittesting. - - .. versionadded:: 0.5 - """ - - def add_file(self, name, file, filename=None, content_type=None): - """Adds a new file to the dict. `file` can be a file name or - a :class:`file`-like or a :class:`FileStorage` object. - - :param name: the name of the field. - :param file: a filename or :class:`file`-like object - :param filename: an optional filename - :param content_type: an optional content type - """ - if isinstance(file, FileStorage): - value = file - else: - if isinstance(file, basestring): - if filename is None: - filename = file - file = open(file, 'rb') - if filename and content_type is None: - content_type = mimetypes.guess_type(filename)[0] or \ - 'application/octet-stream' - value = FileStorage(file, filename, name, content_type) - - self.add(name, value) - - -class ImmutableDict(ImmutableDictMixin, dict): - """An immutable :class:`dict`. - - .. versionadded:: 0.5 - """ - - __repr__ = _proxy_repr(dict) - - def copy(self): - """Return a shallow mutable copy of this object. Keep in mind that - the standard library's :func:`copy` function is a no-op for this class - like for any other python immutable type (eg: :class:`tuple`). - """ - return dict(self) - - def __copy__(self): - return self - - -class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict): - """An immutable :class:`MultiDict`. - - .. versionadded:: 0.5 - """ - - def copy(self): - """Return a shallow mutable copy of this object. Keep in mind that - the standard library's :func:`copy` function is a no-op for this class - like for any other python immutable type (eg: :class:`tuple`). - """ - return MultiDict(self) - - def __copy__(self): - return self - - -class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict): - """An immutable :class:`OrderedMultiDict`. - - .. versionadded:: 0.6 - """ - - def _iter_hashitems(self): - return enumerate(self.iteritems(multi=True)) - - def copy(self): - """Return a shallow mutable copy of this object. Keep in mind that - the standard library's :func:`copy` function is a no-op for this class - like for any other python immutable type (eg: :class:`tuple`). - """ - return OrderedMultiDict(self) - - def __copy__(self): - return self - - -class Accept(ImmutableList): - """An :class:`Accept` object is just a list subclass for lists of - ``(value, quality)`` tuples. It is automatically sorted by quality. - - All :class:`Accept` objects work similar to a list but provide extra - functionality for working with the data. Containment checks are - normalized to the rules of that header: - - >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)]) - >>> a.best - 'ISO-8859-1' - >>> 'iso-8859-1' in a - True - >>> 'UTF8' in a - True - >>> 'utf7' in a - False - - To get the quality for an item you can use normal item lookup: - - >>> print a['utf-8'] - 0.7 - >>> a['utf7'] - 0 - - .. versionchanged:: 0.5 - :class:`Accept` objects are forced immutable now. 
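best_match, defined just below, is the one piece the Accept docstring does not demo; a sketch using the MIME variant::

    from werkzeug.datastructures import MIMEAccept

    accept = MIMEAccept([('text/html', 1), ('application/json', 0.8)])
    accept.best                  # -> 'text/html'
    accept['application/json']   # -> 0.8, quality lookup by value
    accept.best_match(['application/json', 'text/plain'])
    # -> 'application/json': highest client quality among the offered types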
- """ - - def __init__(self, values=()): - if values is None: - list.__init__(self) - self.provided = False - elif isinstance(values, Accept): - self.provided = values.provided - list.__init__(self, values) - else: - self.provided = True - values = [(a, b) for b, a in values] - values.sort() - values.reverse() - list.__init__(self, [(a, b) for b, a in values]) - - def _value_matches(self, value, item): - """Check if a value matches a given accept item.""" - return item == '*' or item.lower() == value.lower() - - def __getitem__(self, key): - """Besides index lookup (getting item n) you can also pass it a string - to get the quality for the item. If the item is not in the list, the - returned quality is ``0``. - """ - if isinstance(key, basestring): - return self.quality(key) - return list.__getitem__(self, key) - - def quality(self, key): - """Returns the quality of the key. - - .. versionadded:: 0.6 - In previous versions you had to use the item-lookup syntax - (eg: ``obj[key]`` instead of ``obj.quality(key)``) - """ - for item, quality in self: - if self._value_matches(key, item): - return quality - return 0 - - def __contains__(self, value): - for item, quality in self: - if self._value_matches(value, item): - return True - return False - - def __repr__(self): - return '%s([%s])' % ( - self.__class__.__name__, - ', '.join('(%r, %s)' % (x, y) for x, y in self) - ) - - def index(self, key): - """Get the position of an entry or raise :exc:`ValueError`. - - :param key: The key to be looked up. - - .. versionchanged:: 0.5 - This used to raise :exc:`IndexError`, which was inconsistent - with the list API. - """ - if isinstance(key, basestring): - for idx, (item, quality) in enumerate(self): - if self._value_matches(key, item): - return idx - raise ValueError(key) - return list.index(self, key) - - def find(self, key): - """Get the position of an entry or return -1. - - :param key: The key to be looked up. - """ - try: - return self.index(key) - except ValueError: - return -1 - - def values(self): - """Return a list of the values, not the qualities.""" - return list(self.itervalues()) - - def itervalues(self): - """Iterate over all values.""" - for item in self: - yield item[0] - - def to_header(self): - """Convert the header set into an HTTP header string.""" - result = [] - for value, quality in self: - if quality != 1: - value = '%s;q=%s' % (value, quality) - result.append(value) - return ','.join(result) - - def __str__(self): - return self.to_header() - - def best_match(self, matches, default=None): - """Returns the best match from a list of possible matches based - on the quality of the client. If two items have the same quality, - the one is returned that comes first. - - :param matches: a list of matches to check for - :param default: the value that is returned if none match - """ - best_quality = -1 - result = default - for server_item in matches: - for client_item, quality in self: - if quality <= best_quality: - break - if self._value_matches(server_item, client_item): - best_quality = quality - result = server_item - return result - - @property - def best(self): - """The best match as value.""" - if self: - return self[0][0] - - -class MIMEAccept(Accept): - """Like :class:`Accept` but with special methods and behavior for - mimetypes. - """ - - def _value_matches(self, value, item): - def _normalize(x): - x = x.lower() - return x == '*' and ('*', '*') or x.split('/', 1) - - # this is from the application which is trusted. 
to avoid developer
-        # frustration we actually check these for valid values
-        if '/' not in value:
-            raise ValueError('invalid mimetype %r' % value)
-        value_type, value_subtype = _normalize(value)
-        if value_type == '*' and value_subtype != '*':
-            raise ValueError('invalid mimetype %r' % value)
-
-        if '/' not in item:
-            return False
-        item_type, item_subtype = _normalize(item)
-        if item_type == '*' and item_subtype != '*':
-            return False
-        return (
-            (item_type == item_subtype == '*' or
-             value_type == value_subtype == '*') or
-            (item_type == value_type and (item_subtype == '*' or
-                                          value_subtype == '*' or
-                                          item_subtype == value_subtype))
-        )
-
-    @property
-    def accept_html(self):
-        """True if this object accepts HTML."""
-        return (
-            'text/html' in self or
-            'application/xhtml+xml' in self or
-            self.accept_xhtml
-        )
-
-    @property
-    def accept_xhtml(self):
-        """True if this object accepts XHTML."""
-        return (
-            'application/xhtml+xml' in self or
-            'application/xml' in self
-        )
-
-    @property
-    def accept_json(self):
-        """True if this object accepts JSON."""
-        return 'application/json' in self
-
-
-class LanguageAccept(Accept):
-    """Like :class:`Accept` but with normalization for languages."""
-
-    def _value_matches(self, value, item):
-        def _normalize(language):
-            return _locale_delim_re.split(language.lower())
-        return item == '*' or _normalize(value) == _normalize(item)
-
-
-class CharsetAccept(Accept):
-    """Like :class:`Accept` but with normalization for charsets."""
-
-    def _value_matches(self, value, item):
-        def _normalize(name):
-            try:
-                return codecs.lookup(name).name
-            except LookupError:
-                return name.lower()
-        return item == '*' or _normalize(value) == _normalize(item)
-
-
-def cache_property(key, empty, type):
-    """Return a new property object for a cache header. Useful if you
-    want to add support for a cache extension in a subclass."""
-    return property(lambda x: x._get_cache_value(key, empty, type),
-                    lambda x, v: x._set_cache_value(key, v, type),
-                    lambda x: x._del_cache_value(key),
-                    'accessor for %r' % key)
-
-
-class _CacheControl(UpdateDictMixin, dict):
-    """Subclass of a dict that stores values for a Cache-Control header. It
-    has accessors for all the cache-control directives specified in RFC 2616.
-    The class does not differentiate between request and response directives.
-
-    Because the cache-control directives in the HTTP header use dashes the
-    python descriptors use underscores for that.
-
-    To get a header of the :class:`CacheControl` object again you can convert
-    the object into a string or call the :meth:`to_header` method. If you plan
-    to subclass it and add your own items have a look at the sourcecode for
-    that class.
-
-    .. versionchanged:: 0.4
-
-       Setting `no_cache` or `private` to boolean `True` will set the implicit
-       none-value which is ``*``:
-
-       >>> cc = ResponseCacheControl()
-       >>> cc.no_cache = True
-       >>> cc
-       <ResponseCacheControl 'no-cache'>
-       >>> cc.no_cache
-       '*'
-       >>> cc.no_cache = None
-       >>> cc
-       <ResponseCacheControl ''>
-
-       In versions before 0.5 the behavior documented here affected the now
-       no longer existing `CacheControl` class.
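
An illustrative sketch of the cache-control accessors (not from the diff; assumes Werkzeug 0.x, Python 2):

.. code:: python

    from werkzeug.datastructures import ResponseCacheControl

    cc = ResponseCacheControl()
    cc.max_age = 300      # stored under the 'max-age' key via cache_property
    cc.public = True      # boolean directive, rendered without a value
    print cc.to_header()  # e.g. 'public, max-age=300' (dict order varies)
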
- """ - - no_cache = cache_property('no-cache', '*', None) - no_store = cache_property('no-store', None, bool) - max_age = cache_property('max-age', -1, int) - no_transform = cache_property('no-transform', None, None) - - def __init__(self, values=(), on_update=None): - dict.__init__(self, values or ()) - self.on_update = on_update - self.provided = values is not None - - def _get_cache_value(self, key, empty, type): - """Used internally by the accessor properties.""" - if type is bool: - return key in self - if key in self: - value = self[key] - if value is None: - return empty - elif type is not None: - try: - value = type(value) - except ValueError: - pass - return value - - def _set_cache_value(self, key, value, type): - """Used internally by the accessor properties.""" - if type is bool: - if value: - self[key] = None - else: - self.pop(key, None) - else: - if value is None: - self.pop(key) - elif value is True: - self[key] = None - else: - self[key] = value - - def _del_cache_value(self, key): - """Used internally by the accessor properties.""" - if key in self: - del self[key] - - def to_header(self): - """Convert the stored values into a cache control header.""" - return dump_header(self) - - def __str__(self): - return self.to_header() - - def __repr__(self): - return '<%s %r>' % ( - self.__class__.__name__, - self.to_header() - ) - - -class RequestCacheControl(ImmutableDictMixin, _CacheControl): - """A cache control for requests. This is immutable and gives access - to all the request-relevant cache control headers. - - To get a header of the :class:`RequestCacheControl` object again you can - convert the object into a string or call the :meth:`to_header` method. If - you plan to subclass it and add your own items have a look at the sourcecode - for that class. - - .. versionadded:: 0.5 - In previous versions a `CacheControl` class existed that was used - both for request and response. - """ - - max_stale = cache_property('max-stale', '*', int) - min_fresh = cache_property('min-fresh', '*', int) - no_transform = cache_property('no-transform', None, None) - only_if_cached = cache_property('only-if-cached', None, bool) - - -class ResponseCacheControl(_CacheControl): - """A cache control for responses. Unlike :class:`RequestCacheControl` - this is mutable and gives access to response-relevant cache control - headers. - - To get a header of the :class:`ResponseCacheControl` object again you can - convert the object into a string or call the :meth:`to_header` method. If - you plan to subclass it and add your own items have a look at the sourcecode - for that class. - - .. versionadded:: 0.5 - In previous versions a `CacheControl` class existed that was used - both for request and response. - """ - - public = cache_property('public', None, bool) - private = cache_property('private', '*', None) - must_revalidate = cache_property('must-revalidate', None, bool) - proxy_revalidate = cache_property('proxy-revalidate', None, bool) - s_maxage = cache_property('s-maxage', None, None) - - -# attach cache_property to the _CacheControl as staticmethod -# so that others can reuse it. -_CacheControl.cache_property = staticmethod(cache_property) - - -class CallbackDict(UpdateDictMixin, dict): - """A dict that calls a function passed every time something is changed. - The function is passed the dict instance. 
- """ - - def __init__(self, initial=None, on_update=None): - dict.__init__(self, initial or ()) - self.on_update = on_update - - def __repr__(self): - return '<%s %s>' % ( - self.__class__.__name__, - dict.__repr__(self) - ) - - -class HeaderSet(object): - """Similar to the :class:`ETags` class this implements a set-like structure. - Unlike :class:`ETags` this is case insensitive and used for vary, allow, and - content-language headers. - - If not constructed using the :func:`parse_set_header` function the - instantiation works like this: - - >>> hs = HeaderSet(['foo', 'bar', 'baz']) - >>> hs - HeaderSet(['foo', 'bar', 'baz']) - """ - - def __init__(self, headers=None, on_update=None): - self._headers = list(headers or ()) - self._set = set([x.lower() for x in self._headers]) - self.on_update = on_update - - def add(self, header): - """Add a new header to the set.""" - self.update((header,)) - - def remove(self, header): - """Remove a header from the set. This raises an :exc:`KeyError` if the - header is not in the set. - - .. versionchanged:: 0.5 - In older versions a :exc:`IndexError` was raised instead of a - :exc:`KeyError` if the object was missing. - - :param header: the header to be removed. - """ - key = header.lower() - if key not in self._set: - raise KeyError(header) - self._set.remove(key) - for idx, key in enumerate(self._headers): - if key.lower() == header: - del self._headers[idx] - break - if self.on_update is not None: - self.on_update(self) - - def update(self, iterable): - """Add all the headers from the iterable to the set. - - :param iterable: updates the set with the items from the iterable. - """ - inserted_any = False - for header in iterable: - key = header.lower() - if key not in self._set: - self._headers.append(header) - self._set.add(key) - inserted_any = True - if inserted_any and self.on_update is not None: - self.on_update(self) - - def discard(self, header): - """Like :meth:`remove` but ignores errors. - - :param header: the header to be discarded. - """ - try: - return self.remove(header) - except KeyError: - pass - - def find(self, header): - """Return the index of the header in the set or return -1 if not found. - - :param header: the header to be looked up. - """ - header = header.lower() - for idx, item in enumerate(self._headers): - if item.lower() == header: - return idx - return -1 - - def index(self, header): - """Return the index of the header in the set or raise an - :exc:`IndexError`. - - :param header: the header to be looked up. - """ - rv = self.find(header) - if rv < 0: - raise IndexError(header) - return rv - - def clear(self): - """Clear the set.""" - self._set.clear() - del self._headers[:] - if self.on_update is not None: - self.on_update(self) - - def as_set(self, preserve_casing=False): - """Return the set as real python set type. When calling this, all - the items are converted to lowercase and the ordering is lost. - - :param preserve_casing: if set to `True` the items in the set returned - will have the original case like in the - :class:`HeaderSet`, otherwise they will - be lowercase. 
- """ - if preserve_casing: - return set(self._headers) - return set(self._set) - - def to_header(self): - """Convert the header set into an HTTP header string.""" - return ', '.join(map(quote_header_value, self._headers)) - - def __getitem__(self, idx): - return self._headers[idx] - - def __delitem__(self, idx): - rv = self._headers.pop(idx) - self._set.remove(rv.lower()) - if self.on_update is not None: - self.on_update(self) - - def __setitem__(self, idx, value): - old = self._headers[idx] - self._set.remove(old.lower()) - self._headers[idx] = value - self._set.add(value.lower()) - if self.on_update is not None: - self.on_update(self) - - def __contains__(self, header): - return header.lower() in self._set - - def __len__(self): - return len(self._set) - - def __iter__(self): - return iter(self._headers) - - def __nonzero__(self): - return bool(self._set) - - def __str__(self): - return self.to_header() - - def __repr__(self): - return '%s(%r)' % ( - self.__class__.__name__, - self._headers - ) - - -class ETags(object): - """A set that can be used to check if one etag is present in a collection - of etags. - """ - - def __init__(self, strong_etags=None, weak_etags=None, star_tag=False): - self._strong = frozenset(not star_tag and strong_etags or ()) - self._weak = frozenset(weak_etags or ()) - self.star_tag = star_tag - - def as_set(self, include_weak=False): - """Convert the `ETags` object into a python set. Per default all the - weak etags are not part of this set.""" - rv = set(self._strong) - if include_weak: - rv.update(self._weak) - return rv - - def is_weak(self, etag): - """Check if an etag is weak.""" - return etag in self._weak - - def contains_weak(self, etag): - """Check if an etag is part of the set including weak and strong tags.""" - return self.is_weak(etag) or self.contains(etag) - - def contains(self, etag): - """Check if an etag is part of the set ignoring weak tags. - It is also possible to use the ``in`` operator. - - """ - if self.star_tag: - return True - return etag in self._strong - - def contains_raw(self, etag): - """When passed a quoted tag it will check if this tag is part of the - set. If the tag is weak it is checked against weak and strong tags, - otherwise strong only.""" - etag, weak = unquote_etag(etag) - if weak: - return self.contains_weak(etag) - return self.contains(etag) - - def to_header(self): - """Convert the etags set into a HTTP header string.""" - if self.star_tag: - return '*' - return ', '.join( - ['"%s"' % x for x in self._strong] + - ['w/"%s"' % x for x in self._weak] - ) - - def __call__(self, etag=None, data=None, include_weak=False): - if [etag, data].count(None) != 1: - raise TypeError('either tag or data required, but at least one') - if etag is None: - etag = generate_etag(data) - if include_weak: - if etag in self._weak: - return True - return etag in self._strong - - def __nonzero__(self): - return bool(self.star_tag or self._strong) - - def __str__(self): - return self.to_header() - - def __iter__(self): - return iter(self._strong) - - def __contains__(self, etag): - return self.contains(etag) - - def __repr__(self): - return '<%s %r>' % (self.__class__.__name__, str(self)) - - -class IfRange(object): - """Very simple object that represents the `If-Range` header in parsed - form. It will either have neither a etag or date or one of either but - never both. - - .. versionadded:: 0.7 - """ - - def __init__(self, etag=None, date=None): - #: The etag parsed and unquoted. 
Ranges always operate on strong - #: etags so the weakness information is not necessary. - self.etag = etag - #: The date in parsed format or `None`. - self.date = date - - def to_header(self): - """Converts the object back into an HTTP header.""" - if self.date is not None: - return http_date(self.date) - if self.etag is not None: - return quote_etag(self.etag) - return '' - - def __str__(self): - return self.to_header() - - def __repr__(self): - return '<%s %r>' % (self.__class__.__name__, str(self)) - - -class Range(object): - """Represents a range header. All the methods are only supporting bytes - as unit. It does store multiple ranges but :meth:`range_for_length` will - only work if only one range is provided. - - .. versionadded:: 0.7 - """ - - def __init__(self, units, ranges): - #: The units of this range. Usually "bytes". - self.units = units - #: A list of ``(begin, end)`` tuples for the range header provided. - #: The ranges are non-inclusive. - self.ranges = ranges - - def range_for_length(self, length): - """If the range is for bytes, the length is not None and there is - exactly one range and it is satisfiable it returns a ``(start, stop)`` - tuple, otherwise `None`. - """ - if self.units != 'bytes' or length is None or len(self.ranges) != 1: - return None - start, end = self.ranges[0] - if end is None: - end = length - if start < 0: - start += length - if is_byte_range_valid(start, end, length): - return start, min(end, length) - - def make_content_range(self, length): - """Creates a :class:`~werkzeug.datastructures.ContentRange` object - from the current range and given content length. - """ - rng = self.range_for_length(length) - if rng is not None: - return ContentRange(self.units, rng[0], rng[1], length) - - def to_header(self): - """Converts the object back into an HTTP header.""" - ranges = [] - for begin, end in self.ranges: - if end is None: - ranges.append(begin >= 0 and '%s-' % begin or str(begin)) - else: - ranges.append('%s-%s' % (begin, end - 1)) - return '%s=%s' % (self.units, ','.join(ranges)) - - def __str__(self): - return self.to_header() - - def __repr__(self): - return '<%s %r>' % (self.__class__.__name__, str(self)) - - -class ContentRange(object): - """Represents the content range header. - - .. versionadded:: 0.7 - """ - - def __init__(self, units, start, stop, length=None, on_update=None): - assert is_byte_range_valid(start, stop, length), \ - 'Bad range provided' - self.on_update = on_update - self.set(start, stop, length, units) - - def _callback_property(name): - def fget(self): - return getattr(self, name) - def fset(self, value): - setattr(self, name, value) - if self.on_update is not None: - self.on_update(self) - return property(fget, fset) - - #: The units to use, usually "bytes" - units = _callback_property('_units') - #: The start point of the range or `None`. - start = _callback_property('_start') - #: The stop point of the range (non-inclusive) or `None`. Can only be - #: `None` if also start is `None`. - stop = _callback_property('_stop') - #: The length of the range or `None`. 
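
A worked example for `Range.range_for_length` (illustrative, not from the diff): an open-ended range ``bytes=500-`` against a 1200-byte resource resolves to the non-inclusive pair shown below.

.. code:: python

    from werkzeug.datastructures import Range

    rng = Range('bytes', [(500, None)])
    print rng.to_header()             # 'bytes=500-'
    print rng.range_for_length(1200)  # (500, 1200), stop is exclusive
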
- length = _callback_property('_length') - - def set(self, start, stop, length=None, units='bytes'): - """Simple method to update the ranges.""" - assert is_byte_range_valid(start, stop, length), \ - 'Bad range provided' - self._units = units - self._start = start - self._stop = stop - self._length = length - if self.on_update is not None: - self.on_update(self) - - def unset(self): - """Sets the units to `None` which indicates that the header should - no longer be used. - """ - self.set(None, None, units=None) - - def to_header(self): - if self.units is None: - return '' - if self.length is None: - length = '*' - else: - length = self.length - if self.start is None: - return '%s */%s' % (self.units, length) - return '%s %s-%s/%s' % ( - self.units, - self.start, - self.stop - 1, - length - ) - - def __nonzero__(self): - return self.units is not None - - def __str__(self): - return self.to_header() - - def __repr__(self): - return '<%s %r>' % (self.__class__.__name__, str(self)) - - -class Authorization(ImmutableDictMixin, dict): - """Represents an `Authorization` header sent by the client. You should - not create this kind of object yourself but use it when it's returned by - the `parse_authorization_header` function. - - This object is a dict subclass and can be altered by setting dict items - but it should be considered immutable as it's returned by the client and - not meant for modifications. - - .. versionchanged:: 0.5 - This object became immutable. - """ - - def __init__(self, auth_type, data=None): - dict.__init__(self, data or {}) - self.type = auth_type - - username = property(lambda x: x.get('username'), doc=''' - The username transmitted. This is set for both basic and digest - auth all the time.''') - password = property(lambda x: x.get('password'), doc=''' - When the authentication type is basic this is the password - transmitted by the client, else `None`.''') - realm = property(lambda x: x.get('realm'), doc=''' - This is the server realm sent back for HTTP digest auth.''') - nonce = property(lambda x: x.get('nonce'), doc=''' - The nonce the server sent for digest auth, sent back by the client. - A nonce should be unique for every 401 response for HTTP digest - auth.''') - uri = property(lambda x: x.get('uri'), doc=''' - The URI from Request-URI of the Request-Line; duplicated because - proxies are allowed to change the Request-Line in transit. HTTP - digest auth only.''') - nc = property(lambda x: x.get('nc'), doc=''' - The nonce count value transmitted by clients if a qop-header is - also transmitted. HTTP digest auth only.''') - cnonce = property(lambda x: x.get('cnonce'), doc=''' - If the server sent a qop-header in the ``WWW-Authenticate`` - header, the client has to provide this value for HTTP digest auth. - See the RFC for more details.''') - response = property(lambda x: x.get('response'), doc=''' - A string of 32 hex digits computed as defined in RFC 2617, which - proves that the user knows a password. Digest auth only.''') - opaque = property(lambda x: x.get('opaque'), doc=''' - The opaque header from the server returned unchanged by the client. - It is recommended that this string be base64 or hexadecimal data. 
- Digest auth only.''') - - @property - def qop(self): - """Indicates what "quality of protection" the client has applied to - the message for HTTP digest auth.""" - def on_update(header_set): - if not header_set and 'qop' in self: - del self['qop'] - elif header_set: - self['qop'] = header_set.to_header() - return parse_set_header(self.get('qop'), on_update) - - -class WWWAuthenticate(UpdateDictMixin, dict): - """Provides simple access to `WWW-Authenticate` headers.""" - - #: list of keys that require quoting in the generated header - _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm']) - - def __init__(self, auth_type=None, values=None, on_update=None): - dict.__init__(self, values or ()) - if auth_type: - self['__auth_type__'] = auth_type - self.on_update = on_update - - def set_basic(self, realm='authentication required'): - """Clear the auth info and enable basic auth.""" - dict.clear(self) - dict.update(self, {'__auth_type__': 'basic', 'realm': realm}) - if self.on_update: - self.on_update(self) - - def set_digest(self, realm, nonce, qop=('auth',), opaque=None, - algorithm=None, stale=False): - """Clear the auth info and enable digest auth.""" - d = { - '__auth_type__': 'digest', - 'realm': realm, - 'nonce': nonce, - 'qop': dump_header(qop) - } - if stale: - d['stale'] = 'TRUE' - if opaque is not None: - d['opaque'] = opaque - if algorithm is not None: - d['algorithm'] = algorithm - dict.clear(self) - dict.update(self, d) - if self.on_update: - self.on_update(self) - - def to_header(self): - """Convert the stored values into a WWW-Authenticate header.""" - d = dict(self) - auth_type = d.pop('__auth_type__', None) or 'basic' - return '%s %s' % (auth_type.title(), ', '.join([ - '%s=%s' % (key, quote_header_value(value, - allow_token=key not in self._require_quoting)) - for key, value in d.iteritems() - ])) - - def __str__(self): - return self.to_header() - - def __repr__(self): - return '<%s %r>' % ( - self.__class__.__name__, - self.to_header() - ) - - def auth_property(name, doc=None): - """A static helper function for subclasses to add extra authentication - system properties onto a class:: - - class FooAuthenticate(WWWAuthenticate): - special_realm = auth_property('special_realm') - - For more information have a look at the sourcecode to see how the - regular properties (:attr:`realm` etc.) are implemented. - """ - def _set_value(self, value): - if value is None: - self.pop(name, None) - else: - self[name] = str(value) - return property(lambda x: x.get(name), _set_value, doc=doc) - - def _set_property(name, doc=None): - def fget(self): - def on_update(header_set): - if not header_set and name in self: - del self[name] - elif header_set: - self[name] = header_set.to_header() - return parse_set_header(self.get(name), on_update) - return property(fget, doc=doc) - - type = auth_property('__auth_type__', doc=''' - The type of the auth mechanism. HTTP currently specifies - `Basic` and `Digest`.''') - realm = auth_property('realm', doc=''' - A string to be displayed to users so they know which username and - password to use. This string should contain at least the name of - the host performing the authentication and might additionally - indicate the collection of users who might have access.''') - domain = _set_property('domain', doc=''' - A list of URIs that define the protection space. 
If a URI is an
-        absolute path, it is relative to the canonical root URL of the
-        server being accessed.''')
-    nonce = auth_property('nonce', doc='''
-        A server-specified data string which should be uniquely generated
-        each time a 401 response is made. It is recommended that this
-        string be base64 or hexadecimal data.''')
-    opaque = auth_property('opaque', doc='''
-        A string of data, specified by the server, which should be returned
-        by the client unchanged in the Authorization header of subsequent
-        requests with URIs in the same protection space. It is recommended
-        that this string be base64 or hexadecimal data.''')
-    algorithm = auth_property('algorithm', doc='''
-        A string indicating a pair of algorithms used to produce the digest
-        and a checksum. If this is not present it is assumed to be "MD5".
-        If the algorithm is not understood, the challenge should be ignored
-        (and a different one used, if there is more than one).''')
-    qop = _set_property('qop', doc='''
-        A set of quality-of-privacy directives such as auth and auth-int.''')
-
-    def _get_stale(self):
-        val = self.get('stale')
-        if val is not None:
-            return val.lower() == 'true'
-    def _set_stale(self, value):
-        if value is None:
-            self.pop('stale', None)
-        else:
-            self['stale'] = value and 'TRUE' or 'FALSE'
-    stale = property(_get_stale, _set_stale, doc='''
-        A flag, indicating that the previous request from the client was
-        rejected because the nonce value was stale.''')
-    del _get_stale, _set_stale
-
-    # make auth_property a staticmethod so that subclasses of
-    # `WWWAuthenticate` can use it for new properties.
-    auth_property = staticmethod(auth_property)
-    del _set_property
-
-
-class FileStorage(object):
-    """The :class:`FileStorage` class is a thin wrapper over incoming files.
-    It is used by the request object to represent uploaded files. All the
-    attributes of the wrapper stream are proxied by the file storage so
-    it's possible to do ``storage.read()`` instead of the long form
-    ``storage.stream.read()``.
-    """
-
-    def __init__(self, stream=None, filename=None, name=None,
-                 content_type=None, content_length=None,
-                 headers=None):
-        self.name = name
-        self.stream = stream or _empty_stream
-
-        # if no filename is provided we can attempt to get the filename
-        # from the stream object passed. There we have to be careful to
-        # skip things like <fdopen>, <stderr> etc. Python marks these
-        # special filenames with angular brackets.
-        if filename is None:
-            filename = getattr(stream, 'name', None)
-            if filename and filename[0] == '<' and filename[-1] == '>':
-                filename = None
-
-        self.filename = filename
-        if headers is None:
-            headers = Headers()
-        self.headers = headers
-        if content_type is not None:
-            headers['Content-Type'] = content_type
-        if content_length is not None:
-            headers['Content-Length'] = str(content_length)
-
-    def _parse_content_type(self):
-        if not hasattr(self, '_parsed_content_type'):
-            self._parsed_content_type = \
-                parse_options_header(self.content_type)
-
-    @property
-    def content_type(self):
-        """The file's content type. Usually not available"""
-        return self.headers.get('content-type')
-
-    @property
-    def content_length(self):
-        """The file's content length. Usually not available"""
-        return int(self.headers.get('content-length') or 0)
-
-    @property
-    def mimetype(self):
-        """Like :attr:`content_type` but without parameters (eg, without
-        charset, type etc.). For example if the content
-        type is ``text/html; charset=utf-8`` the mimetype would be
-        ``'text/html'``.
-
-        .. versionadded:: 0.7
-        """
-        self._parse_content_type()
-        return self._parsed_content_type[0]
-
-    @property
-    def mimetype_params(self):
-        """The mimetype parameters as dict. For example if the content
-        type is ``text/html; charset=utf-8`` the params would be
-        ``{'charset': 'utf-8'}``.
-
-        .. versionadded:: 0.7
-        """
-        self._parse_content_type()
-        return self._parsed_content_type[1]
-
-    def save(self, dst, buffer_size=16384):
-        """Save the file to a destination path or file object. If the
-        destination is a file object you have to close it yourself after the
-        call. The buffer size is the number of bytes held in memory during
-        the copy process. It defaults to 16KB.
-
-        For secure file saving also have a look at :func:`secure_filename`.
-
-        :param dst: a filename or open file object the uploaded file
-                    is saved to.
-        :param buffer_size: the size of the buffer. This works the same as
-                            the `length` parameter of
-                            :func:`shutil.copyfileobj`.
-        """
-        from shutil import copyfileobj
-        close_dst = False
-        if isinstance(dst, basestring):
-            dst = file(dst, 'wb')
-            close_dst = True
-        try:
-            copyfileobj(self.stream, dst, buffer_size)
-        finally:
-            if close_dst:
-                dst.close()
-
-    def close(self):
-        """Close the underlying file if possible."""
-        try:
-            self.stream.close()
-        except Exception:
-            pass
-
-    def __nonzero__(self):
-        return bool(self.filename)
-
-    def __getattr__(self, name):
-        return getattr(self.stream, name)
-
-    def __iter__(self):
-        return iter(self.readline, '')
-
-    def __repr__(self):
-        return '<%s: %r (%r)>' % (
-            self.__class__.__name__,
-            self.filename,
-            self.content_type
-        )
-
-
-# circular dependencies
-from werkzeug.http import dump_options_header, dump_header, generate_etag, \
-     quote_header_value, parse_set_header, unquote_etag, quote_etag, \
-     parse_options_header, http_date, is_byte_range_valid
-from werkzeug.exceptions import BadRequestKeyError
diff --git a/src/lib/werkzeug/debug/__init__.py b/src/lib/werkzeug/debug/__init__.py
deleted file mode 100644
index db91b74..0000000
--- a/src/lib/werkzeug/debug/__init__.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    werkzeug.debug
-    ~~~~~~~~~~~~~~
-
-    WSGI application traceback debugger.
-
-    :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
-    :license: BSD, see LICENSE for more details.
-"""
-import mimetypes
-from os.path import join, dirname, basename, isfile
-from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
-from werkzeug.debug.tbtools import get_current_traceback, render_console_html
-from werkzeug.debug.console import Console
-from werkzeug.security import gen_salt
-
-
-#: import this here because it once was documented as being available
-#: from this module. In case there are users left ...
-from werkzeug.debug.repr import debug_repr
-
-
-class _ConsoleFrame(object):
-    """Helper class so that we can reuse the frame console code for the
-    standalone console.
-    """
-
-    def __init__(self, namespace):
-        self.console = Console(namespace)
-        self.id = 0
-
-
-class DebuggedApplication(object):
-    """Enables debugging support for a given application::
-
-        from werkzeug.debug import DebuggedApplication
-        from myapp import app
-        app = DebuggedApplication(app, evalex=True)
-
-    The `evalex` keyword argument allows evaluating expressions in a
-    traceback's frame context.
-
-    .. versionadded:: 0.7
-       The `lodgeit_url` parameter was added.
-
-    :param app: the WSGI application to run debugged.
-    :param evalex: enable exception evaluation feature (interactive
-                   debugging). This requires a non-forking server.
-    :param request_key: The key that points to the request object in the
-                        environment. This parameter is ignored in current
-                        versions.
-    :param console_path: the URL for a general purpose console.
-    :param console_init_func: the function that is executed before starting
-                              the general purpose console. The return value
-                              is used as initial namespace.
-    :param show_hidden_frames: by default hidden traceback frames are skipped.
-                               You can show them by setting this parameter
-                               to `True`.
-    :param lodgeit_url: the base URL of the LodgeIt instance to use for
-                        pasting tracebacks.
-    """
-
-    # this class is public
-    __module__ = 'werkzeug'
-
-    def __init__(self, app, evalex=False, request_key='werkzeug.request',
-                 console_path='/console', console_init_func=None,
-                 show_hidden_frames=False,
-                 lodgeit_url='http://paste.pocoo.org/'):
-        if not console_init_func:
-            console_init_func = dict
-        self.app = app
-        self.evalex = evalex
-        self.frames = {}
-        self.tracebacks = {}
-        self.request_key = request_key
-        self.console_path = console_path
-        self.console_init_func = console_init_func
-        self.show_hidden_frames = show_hidden_frames
-        self.lodgeit_url = lodgeit_url
-        self.secret = gen_salt(20)
-
-    def debug_application(self, environ, start_response):
-        """Run the application and conserve the traceback frames."""
-        app_iter = None
-        try:
-            app_iter = self.app(environ, start_response)
-            for item in app_iter:
-                yield item
-            if hasattr(app_iter, 'close'):
-                app_iter.close()
-        except Exception:
-            if hasattr(app_iter, 'close'):
-                app_iter.close()
-            traceback = get_current_traceback(skip=1, show_hidden_frames=
-                                              self.show_hidden_frames,
-                                              ignore_system_exceptions=True)
-            for frame in traceback.frames:
-                self.frames[frame.id] = frame
-            self.tracebacks[traceback.id] = traceback
-
-            try:
-                start_response('500 INTERNAL SERVER ERROR', [
-                    ('Content-Type', 'text/html; charset=utf-8')
-                ])
-            except Exception:
-                # if we end up here there has been output but an error
-                # occurred. in that situation we can do nothing fancy any
-                # more, better log something into the error log and fall
-                # back gracefully.
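
How this middleware is typically wired up — a sketch only, assuming a plain WSGI callable (the `application` name is illustrative):

.. code:: python

    from werkzeug.debug import DebuggedApplication

    def application(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello\n']

    # evalex=True enables the in-page console driven by debugger.js below;
    # never expose the debugger on a production deployment.
    application = DebuggedApplication(application, evalex=True)
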
- environ['wsgi.errors'].write( - 'Debugging middleware caught exception in streamed ' - 'response at a point where response headers were already ' - 'sent.\n') - else: - yield traceback.render_full(evalex=self.evalex, - lodgeit_url=self.lodgeit_url, - secret=self.secret) \ - .encode('utf-8', 'replace') - - traceback.log(environ['wsgi.errors']) - - def execute_command(self, request, command, frame): - """Execute a command in a console.""" - return Response(frame.console.eval(command), mimetype='text/html') - - def display_console(self, request): - """Display a standalone shell.""" - if 0 not in self.frames: - self.frames[0] = _ConsoleFrame(self.console_init_func()) - return Response(render_console_html(secret=self.secret), - mimetype='text/html') - - def paste_traceback(self, request, traceback): - """Paste the traceback and return a JSON response.""" - paste_id = traceback.paste(self.lodgeit_url) - return Response('{"url": "%sshow/%s/", "id": "%s"}' - % (self.lodgeit_url, paste_id, paste_id), - mimetype='application/json') - - def get_source(self, request, frame): - """Render the source viewer.""" - return Response(frame.render_source(), mimetype='text/html') - - def get_resource(self, request, filename): - """Return a static resource from the shared folder.""" - filename = join(dirname(__file__), 'shared', basename(filename)) - if isfile(filename): - mimetype = mimetypes.guess_type(filename)[0] \ - or 'application/octet-stream' - f = file(filename, 'rb') - try: - return Response(f.read(), mimetype=mimetype) - finally: - f.close() - return Response('Not Found', status=404) - - def __call__(self, environ, start_response): - """Dispatch the requests.""" - # important: don't ever access a function here that reads the incoming - # form data! Otherwise the application won't have access to that data - # any more! - request = Request(environ) - response = self.debug_application - if request.args.get('__debugger__') == 'yes': - cmd = request.args.get('cmd') - arg = request.args.get('f') - secret = request.args.get('s') - traceback = self.tracebacks.get(request.args.get('tb', type=int)) - frame = self.frames.get(request.args.get('frm', type=int)) - if cmd == 'resource' and arg: - response = self.get_resource(request, arg) - elif cmd == 'paste' and traceback is not None and \ - secret == self.secret: - response = self.paste_traceback(request, traceback) - elif cmd == 'source' and frame and self.secret == secret: - response = self.get_source(request, frame) - elif self.evalex and cmd is not None and frame is not None and \ - self.secret == secret: - response = self.execute_command(request, cmd, frame) - elif self.evalex and self.console_path is not None and \ - request.path == self.console_path: - response = self.display_console(request) - return response(environ, start_response) diff --git a/src/lib/werkzeug/debug/shared/debugger.js b/src/lib/werkzeug/debug/shared/debugger.js deleted file mode 100644 index 6468cba..0000000 --- a/src/lib/werkzeug/debug/shared/debugger.js +++ /dev/null @@ -1,200 +0,0 @@ -$(function() { - var sourceView = null; - - /** - * if we are in console mode, show the console. 
- */
-  if (CONSOLE_MODE && EVALEX) {
-    openShell(null, $('div.console div.inner').empty(), 0);
-  }
-
-  $('div.traceback div.frame').each(function() {
-    var
-      target = $('pre', this)
-        .click(function() {
-          sourceButton.click();
-        }),
-      consoleNode = null, source = null,
-      frameID = this.id.substring(6);
-
-    /**
-     * Add an interactive console to the frames
-     */
-    if (EVALEX)
-      $('<img src="?__debugger__=yes&cmd=resource&f=console.png">')
-        .attr('title', 'Open an interactive python shell in this frame')
-        .click(function() {
-          consoleNode = openShell(consoleNode, target, frameID);
-          return false;
-        })
-        .prependTo(target);
-
-    /**
-     * Show sourcecode
-     */
-    var sourceButton = $('<img src="?__debugger__=yes&cmd=resource&f=source.png">')
-      .attr('title', 'Display the sourcecode for this frame')
-      .click(function() {
-        if (!sourceView)
-          $('h2', sourceView =
-            $('<div class="box"><h2>View Source</h2><div class="sourceview">' +
-              '<table></table></div></div>')
-            .insertBefore('div.explanation'))
-            .css('cursor', 'pointer')
-            .click(function() {
-              sourceView.slideUp('fast');
-            });
-        $.get(document.location.pathname, {__debugger__: 'yes', cmd:
-          'source', frm: frameID, s: SECRET}, function(data) {
-          $('table', sourceView)
-            .replaceWith(data);
-          if (!sourceView.is(':visible'))
-            sourceView.slideDown('fast', function() {
-              focusSourceBlock();
-            });
-          else
-            focusSourceBlock();
-        });
-        return false;
-      })
-      .prependTo(target);
-  });
-
-  /**
-   * toggle traceback types on click.
-   */
-  $('h2.traceback').click(function() {
-    $(this).next().slideToggle('fast');
-    $('div.plain').slideToggle('fast');
-  }).css('cursor', 'pointer');
-  $('div.plain').hide();
-
-  /**
-   * Add extra info (this is here so that only users with JavaScript
-   * enabled see it.)
-   */
-  $('span.nojavascript')
-    .removeClass('nojavascript')
-    .html('<p>To switch between the interactive traceback and the plaintext ' +
-          'one, you can click on the "Traceback" headline. From the text ' +
-          'traceback you can also create a paste of it. ' + (!EVALEX ? '' :
-          'For code execution mouse-over the frame you want to debug and ' +
-          'click on the console icon on the right side.' +
-          '<p>You can execute arbitrary Python code in the stack frames and ' +
-          'there are some extra helpers available for introspection:' +
-          '<ul><li><code>dump()</code> shows all variables in the frame' +
-          '<li><code>dump(obj)</code> dumps all that\'s known about the object</ul>'));
-
-  /**
-   * Add the pastebin feature
-   */
-  $('div.plain form')
-    .submit(function() {
-      var label = $('input[type="submit"]', this);
-      var old_val = label.val();
-      label.val('submitting...');
-      $.ajax({
-        dataType: 'json',
-        url: document.location.pathname,
-        data: {__debugger__: 'yes', tb: TRACEBACK, cmd: 'paste',
-               s: SECRET},
-        success: function(data) {
-          $('div.plain span.pastemessage')
-            .removeClass('pastemessage')
-            .text('Paste created: ')
-            .append($('<a>#' + data.id + '</a>').attr('href', data.url));
-        },
-        error: function() {
-          alert('Error: Could not submit paste. No network connection?');
-          label.val(old_val);
-        }
-      });
-      return false;
-    });
-
-  // if we have javascript we submit by ajax anyways, so no need for the
-  // not scaling textarea.
-  var plainTraceback = $('div.plain textarea');
-  plainTraceback.replaceWith($('<pre>').text(plainTraceback.text()));
-});
-
-
-/**
- * Helper function for shell initialization
- */
-function openShell(consoleNode, target, frameID) {
-  if (consoleNode)
-    return consoleNode.slideToggle('fast');
-  consoleNode = $('<div class="console">')
-    .appendTo(target.parent())
-    .hide()
-  var historyPos = 0, history = [''];
-  var output = $('<div class="output">[console ready]</div>')
-    .appendTo(consoleNode);
-  var form = $('<form>&gt;&gt;&gt; </form>')
-    .submit(function() {
-      var cmd = command.val();
-      $.get(document.location.pathname, {
-          __debugger__: 'yes', cmd: cmd, frm: frameID, s: SECRET}, function(data) {
-        var tmp = $('<div>').html(data);
-        $('span.extended', tmp).each(function() {
-          var hidden = $(this).wrap('<span>').hide();
-          hidden
-            .parent()
-            .append($('<a href="#" class="toggle">&nbsp;&nbsp;</a>')
-              .click(function() {
-                hidden.toggle();
-                $(this).toggleClass('open')
-                return false;
-              }));
-        });
-        output.append(tmp);
-        command.focus();
-        consoleNode.scrollTop(command.position().top);
-        var old = history.pop();
-        history.push(cmd);
-        if (typeof old != 'undefined')
-          history.push(old);
-        historyPos = history.length - 1;
-      });
-      command.val('');
-      return false;
-    }).
-    appendTo(consoleNode);
-
-  var command = $('<input type="text">')
-    .appendTo(form)
-    .keydown(function(e) {
-      if (e.charCode == 100 && e.ctrlKey) {
-        output.text('--- screen cleared ---');
-        return false;
-      }
-      else if (e.charCode == 0 && (e.keyCode == 38 || e.keyCode == 40)) {
-        if (e.keyCode == 38 && historyPos > 0)
-          historyPos--;
-        else if (e.keyCode == 40 && historyPos < history.length)
-          historyPos++;
-        command.val(history[historyPos]);
-        return false;
-      }
-    });
-
-  return consoleNode.slideDown('fast', function() {
-    command.focus();
-  });
-}
-
-/**
- * Focus the current block in the source view.
- */
-function focusSourceBlock() {
-  var tmp, line = $('table.source tr.current');
-  for (var i = 0; i < 7; i++) {
-    tmp = line.prev();
-    if (!(tmp && tmp.is('.in-frame')))
-      break
-    line = tmp;
-  }
-  var container = $('div.sourceview');
-  container.scrollTop(line.offset().top);
-}
diff --git a/src/lib/werkzeug/debug/shared/jquery.js b/src/lib/werkzeug/debug/shared/jquery.js
deleted file mode 100644
index 8f3ca2e..0000000
--- a/src/lib/werkzeug/debug/shared/jquery.js
+++ /dev/null
@@ -1,167 +0,0 @@
-/*!
- * jQuery JavaScript Library v1.4.4
- * http://jquery.com/
- *
- * Copyright 2010, John Resig
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * Includes Sizzle.js
- * http://sizzlejs.com/
- * Copyright 2010, The Dojo Foundation
- * Released under the MIT, BSD, and GPL Licenses.
- * - * Date: Thu Nov 11 19:04:53 2010 -0500 - */ -(function(E,B){function ka(a,b,d){if(d===B&&a.nodeType===1){d=a.getAttribute("data-"+b);if(typeof d==="string"){try{d=d==="true"?true:d==="false"?false:d==="null"?null:!c.isNaN(d)?parseFloat(d):Ja.test(d)?c.parseJSON(d):d}catch(e){}c.data(a,b,d)}else d=B}return d}function U(){return false}function ca(){return true}function la(a,b,d){d[0].type=a;return c.event.handle.apply(b,d)}function Ka(a){var b,d,e,f,h,l,k,o,x,r,A,C=[];f=[];h=c.data(this,this.nodeType?"events":"__events__");if(typeof h==="function")h= -h.events;if(!(a.liveFired===this||!h||!h.live||a.button&&a.type==="click")){if(a.namespace)A=RegExp("(^|\\.)"+a.namespace.split(".").join("\\.(?:.*\\.)?")+"(\\.|$)");a.liveFired=this;var J=h.live.slice(0);for(k=0;kd)break;a.currentTarget=f.elem;a.data=f.handleObj.data;a.handleObj=f.handleObj;A=f.handleObj.origHandler.apply(f.elem,arguments);if(A===false||a.isPropagationStopped()){d=f.level;if(A===false)b=false;if(a.isImmediatePropagationStopped())break}}return b}}function Y(a,b){return(a&&a!=="*"?a+".":"")+b.replace(La, -"`").replace(Ma,"&")}function ma(a,b,d){if(c.isFunction(b))return c.grep(a,function(f,h){return!!b.call(f,h,f)===d});else if(b.nodeType)return c.grep(a,function(f){return f===b===d});else if(typeof b==="string"){var e=c.grep(a,function(f){return f.nodeType===1});if(Na.test(b))return c.filter(b,e,!d);else b=c.filter(b,e)}return c.grep(a,function(f){return c.inArray(f,b)>=0===d})}function na(a,b){var d=0;b.each(function(){if(this.nodeName===(a[d]&&a[d].nodeName)){var e=c.data(a[d++]),f=c.data(this, -e);if(e=e&&e.events){delete f.handle;f.events={};for(var h in e)for(var l in e[h])c.event.add(this,h,e[h][l],e[h][l].data)}}})}function Oa(a,b){b.src?c.ajax({url:b.src,async:false,dataType:"script"}):c.globalEval(b.text||b.textContent||b.innerHTML||"");b.parentNode&&b.parentNode.removeChild(b)}function oa(a,b,d){var e=b==="width"?a.offsetWidth:a.offsetHeight;if(d==="border")return e;c.each(b==="width"?Pa:Qa,function(){d||(e-=parseFloat(c.css(a,"padding"+this))||0);if(d==="margin")e+=parseFloat(c.css(a, -"margin"+this))||0;else e-=parseFloat(c.css(a,"border"+this+"Width"))||0});return e}function da(a,b,d,e){if(c.isArray(b)&&b.length)c.each(b,function(f,h){d||Ra.test(a)?e(a,h):da(a+"["+(typeof h==="object"||c.isArray(h)?f:"")+"]",h,d,e)});else if(!d&&b!=null&&typeof b==="object")c.isEmptyObject(b)?e(a,""):c.each(b,function(f,h){da(a+"["+f+"]",h,d,e)});else e(a,b)}function S(a,b){var d={};c.each(pa.concat.apply([],pa.slice(0,b)),function(){d[this]=a});return d}function qa(a){if(!ea[a]){var b=c("<"+ -a+">").appendTo("body"),d=b.css("display");b.remove();if(d==="none"||d==="")d="block";ea[a]=d}return ea[a]}function fa(a){return c.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:false}var t=E.document,c=function(){function a(){if(!b.isReady){try{t.documentElement.doScroll("left")}catch(j){setTimeout(a,1);return}b.ready()}}var b=function(j,s){return new b.fn.init(j,s)},d=E.jQuery,e=E.$,f,h=/^(?:[^<]*(<[\w\W]+>)[^>]*$|#([\w\-]+)$)/,l=/\S/,k=/^\s+/,o=/\s+$/,x=/\W/,r=/\d/,A=/^<(\w+)\s*\/?>(?:<\/\1>)?$/, -C=/^[\],:{}\s]*$/,J=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,w=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,I=/(?:^|:|,)(?:\s*\[)+/g,L=/(webkit)[ \/]([\w.]+)/,g=/(opera)(?:.*version)?[ \/]([\w.]+)/,i=/(msie) ([\w.]+)/,n=/(mozilla)(?:.*? 
rv:([\w.]+))?/,m=navigator.userAgent,p=false,q=[],u,y=Object.prototype.toString,F=Object.prototype.hasOwnProperty,M=Array.prototype.push,N=Array.prototype.slice,O=String.prototype.trim,D=Array.prototype.indexOf,R={};b.fn=b.prototype={init:function(j, -s){var v,z,H;if(!j)return this;if(j.nodeType){this.context=this[0]=j;this.length=1;return this}if(j==="body"&&!s&&t.body){this.context=t;this[0]=t.body;this.selector="body";this.length=1;return this}if(typeof j==="string")if((v=h.exec(j))&&(v[1]||!s))if(v[1]){H=s?s.ownerDocument||s:t;if(z=A.exec(j))if(b.isPlainObject(s)){j=[t.createElement(z[1])];b.fn.attr.call(j,s,true)}else j=[H.createElement(z[1])];else{z=b.buildFragment([v[1]],[H]);j=(z.cacheable?z.fragment.cloneNode(true):z.fragment).childNodes}return b.merge(this, -j)}else{if((z=t.getElementById(v[2]))&&z.parentNode){if(z.id!==v[2])return f.find(j);this.length=1;this[0]=z}this.context=t;this.selector=j;return this}else if(!s&&!x.test(j)){this.selector=j;this.context=t;j=t.getElementsByTagName(j);return b.merge(this,j)}else return!s||s.jquery?(s||f).find(j):b(s).find(j);else if(b.isFunction(j))return f.ready(j);if(j.selector!==B){this.selector=j.selector;this.context=j.context}return b.makeArray(j,this)},selector:"",jquery:"1.4.4",length:0,size:function(){return this.length}, -toArray:function(){return N.call(this,0)},get:function(j){return j==null?this.toArray():j<0?this.slice(j)[0]:this[j]},pushStack:function(j,s,v){var z=b();b.isArray(j)?M.apply(z,j):b.merge(z,j);z.prevObject=this;z.context=this.context;if(s==="find")z.selector=this.selector+(this.selector?" ":"")+v;else if(s)z.selector=this.selector+"."+s+"("+v+")";return z},each:function(j,s){return b.each(this,j,s)},ready:function(j){b.bindReady();if(b.isReady)j.call(t,b);else q&&q.push(j);return this},eq:function(j){return j=== --1?this.slice(j):this.slice(j,+j+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(N.apply(this,arguments),"slice",N.call(arguments).join(","))},map:function(j){return this.pushStack(b.map(this,function(s,v){return j.call(s,v,s)}))},end:function(){return this.prevObject||b(null)},push:M,sort:[].sort,splice:[].splice};b.fn.init.prototype=b.fn;b.extend=b.fn.extend=function(){var j,s,v,z,H,G=arguments[0]||{},K=1,Q=arguments.length,ga=false; -if(typeof G==="boolean"){ga=G;G=arguments[1]||{};K=2}if(typeof G!=="object"&&!b.isFunction(G))G={};if(Q===K){G=this;--K}for(;K0))if(q){var s=0,v=q;for(q=null;j=v[s++];)j.call(t,b);b.fn.trigger&&b(t).trigger("ready").unbind("ready")}}},bindReady:function(){if(!p){p=true;if(t.readyState==="complete")return setTimeout(b.ready,1);if(t.addEventListener){t.addEventListener("DOMContentLoaded",u,false);E.addEventListener("load",b.ready,false)}else if(t.attachEvent){t.attachEvent("onreadystatechange",u);E.attachEvent("onload", -b.ready);var j=false;try{j=E.frameElement==null}catch(s){}t.documentElement.doScroll&&j&&a()}}},isFunction:function(j){return b.type(j)==="function"},isArray:Array.isArray||function(j){return b.type(j)==="array"},isWindow:function(j){return j&&typeof j==="object"&&"setInterval"in j},isNaN:function(j){return j==null||!r.test(j)||isNaN(j)},type:function(j){return j==null?String(j):R[y.call(j)]||"object"},isPlainObject:function(j){if(!j||b.type(j)!=="object"||j.nodeType||b.isWindow(j))return false;if(j.constructor&& -!F.call(j,"constructor")&&!F.call(j.constructor.prototype,"isPrototypeOf"))return false;for(var s in j);return s===B||F.call(j,s)},isEmptyObject:function(j){for(var s in j)return 
false;return true},error:function(j){throw j;},parseJSON:function(j){if(typeof j!=="string"||!j)return null;j=b.trim(j);if(C.test(j.replace(J,"@").replace(w,"]").replace(I,"")))return E.JSON&&E.JSON.parse?E.JSON.parse(j):(new Function("return "+j))();else b.error("Invalid JSON: "+j)},noop:function(){},globalEval:function(j){if(j&& -l.test(j)){var s=t.getElementsByTagName("head")[0]||t.documentElement,v=t.createElement("script");v.type="text/javascript";if(b.support.scriptEval)v.appendChild(t.createTextNode(j));else v.text=j;s.insertBefore(v,s.firstChild);s.removeChild(v)}},nodeName:function(j,s){return j.nodeName&&j.nodeName.toUpperCase()===s.toUpperCase()},each:function(j,s,v){var z,H=0,G=j.length,K=G===B||b.isFunction(j);if(v)if(K)for(z in j){if(s.apply(j[z],v)===false)break}else for(;H
a";var f=d.getElementsByTagName("*"),h=d.getElementsByTagName("a")[0],l=t.createElement("select"), -k=l.appendChild(t.createElement("option"));if(!(!f||!f.length||!h)){c.support={leadingWhitespace:d.firstChild.nodeType===3,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/red/.test(h.getAttribute("style")),hrefNormalized:h.getAttribute("href")==="/a",opacity:/^0.55$/.test(h.style.opacity),cssFloat:!!h.style.cssFloat,checkOn:d.getElementsByTagName("input")[0].value==="on",optSelected:k.selected,deleteExpando:true,optDisabled:false,checkClone:false, -scriptEval:false,noCloneEvent:true,boxModel:null,inlineBlockNeedsLayout:false,shrinkWrapBlocks:false,reliableHiddenOffsets:true};l.disabled=true;c.support.optDisabled=!k.disabled;b.type="text/javascript";try{b.appendChild(t.createTextNode("window."+e+"=1;"))}catch(o){}a.insertBefore(b,a.firstChild);if(E[e]){c.support.scriptEval=true;delete E[e]}try{delete b.test}catch(x){c.support.deleteExpando=false}a.removeChild(b);if(d.attachEvent&&d.fireEvent){d.attachEvent("onclick",function r(){c.support.noCloneEvent= -false;d.detachEvent("onclick",r)});d.cloneNode(true).fireEvent("onclick")}d=t.createElement("div");d.innerHTML="";a=t.createDocumentFragment();a.appendChild(d.firstChild);c.support.checkClone=a.cloneNode(true).cloneNode(true).lastChild.checked;c(function(){var r=t.createElement("div");r.style.width=r.style.paddingLeft="1px";t.body.appendChild(r);c.boxModel=c.support.boxModel=r.offsetWidth===2;if("zoom"in r.style){r.style.display="inline";r.style.zoom= -1;c.support.inlineBlockNeedsLayout=r.offsetWidth===2;r.style.display="";r.innerHTML="
";c.support.shrinkWrapBlocks=r.offsetWidth!==2}r.innerHTML="
t
";var A=r.getElementsByTagName("td");c.support.reliableHiddenOffsets=A[0].offsetHeight===0;A[0].style.display="";A[1].style.display="none";c.support.reliableHiddenOffsets=c.support.reliableHiddenOffsets&&A[0].offsetHeight===0;r.innerHTML="";t.body.removeChild(r).style.display= -"none"});a=function(r){var A=t.createElement("div");r="on"+r;var C=r in A;if(!C){A.setAttribute(r,"return;");C=typeof A[r]==="function"}return C};c.support.submitBubbles=a("submit");c.support.changeBubbles=a("change");a=b=d=f=h=null}})();var ra={},Ja=/^(?:\{.*\}|\[.*\])$/;c.extend({cache:{},uuid:0,expando:"jQuery"+c.now(),noData:{embed:true,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:true},data:function(a,b,d){if(c.acceptData(a)){a=a==E?ra:a;var e=a.nodeType,f=e?a[c.expando]:null,h= -c.cache;if(!(e&&!f&&typeof b==="string"&&d===B)){if(e)f||(a[c.expando]=f=++c.uuid);else h=a;if(typeof b==="object")if(e)h[f]=c.extend(h[f],b);else c.extend(h,b);else if(e&&!h[f])h[f]={};a=e?h[f]:h;if(d!==B)a[b]=d;return typeof b==="string"?a[b]:a}}},removeData:function(a,b){if(c.acceptData(a)){a=a==E?ra:a;var d=a.nodeType,e=d?a[c.expando]:a,f=c.cache,h=d?f[e]:e;if(b){if(h){delete h[b];d&&c.isEmptyObject(h)&&c.removeData(a)}}else if(d&&c.support.deleteExpando)delete a[c.expando];else if(a.removeAttribute)a.removeAttribute(c.expando); -else if(d)delete f[e];else for(var l in a)delete a[l]}},acceptData:function(a){if(a.nodeName){var b=c.noData[a.nodeName.toLowerCase()];if(b)return!(b===true||a.getAttribute("classid")!==b)}return true}});c.fn.extend({data:function(a,b){var d=null;if(typeof a==="undefined"){if(this.length){var e=this[0].attributes,f;d=c.data(this[0]);for(var h=0,l=e.length;h-1)return true;return false},val:function(a){if(!arguments.length){var b=this[0];if(b){if(c.nodeName(b,"option")){var d=b.attributes.value;return!d||d.specified?b.value:b.text}if(c.nodeName(b,"select")){var e=b.selectedIndex;d=[];var f=b.options;b=b.type==="select-one"; -if(e<0)return null;var h=b?e:0;for(e=b?e+1:f.length;h=0;else if(c.nodeName(this,"select")){var A=c.makeArray(r);c("option",this).each(function(){this.selected=c.inArray(c(this).val(),A)>=0});if(!A.length)this.selectedIndex=-1}else this.value=r}})}});c.extend({attrFn:{val:true,css:true,html:true,text:true,data:true,width:true,height:true,offset:true}, -attr:function(a,b,d,e){if(!a||a.nodeType===3||a.nodeType===8)return B;if(e&&b in c.attrFn)return c(a)[b](d);e=a.nodeType!==1||!c.isXMLDoc(a);var f=d!==B;b=e&&c.props[b]||b;var h=Ta.test(b);if((b in a||a[b]!==B)&&e&&!h){if(f){b==="type"&&Ua.test(a.nodeName)&&a.parentNode&&c.error("type property can't be changed");if(d===null)a.nodeType===1&&a.removeAttribute(b);else a[b]=d}if(c.nodeName(a,"form")&&a.getAttributeNode(b))return a.getAttributeNode(b).nodeValue;if(b==="tabIndex")return(b=a.getAttributeNode("tabIndex"))&& -b.specified?b.value:Va.test(a.nodeName)||Wa.test(a.nodeName)&&a.href?0:B;return a[b]}if(!c.support.style&&e&&b==="style"){if(f)a.style.cssText=""+d;return a.style.cssText}f&&a.setAttribute(b,""+d);if(!a.attributes[b]&&a.hasAttribute&&!a.hasAttribute(b))return B;a=!c.support.hrefNormalized&&e&&h?a.getAttribute(b,2):a.getAttribute(b);return a===null?B:a}});var X=/\.(.*)$/,ia=/^(?:textarea|input|select)$/i,La=/\./g,Ma=/ /g,Xa=/[^\w\s.|`]/g,Ya=function(a){return a.replace(Xa,"\\$&")},ua={focusin:0,focusout:0}; -c.event={add:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(c.isWindow(a)&&a!==E&&!a.frameElement)a=E;if(d===false)d=U;else if(!d)return;var 
f,h;if(d.handler){f=d;d=f.handler}if(!d.guid)d.guid=c.guid++;if(h=c.data(a)){var l=a.nodeType?"events":"__events__",k=h[l],o=h.handle;if(typeof k==="function"){o=k.handle;k=k.events}else if(!k){a.nodeType||(h[l]=h=function(){});h.events=k={}}if(!o)h.handle=o=function(){return typeof c!=="undefined"&&!c.event.triggered?c.event.handle.apply(o.elem, -arguments):B};o.elem=a;b=b.split(" ");for(var x=0,r;l=b[x++];){h=f?c.extend({},f):{handler:d,data:e};if(l.indexOf(".")>-1){r=l.split(".");l=r.shift();h.namespace=r.slice(0).sort().join(".")}else{r=[];h.namespace=""}h.type=l;if(!h.guid)h.guid=d.guid;var A=k[l],C=c.event.special[l]||{};if(!A){A=k[l]=[];if(!C.setup||C.setup.call(a,e,r,o)===false)if(a.addEventListener)a.addEventListener(l,o,false);else a.attachEvent&&a.attachEvent("on"+l,o)}if(C.add){C.add.call(a,h);if(!h.handler.guid)h.handler.guid= -d.guid}A.push(h);c.event.global[l]=true}a=null}}},global:{},remove:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(d===false)d=U;var f,h,l=0,k,o,x,r,A,C,J=a.nodeType?"events":"__events__",w=c.data(a),I=w&&w[J];if(w&&I){if(typeof I==="function"){w=I;I=I.events}if(b&&b.type){d=b.handler;b=b.type}if(!b||typeof b==="string"&&b.charAt(0)==="."){b=b||"";for(f in I)c.event.remove(a,f+b)}else{for(b=b.split(" ");f=b[l++];){r=f;k=f.indexOf(".")<0;o=[];if(!k){o=f.split(".");f=o.shift();x=RegExp("(^|\\.)"+ -c.map(o.slice(0).sort(),Ya).join("\\.(?:.*\\.)?")+"(\\.|$)")}if(A=I[f])if(d){r=c.event.special[f]||{};for(h=e||0;h=0){a.type=f=f.slice(0,-1);a.exclusive=true}if(!d){a.stopPropagation();c.event.global[f]&&c.each(c.cache,function(){this.events&&this.events[f]&&c.event.trigger(a,b,this.handle.elem)})}if(!d||d.nodeType===3||d.nodeType=== -8)return B;a.result=B;a.target=d;b=c.makeArray(b);b.unshift(a)}a.currentTarget=d;(e=d.nodeType?c.data(d,"handle"):(c.data(d,"__events__")||{}).handle)&&e.apply(d,b);e=d.parentNode||d.ownerDocument;try{if(!(d&&d.nodeName&&c.noData[d.nodeName.toLowerCase()]))if(d["on"+f]&&d["on"+f].apply(d,b)===false){a.result=false;a.preventDefault()}}catch(h){}if(!a.isPropagationStopped()&&e)c.event.trigger(a,b,e,true);else if(!a.isDefaultPrevented()){var l;e=a.target;var k=f.replace(X,""),o=c.nodeName(e,"a")&&k=== -"click",x=c.event.special[k]||{};if((!x._default||x._default.call(d,a)===false)&&!o&&!(e&&e.nodeName&&c.noData[e.nodeName.toLowerCase()])){try{if(e[k]){if(l=e["on"+k])e["on"+k]=null;c.event.triggered=true;e[k]()}}catch(r){}if(l)e["on"+k]=l;c.event.triggered=false}}},handle:function(a){var b,d,e,f;d=[];var h=c.makeArray(arguments);a=h[0]=c.event.fix(a||E.event);a.currentTarget=this;b=a.type.indexOf(".")<0&&!a.exclusive;if(!b){e=a.type.split(".");a.type=e.shift();d=e.slice(0).sort();e=RegExp("(^|\\.)"+ -d.join("\\.(?:.*\\.)?")+"(\\.|$)")}a.namespace=a.namespace||d.join(".");f=c.data(this,this.nodeType?"events":"__events__");if(typeof f==="function")f=f.events;d=(f||{})[a.type];if(f&&d){d=d.slice(0);f=0;for(var l=d.length;f-1?c.map(a.options,function(e){return e.selected}).join("-"):"";else if(a.nodeName.toLowerCase()==="select")d=a.selectedIndex;return d},Z=function(a,b){var d=a.target,e,f;if(!(!ia.test(d.nodeName)||d.readOnly)){e=c.data(d,"_change_data");f=xa(d);if(a.type!=="focusout"||d.type!=="radio")c.data(d,"_change_data",f);if(!(e===B||f===e))if(e!=null||f){a.type="change";a.liveFired= -B;return c.event.trigger(a,b,d)}}};c.event.special.change={filters:{focusout:Z,beforedeactivate:Z,click:function(a){var b=a.target,d=b.type;if(d==="radio"||d==="checkbox"||b.nodeName.toLowerCase()==="select")return 
Z.call(this,a)},keydown:function(a){var b=a.target,d=b.type;if(a.keyCode===13&&b.nodeName.toLowerCase()!=="textarea"||a.keyCode===32&&(d==="checkbox"||d==="radio")||d==="select-multiple")return Z.call(this,a)},beforeactivate:function(a){a=a.target;c.data(a,"_change_data",xa(a))}},setup:function(){if(this.type=== -"file")return false;for(var a in V)c.event.add(this,a+".specialChange",V[a]);return ia.test(this.nodeName)},teardown:function(){c.event.remove(this,".specialChange");return ia.test(this.nodeName)}};V=c.event.special.change.filters;V.focus=V.beforeactivate}t.addEventListener&&c.each({focus:"focusin",blur:"focusout"},function(a,b){function d(e){e=c.event.fix(e);e.type=b;return c.event.trigger(e,null,e.target)}c.event.special[b]={setup:function(){ua[b]++===0&&t.addEventListener(a,d,true)},teardown:function(){--ua[b]=== -0&&t.removeEventListener(a,d,true)}}});c.each(["bind","one"],function(a,b){c.fn[b]=function(d,e,f){if(typeof d==="object"){for(var h in d)this[b](h,e,d[h],f);return this}if(c.isFunction(e)||e===false){f=e;e=B}var l=b==="one"?c.proxy(f,function(o){c(this).unbind(o,l);return f.apply(this,arguments)}):f;if(d==="unload"&&b!=="one")this.one(d,e,f);else{h=0;for(var k=this.length;h0?this.bind(b,d,e):this.trigger(b)};if(c.attrFn)c.attrFn[b]=true});E.attachEvent&&!E.addEventListener&&c(E).bind("unload",function(){for(var a in c.cache)if(c.cache[a].handle)try{c.event.remove(c.cache[a].handle.elem)}catch(b){}}); -(function(){function a(g,i,n,m,p,q){p=0;for(var u=m.length;p0){F=y;break}}y=y[g]}m[p]=F}}}var d=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,e=0,f=Object.prototype.toString,h=false,l=true;[0,0].sort(function(){l=false;return 0});var k=function(g,i,n,m){n=n||[];var p=i=i||t;if(i.nodeType!==1&&i.nodeType!==9)return[];if(!g||typeof g!=="string")return n;var q,u,y,F,M,N=true,O=k.isXML(i),D=[],R=g;do{d.exec("");if(q=d.exec(R)){R=q[3];D.push(q[1]);if(q[2]){F=q[3]; -break}}}while(q);if(D.length>1&&x.exec(g))if(D.length===2&&o.relative[D[0]])u=L(D[0]+D[1],i);else for(u=o.relative[D[0]]?[i]:k(D.shift(),i);D.length;){g=D.shift();if(o.relative[g])g+=D.shift();u=L(g,u)}else{if(!m&&D.length>1&&i.nodeType===9&&!O&&o.match.ID.test(D[0])&&!o.match.ID.test(D[D.length-1])){q=k.find(D.shift(),i,O);i=q.expr?k.filter(q.expr,q.set)[0]:q.set[0]}if(i){q=m?{expr:D.pop(),set:C(m)}:k.find(D.pop(),D.length===1&&(D[0]==="~"||D[0]==="+")&&i.parentNode?i.parentNode:i,O);u=q.expr?k.filter(q.expr, -q.set):q.set;if(D.length>0)y=C(u);else N=false;for(;D.length;){q=M=D.pop();if(o.relative[M])q=D.pop();else M="";if(q==null)q=i;o.relative[M](y,q,O)}}else y=[]}y||(y=u);y||k.error(M||g);if(f.call(y)==="[object Array]")if(N)if(i&&i.nodeType===1)for(g=0;y[g]!=null;g++){if(y[g]&&(y[g]===true||y[g].nodeType===1&&k.contains(i,y[g])))n.push(u[g])}else for(g=0;y[g]!=null;g++)y[g]&&y[g].nodeType===1&&n.push(u[g]);else n.push.apply(n,y);else C(y,n);if(F){k(F,p,n,m);k.uniqueSort(n)}return n};k.uniqueSort=function(g){if(w){h= -l;g.sort(w);if(h)for(var i=1;i0};k.find=function(g,i,n){var m;if(!g)return[];for(var p=0,q=o.order.length;p":function(g,i){var n,m=typeof i==="string",p=0,q=g.length;if(m&&!/\W/.test(i))for(i=i.toLowerCase();p=0))n||m.push(u);else if(n)i[q]=false;return false},ID:function(g){return g[1].replace(/\\/g,"")},TAG:function(g){return g[1].toLowerCase()},CHILD:function(g){if(g[1]==="nth"){var 
i=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(g[2]==="even"&&"2n"||g[2]==="odd"&&"2n+1"||!/\D/.test(g[2])&&"0n+"+g[2]||g[2]);g[2]=i[1]+(i[2]||1)-0;g[3]=i[3]-0}g[0]=e++;return g},ATTR:function(g,i,n, -m,p,q){i=g[1].replace(/\\/g,"");if(!q&&o.attrMap[i])g[1]=o.attrMap[i];if(g[2]==="~=")g[4]=" "+g[4]+" ";return g},PSEUDO:function(g,i,n,m,p){if(g[1]==="not")if((d.exec(g[3])||"").length>1||/^\w/.test(g[3]))g[3]=k(g[3],null,null,i);else{g=k.filter(g[3],i,n,true^p);n||m.push.apply(m,g);return false}else if(o.match.POS.test(g[0])||o.match.CHILD.test(g[0]))return true;return g},POS:function(g){g.unshift(true);return g}},filters:{enabled:function(g){return g.disabled===false&&g.type!=="hidden"},disabled:function(g){return g.disabled=== -true},checked:function(g){return g.checked===true},selected:function(g){return g.selected===true},parent:function(g){return!!g.firstChild},empty:function(g){return!g.firstChild},has:function(g,i,n){return!!k(n[3],g).length},header:function(g){return/h\d/i.test(g.nodeName)},text:function(g){return"text"===g.type},radio:function(g){return"radio"===g.type},checkbox:function(g){return"checkbox"===g.type},file:function(g){return"file"===g.type},password:function(g){return"password"===g.type},submit:function(g){return"submit"=== -g.type},image:function(g){return"image"===g.type},reset:function(g){return"reset"===g.type},button:function(g){return"button"===g.type||g.nodeName.toLowerCase()==="button"},input:function(g){return/input|select|textarea|button/i.test(g.nodeName)}},setFilters:{first:function(g,i){return i===0},last:function(g,i,n,m){return i===m.length-1},even:function(g,i){return i%2===0},odd:function(g,i){return i%2===1},lt:function(g,i,n){return in[3]-0},nth:function(g,i,n){return n[3]- -0===i},eq:function(g,i,n){return n[3]-0===i}},filter:{PSEUDO:function(g,i,n,m){var p=i[1],q=o.filters[p];if(q)return q(g,n,i,m);else if(p==="contains")return(g.textContent||g.innerText||k.getText([g])||"").indexOf(i[3])>=0;else if(p==="not"){i=i[3];n=0;for(m=i.length;n=0}},ID:function(g,i){return g.nodeType===1&&g.getAttribute("id")===i},TAG:function(g,i){return i==="*"&&g.nodeType===1||g.nodeName.toLowerCase()=== -i},CLASS:function(g,i){return(" "+(g.className||g.getAttribute("class"))+" ").indexOf(i)>-1},ATTR:function(g,i){var n=i[1];n=o.attrHandle[n]?o.attrHandle[n](g):g[n]!=null?g[n]:g.getAttribute(n);var m=n+"",p=i[2],q=i[4];return n==null?p==="!=":p==="="?m===q:p==="*="?m.indexOf(q)>=0:p==="~="?(" "+m+" ").indexOf(q)>=0:!q?m&&n!==false:p==="!="?m!==q:p==="^="?m.indexOf(q)===0:p==="$="?m.substr(m.length-q.length)===q:p==="|="?m===q||m.substr(0,q.length+1)===q+"-":false},POS:function(g,i,n,m){var p=o.setFilters[i[2]]; -if(p)return p(g,n,i,m)}}},x=o.match.POS,r=function(g,i){return"\\"+(i-0+1)},A;for(A in o.match){o.match[A]=RegExp(o.match[A].source+/(?![^\[]*\])(?![^\(]*\))/.source);o.leftMatch[A]=RegExp(/(^(?:.|\r|\n)*?)/.source+o.match[A].source.replace(/\\(\d+)/g,r))}var C=function(g,i){g=Array.prototype.slice.call(g,0);if(i){i.push.apply(i,g);return i}return g};try{Array.prototype.slice.call(t.documentElement.childNodes,0)}catch(J){C=function(g,i){var n=0,m=i||[];if(f.call(g)==="[object Array]")Array.prototype.push.apply(m, -g);else if(typeof g.length==="number")for(var p=g.length;n";n.insertBefore(g,n.firstChild);if(t.getElementById(i)){o.find.ID=function(m,p,q){if(typeof p.getElementById!=="undefined"&&!q)return(p=p.getElementById(m[1]))?p.id===m[1]||typeof p.getAttributeNode!=="undefined"&&p.getAttributeNode("id").nodeValue===m[1]?[p]:B:[]};o.filter.ID=function(m,p){var q=typeof 
m.getAttributeNode!=="undefined"&&m.getAttributeNode("id");return m.nodeType===1&&q&&q.nodeValue===p}}n.removeChild(g); -n=g=null})();(function(){var g=t.createElement("div");g.appendChild(t.createComment(""));if(g.getElementsByTagName("*").length>0)o.find.TAG=function(i,n){var m=n.getElementsByTagName(i[1]);if(i[1]==="*"){for(var p=[],q=0;m[q];q++)m[q].nodeType===1&&p.push(m[q]);m=p}return m};g.innerHTML="";if(g.firstChild&&typeof g.firstChild.getAttribute!=="undefined"&&g.firstChild.getAttribute("href")!=="#")o.attrHandle.href=function(i){return i.getAttribute("href",2)};g=null})();t.querySelectorAll&& -function(){var g=k,i=t.createElement("div");i.innerHTML="
<p class='TEST'></p>
";if(!(i.querySelectorAll&&i.querySelectorAll(".TEST").length===0)){k=function(m,p,q,u){p=p||t;m=m.replace(/\=\s*([^'"\]]*)\s*\]/g,"='$1']");if(!u&&!k.isXML(p))if(p.nodeType===9)try{return C(p.querySelectorAll(m),q)}catch(y){}else if(p.nodeType===1&&p.nodeName.toLowerCase()!=="object"){var F=p.getAttribute("id"),M=F||"__sizzle__";F||p.setAttribute("id",M);try{return C(p.querySelectorAll("#"+M+" "+m),q)}catch(N){}finally{F|| -p.removeAttribute("id")}}return g(m,p,q,u)};for(var n in g)k[n]=g[n];i=null}}();(function(){var g=t.documentElement,i=g.matchesSelector||g.mozMatchesSelector||g.webkitMatchesSelector||g.msMatchesSelector,n=false;try{i.call(t.documentElement,"[test!='']:sizzle")}catch(m){n=true}if(i)k.matchesSelector=function(p,q){q=q.replace(/\=\s*([^'"\]]*)\s*\]/g,"='$1']");if(!k.isXML(p))try{if(n||!o.match.PSEUDO.test(q)&&!/!=/.test(q))return i.call(p,q)}catch(u){}return k(q,null,null,[p]).length>0}})();(function(){var g= -t.createElement("div");g.innerHTML="
";if(!(!g.getElementsByClassName||g.getElementsByClassName("e").length===0)){g.lastChild.className="e";if(g.getElementsByClassName("e").length!==1){o.order.splice(1,0,"CLASS");o.find.CLASS=function(i,n,m){if(typeof n.getElementsByClassName!=="undefined"&&!m)return n.getElementsByClassName(i[1])};g=null}}})();k.contains=t.documentElement.contains?function(g,i){return g!==i&&(g.contains?g.contains(i):true)}:t.documentElement.compareDocumentPosition? -function(g,i){return!!(g.compareDocumentPosition(i)&16)}:function(){return false};k.isXML=function(g){return(g=(g?g.ownerDocument||g:0).documentElement)?g.nodeName!=="HTML":false};var L=function(g,i){for(var n,m=[],p="",q=i.nodeType?[i]:i;n=o.match.PSEUDO.exec(g);){p+=n[0];g=g.replace(o.match.PSEUDO,"")}g=o.relative[g]?g+"*":g;n=0;for(var u=q.length;n0)for(var h=d;h0},closest:function(a,b){var d=[],e,f,h=this[0];if(c.isArray(a)){var l,k={},o=1;if(h&&a.length){e=0;for(f=a.length;e-1:c(h).is(e))d.push({selector:l,elem:h,level:o})}h= -h.parentNode;o++}}return d}l=cb.test(a)?c(a,b||this.context):null;e=0;for(f=this.length;e-1:c.find.matchesSelector(h,a)){d.push(h);break}else{h=h.parentNode;if(!h||!h.ownerDocument||h===b)break}d=d.length>1?c.unique(d):d;return this.pushStack(d,"closest",a)},index:function(a){if(!a||typeof a==="string")return c.inArray(this[0],a?c(a):this.parent().children());return c.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var d=typeof a==="string"?c(a,b||this.context): -c.makeArray(a),e=c.merge(this.get(),d);return this.pushStack(!d[0]||!d[0].parentNode||d[0].parentNode.nodeType===11||!e[0]||!e[0].parentNode||e[0].parentNode.nodeType===11?e:c.unique(e))},andSelf:function(){return this.add(this.prevObject)}});c.each({parent:function(a){return(a=a.parentNode)&&a.nodeType!==11?a:null},parents:function(a){return c.dir(a,"parentNode")},parentsUntil:function(a,b,d){return c.dir(a,"parentNode",d)},next:function(a){return c.nth(a,2,"nextSibling")},prev:function(a){return c.nth(a, -2,"previousSibling")},nextAll:function(a){return c.dir(a,"nextSibling")},prevAll:function(a){return c.dir(a,"previousSibling")},nextUntil:function(a,b,d){return c.dir(a,"nextSibling",d)},prevUntil:function(a,b,d){return c.dir(a,"previousSibling",d)},siblings:function(a){return c.sibling(a.parentNode.firstChild,a)},children:function(a){return c.sibling(a.firstChild)},contents:function(a){return c.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:c.makeArray(a.childNodes)}},function(a, -b){c.fn[a]=function(d,e){var f=c.map(this,b,d);Za.test(a)||(e=d);if(e&&typeof e==="string")f=c.filter(e,f);f=this.length>1?c.unique(f):f;if((this.length>1||ab.test(e))&&$a.test(a))f=f.reverse();return this.pushStack(f,a,bb.call(arguments).join(","))}});c.extend({filter:function(a,b,d){if(d)a=":not("+a+")";return b.length===1?c.find.matchesSelector(b[0],a)?[b[0]]:[]:c.find.matches(a,b)},dir:function(a,b,d){var e=[];for(a=a[b];a&&a.nodeType!==9&&(d===B||a.nodeType!==1||!c(a).is(d));){a.nodeType===1&& -e.push(a);a=a[b]}return e},nth:function(a,b,d){b=b||1;for(var e=0;a;a=a[d])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){for(var d=[];a;a=a.nextSibling)a.nodeType===1&&a!==b&&d.push(a);return d}});var za=/ jQuery\d+="(?:\d+|null)"/g,$=/^\s+/,Aa=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,Ba=/<([\w:]+)/,db=/\s]+\/)>/g,P={option:[1, -""],legend:[1,"
","
"],thead:[1,"","
"],tr:[2,"","
"],td:[3,"","
"],col:[2,"","
"],area:[1,"",""],_default:[0,"",""]};P.optgroup=P.option;P.tbody=P.tfoot=P.colgroup=P.caption=P.thead;P.th=P.td;if(!c.support.htmlSerialize)P._default=[1,"div
","
"];c.fn.extend({text:function(a){if(c.isFunction(a))return this.each(function(b){var d= -c(this);d.text(a.call(this,b,d.text()))});if(typeof a!=="object"&&a!==B)return this.empty().append((this[0]&&this[0].ownerDocument||t).createTextNode(a));return c.text(this)},wrapAll:function(a){if(c.isFunction(a))return this.each(function(d){c(this).wrapAll(a.call(this,d))});if(this[0]){var b=c(a,this[0].ownerDocument).eq(0).clone(true);this[0].parentNode&&b.insertBefore(this[0]);b.map(function(){for(var d=this;d.firstChild&&d.firstChild.nodeType===1;)d=d.firstChild;return d}).append(this)}return this}, -wrapInner:function(a){if(c.isFunction(a))return this.each(function(b){c(this).wrapInner(a.call(this,b))});return this.each(function(){var b=c(this),d=b.contents();d.length?d.wrapAll(a):b.append(a)})},wrap:function(a){return this.each(function(){c(this).wrapAll(a)})},unwrap:function(){return this.parent().each(function(){c.nodeName(this,"body")||c(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.appendChild(a)})}, -prepend:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this)});else if(arguments.length){var a=c(arguments[0]);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b, -this.nextSibling)});else if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,c(arguments[0]).toArray());return a}},remove:function(a,b){for(var d=0,e;(e=this[d])!=null;d++)if(!a||c.filter(a,[e]).length){if(!b&&e.nodeType===1){c.cleanData(e.getElementsByTagName("*"));c.cleanData([e])}e.parentNode&&e.parentNode.removeChild(e)}return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++)for(b.nodeType===1&&c.cleanData(b.getElementsByTagName("*"));b.firstChild;)b.removeChild(b.firstChild); -return this},clone:function(a){var b=this.map(function(){if(!c.support.noCloneEvent&&!c.isXMLDoc(this)){var d=this.outerHTML,e=this.ownerDocument;if(!d){d=e.createElement("div");d.appendChild(this.cloneNode(true));d=d.innerHTML}return c.clean([d.replace(za,"").replace(fb,'="$1">').replace($,"")],e)[0]}else return this.cloneNode(true)});if(a===true){na(this,b);na(this.find("*"),b.find("*"))}return b},html:function(a){if(a===B)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(za,""):null; -else if(typeof a==="string"&&!Ca.test(a)&&(c.support.leadingWhitespace||!$.test(a))&&!P[(Ba.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Aa,"<$1>");try{for(var b=0,d=this.length;b0||e.cacheable||this.length>1?h.cloneNode(true):h)}k.length&&c.each(k,Oa)}return this}});c.buildFragment=function(a,b,d){var e,f,h;b=b&&b[0]?b[0].ownerDocument||b[0]:t;if(a.length===1&&typeof a[0]==="string"&&a[0].length<512&&b===t&&!Ca.test(a[0])&&(c.support.checkClone||!Da.test(a[0]))){f=true;if(h=c.fragments[a[0]])if(h!==1)e=h}if(!e){e=b.createDocumentFragment();c.clean(a,b,e,d)}if(f)c.fragments[a[0]]=h?e:1;return{fragment:e,cacheable:f}};c.fragments={};c.each({appendTo:"append", -prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){c.fn[a]=function(d){var e=[];d=c(d);var 
f=this.length===1&&this[0].parentNode;if(f&&f.nodeType===11&&f.childNodes.length===1&&d.length===1){d[b](this[0]);return this}else{f=0;for(var h=d.length;f0?this.clone(true):this).get();c(d[f])[b](l);e=e.concat(l)}return this.pushStack(e,a,d.selector)}}});c.extend({clean:function(a,b,d,e){b=b||t;if(typeof b.createElement==="undefined")b=b.ownerDocument|| -b[0]&&b[0].ownerDocument||t;for(var f=[],h=0,l;(l=a[h])!=null;h++){if(typeof l==="number")l+="";if(l){if(typeof l==="string"&&!eb.test(l))l=b.createTextNode(l);else if(typeof l==="string"){l=l.replace(Aa,"<$1>");var k=(Ba.exec(l)||["",""])[1].toLowerCase(),o=P[k]||P._default,x=o[0],r=b.createElement("div");for(r.innerHTML=o[1]+l+o[2];x--;)r=r.lastChild;if(!c.support.tbody){x=db.test(l);k=k==="table"&&!x?r.firstChild&&r.firstChild.childNodes:o[1]===""&&!x?r.childNodes:[];for(o=k.length- -1;o>=0;--o)c.nodeName(k[o],"tbody")&&!k[o].childNodes.length&&k[o].parentNode.removeChild(k[o])}!c.support.leadingWhitespace&&$.test(l)&&r.insertBefore(b.createTextNode($.exec(l)[0]),r.firstChild);l=r.childNodes}if(l.nodeType)f.push(l);else f=c.merge(f,l)}}if(d)for(h=0;f[h];h++)if(e&&c.nodeName(f[h],"script")&&(!f[h].type||f[h].type.toLowerCase()==="text/javascript"))e.push(f[h].parentNode?f[h].parentNode.removeChild(f[h]):f[h]);else{f[h].nodeType===1&&f.splice.apply(f,[h+1,0].concat(c.makeArray(f[h].getElementsByTagName("script")))); -d.appendChild(f[h])}return f},cleanData:function(a){for(var b,d,e=c.cache,f=c.event.special,h=c.support.deleteExpando,l=0,k;(k=a[l])!=null;l++)if(!(k.nodeName&&c.noData[k.nodeName.toLowerCase()]))if(d=k[c.expando]){if((b=e[d])&&b.events)for(var o in b.events)f[o]?c.event.remove(k,o):c.removeEvent(k,o,b.handle);if(h)delete k[c.expando];else k.removeAttribute&&k.removeAttribute(c.expando);delete e[d]}}});var Ea=/alpha\([^)]*\)/i,gb=/opacity=([^)]*)/,hb=/-([a-z])/ig,ib=/([A-Z])/g,Fa=/^-?\d+(?:px)?$/i, -jb=/^-?\d/,kb={position:"absolute",visibility:"hidden",display:"block"},Pa=["Left","Right"],Qa=["Top","Bottom"],W,Ga,aa,lb=function(a,b){return b.toUpperCase()};c.fn.css=function(a,b){if(arguments.length===2&&b===B)return this;return c.access(this,a,b,true,function(d,e,f){return f!==B?c.style(d,e,f):c.css(d,e)})};c.extend({cssHooks:{opacity:{get:function(a,b){if(b){var d=W(a,"opacity","opacity");return d===""?"1":d}else return a.style.opacity}}},cssNumber:{zIndex:true,fontWeight:true,opacity:true, -zoom:true,lineHeight:true},cssProps:{"float":c.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,d,e){if(!(!a||a.nodeType===3||a.nodeType===8||!a.style)){var f,h=c.camelCase(b),l=a.style,k=c.cssHooks[h];b=c.cssProps[h]||h;if(d!==B){if(!(typeof d==="number"&&isNaN(d)||d==null)){if(typeof d==="number"&&!c.cssNumber[h])d+="px";if(!k||!("set"in k)||(d=k.set(a,d))!==B)try{l[b]=d}catch(o){}}}else{if(k&&"get"in k&&(f=k.get(a,false,e))!==B)return f;return l[b]}}},css:function(a,b,d){var e,f=c.camelCase(b), -h=c.cssHooks[f];b=c.cssProps[f]||f;if(h&&"get"in h&&(e=h.get(a,true,d))!==B)return e;else if(W)return W(a,b,f)},swap:function(a,b,d){var e={},f;for(f in b){e[f]=a.style[f];a.style[f]=b[f]}d.call(a);for(f in b)a.style[f]=e[f]},camelCase:function(a){return a.replace(hb,lb)}});c.curCSS=c.css;c.each(["height","width"],function(a,b){c.cssHooks[b]={get:function(d,e,f){var h;if(e){if(d.offsetWidth!==0)h=oa(d,b,f);else c.swap(d,kb,function(){h=oa(d,b,f)});if(h<=0){h=W(d,b,b);if(h==="0px"&&aa)h=aa(d,b,b); -if(h!=null)return h===""||h==="auto"?"0px":h}if(h<0||h==null){h=d.style[b];return h===""||h==="auto"?"0px":h}return typeof 
h==="string"?h:h+"px"}},set:function(d,e){if(Fa.test(e)){e=parseFloat(e);if(e>=0)return e+"px"}else return e}}});if(!c.support.opacity)c.cssHooks.opacity={get:function(a,b){return gb.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"":b?"1":""},set:function(a,b){var d=a.style;d.zoom=1;var e=c.isNaN(b)?"":"alpha(opacity="+b*100+")",f= -d.filter||"";d.filter=Ea.test(f)?f.replace(Ea,e):d.filter+" "+e}};if(t.defaultView&&t.defaultView.getComputedStyle)Ga=function(a,b,d){var e;d=d.replace(ib,"-$1").toLowerCase();if(!(b=a.ownerDocument.defaultView))return B;if(b=b.getComputedStyle(a,null)){e=b.getPropertyValue(d);if(e===""&&!c.contains(a.ownerDocument.documentElement,a))e=c.style(a,d)}return e};if(t.documentElement.currentStyle)aa=function(a,b){var d,e,f=a.currentStyle&&a.currentStyle[b],h=a.style;if(!Fa.test(f)&&jb.test(f)){d=h.left; -e=a.runtimeStyle.left;a.runtimeStyle.left=a.currentStyle.left;h.left=b==="fontSize"?"1em":f||0;f=h.pixelLeft+"px";h.left=d;a.runtimeStyle.left=e}return f===""?"auto":f};W=Ga||aa;if(c.expr&&c.expr.filters){c.expr.filters.hidden=function(a){var b=a.offsetHeight;return a.offsetWidth===0&&b===0||!c.support.reliableHiddenOffsets&&(a.style.display||c.css(a,"display"))==="none"};c.expr.filters.visible=function(a){return!c.expr.filters.hidden(a)}}var mb=c.now(),nb=/)<[^<]*)*<\/script>/gi, -ob=/^(?:select|textarea)/i,pb=/^(?:color|date|datetime|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,qb=/^(?:GET|HEAD)$/,Ra=/\[\]$/,T=/\=\?(&|$)/,ja=/\?/,rb=/([?&])_=[^&]*/,sb=/^(\w+:)?\/\/([^\/?#]+)/,tb=/%20/g,ub=/#.*$/,Ha=c.fn.load;c.fn.extend({load:function(a,b,d){if(typeof a!=="string"&&Ha)return Ha.apply(this,arguments);else if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var f=a.slice(e,a.length);a=a.slice(0,e)}e="GET";if(b)if(c.isFunction(b)){d=b;b=null}else if(typeof b=== -"object"){b=c.param(b,c.ajaxSettings.traditional);e="POST"}var h=this;c.ajax({url:a,type:e,dataType:"html",data:b,complete:function(l,k){if(k==="success"||k==="notmodified")h.html(f?c("
").append(l.responseText.replace(nb,"")).find(f):l.responseText);d&&h.each(d,[l.responseText,k,l])}});return this},serialize:function(){return c.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?c.makeArray(this.elements):this}).filter(function(){return this.name&& -!this.disabled&&(this.checked||ob.test(this.nodeName)||pb.test(this.type))}).map(function(a,b){var d=c(this).val();return d==null?null:c.isArray(d)?c.map(d,function(e){return{name:b.name,value:e}}):{name:b.name,value:d}}).get()}});c.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){c.fn[b]=function(d){return this.bind(b,d)}});c.extend({get:function(a,b,d,e){if(c.isFunction(b)){e=e||d;d=b;b=null}return c.ajax({type:"GET",url:a,data:b,success:d,dataType:e})}, -getScript:function(a,b){return c.get(a,null,b,"script")},getJSON:function(a,b,d){return c.get(a,b,d,"json")},post:function(a,b,d,e){if(c.isFunction(b)){e=e||d;d=b;b={}}return c.ajax({type:"POST",url:a,data:b,success:d,dataType:e})},ajaxSetup:function(a){c.extend(c.ajaxSettings,a)},ajaxSettings:{url:location.href,global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:function(){return new E.XMLHttpRequest},accepts:{xml:"application/xml, text/xml",html:"text/html", -script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},ajax:function(a){var b=c.extend(true,{},c.ajaxSettings,a),d,e,f,h=b.type.toUpperCase(),l=qb.test(h);b.url=b.url.replace(ub,"");b.context=a&&a.context!=null?a.context:b;if(b.data&&b.processData&&typeof b.data!=="string")b.data=c.param(b.data,b.traditional);if(b.dataType==="jsonp"){if(h==="GET")T.test(b.url)||(b.url+=(ja.test(b.url)?"&":"?")+(b.jsonp||"callback")+"=?");else if(!b.data|| -!T.test(b.data))b.data=(b.data?b.data+"&":"")+(b.jsonp||"callback")+"=?";b.dataType="json"}if(b.dataType==="json"&&(b.data&&T.test(b.data)||T.test(b.url))){d=b.jsonpCallback||"jsonp"+mb++;if(b.data)b.data=(b.data+"").replace(T,"="+d+"$1");b.url=b.url.replace(T,"="+d+"$1");b.dataType="script";var k=E[d];E[d]=function(m){if(c.isFunction(k))k(m);else{E[d]=B;try{delete E[d]}catch(p){}}f=m;c.handleSuccess(b,w,e,f);c.handleComplete(b,w,e,f);r&&r.removeChild(A)}}if(b.dataType==="script"&&b.cache===null)b.cache= -false;if(b.cache===false&&l){var o=c.now(),x=b.url.replace(rb,"$1_="+o);b.url=x+(x===b.url?(ja.test(b.url)?"&":"?")+"_="+o:"")}if(b.data&&l)b.url+=(ja.test(b.url)?"&":"?")+b.data;b.global&&c.active++===0&&c.event.trigger("ajaxStart");o=(o=sb.exec(b.url))&&(o[1]&&o[1].toLowerCase()!==location.protocol||o[2].toLowerCase()!==location.host);if(b.dataType==="script"&&h==="GET"&&o){var r=t.getElementsByTagName("head")[0]||t.documentElement,A=t.createElement("script");if(b.scriptCharset)A.charset=b.scriptCharset; -A.src=b.url;if(!d){var C=false;A.onload=A.onreadystatechange=function(){if(!C&&(!this.readyState||this.readyState==="loaded"||this.readyState==="complete")){C=true;c.handleSuccess(b,w,e,f);c.handleComplete(b,w,e,f);A.onload=A.onreadystatechange=null;r&&A.parentNode&&r.removeChild(A)}}}r.insertBefore(A,r.firstChild);return B}var J=false,w=b.xhr();if(w){b.username?w.open(h,b.url,b.async,b.username,b.password):w.open(h,b.url,b.async);try{if(b.data!=null&&!l||a&&a.contentType)w.setRequestHeader("Content-Type", 
-b.contentType);if(b.ifModified){c.lastModified[b.url]&&w.setRequestHeader("If-Modified-Since",c.lastModified[b.url]);c.etag[b.url]&&w.setRequestHeader("If-None-Match",c.etag[b.url])}o||w.setRequestHeader("X-Requested-With","XMLHttpRequest");w.setRequestHeader("Accept",b.dataType&&b.accepts[b.dataType]?b.accepts[b.dataType]+", */*; q=0.01":b.accepts._default)}catch(I){}if(b.beforeSend&&b.beforeSend.call(b.context,w,b)===false){b.global&&c.active--===1&&c.event.trigger("ajaxStop");w.abort();return false}b.global&& -c.triggerGlobal(b,"ajaxSend",[w,b]);var L=w.onreadystatechange=function(m){if(!w||w.readyState===0||m==="abort"){J||c.handleComplete(b,w,e,f);J=true;if(w)w.onreadystatechange=c.noop}else if(!J&&w&&(w.readyState===4||m==="timeout")){J=true;w.onreadystatechange=c.noop;e=m==="timeout"?"timeout":!c.httpSuccess(w)?"error":b.ifModified&&c.httpNotModified(w,b.url)?"notmodified":"success";var p;if(e==="success")try{f=c.httpData(w,b.dataType,b)}catch(q){e="parsererror";p=q}if(e==="success"||e==="notmodified")d|| -c.handleSuccess(b,w,e,f);else c.handleError(b,w,e,p);d||c.handleComplete(b,w,e,f);m==="timeout"&&w.abort();if(b.async)w=null}};try{var g=w.abort;w.abort=function(){w&&Function.prototype.call.call(g,w);L("abort")}}catch(i){}b.async&&b.timeout>0&&setTimeout(function(){w&&!J&&L("timeout")},b.timeout);try{w.send(l||b.data==null?null:b.data)}catch(n){c.handleError(b,w,null,n);c.handleComplete(b,w,e,f)}b.async||L();return w}},param:function(a,b){var d=[],e=function(h,l){l=c.isFunction(l)?l():l;d[d.length]= -encodeURIComponent(h)+"="+encodeURIComponent(l)};if(b===B)b=c.ajaxSettings.traditional;if(c.isArray(a)||a.jquery)c.each(a,function(){e(this.name,this.value)});else for(var f in a)da(f,a[f],b,e);return d.join("&").replace(tb,"+")}});c.extend({active:0,lastModified:{},etag:{},handleError:function(a,b,d,e){a.error&&a.error.call(a.context,b,d,e);a.global&&c.triggerGlobal(a,"ajaxError",[b,a,e])},handleSuccess:function(a,b,d,e){a.success&&a.success.call(a.context,e,d,b);a.global&&c.triggerGlobal(a,"ajaxSuccess", -[b,a])},handleComplete:function(a,b,d){a.complete&&a.complete.call(a.context,b,d);a.global&&c.triggerGlobal(a,"ajaxComplete",[b,a]);a.global&&c.active--===1&&c.event.trigger("ajaxStop")},triggerGlobal:function(a,b,d){(a.context&&a.context.url==null?c(a.context):c.event).trigger(b,d)},httpSuccess:function(a){try{return!a.status&&location.protocol==="file:"||a.status>=200&&a.status<300||a.status===304||a.status===1223}catch(b){}return false},httpNotModified:function(a,b){var d=a.getResponseHeader("Last-Modified"), -e=a.getResponseHeader("Etag");if(d)c.lastModified[b]=d;if(e)c.etag[b]=e;return a.status===304},httpData:function(a,b,d){var e=a.getResponseHeader("content-type")||"",f=b==="xml"||!b&&e.indexOf("xml")>=0;a=f?a.responseXML:a.responseText;f&&a.documentElement.nodeName==="parsererror"&&c.error("parsererror");if(d&&d.dataFilter)a=d.dataFilter(a,b);if(typeof a==="string")if(b==="json"||!b&&e.indexOf("json")>=0)a=c.parseJSON(a);else if(b==="script"||!b&&e.indexOf("javascript")>=0)c.globalEval(a);return a}}); -if(E.ActiveXObject)c.ajaxSettings.xhr=function(){if(E.location.protocol!=="file:")try{return new E.XMLHttpRequest}catch(a){}try{return new E.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}};c.support.ajax=!!c.ajaxSettings.xhr();var 
ea={},vb=/^(?:toggle|show|hide)$/,wb=/^([+\-]=)?([\d+.\-]+)(.*)$/,ba,pa=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];c.fn.extend({show:function(a,b,d){if(a||a===0)return this.animate(S("show", -3),a,b,d);else{d=0;for(var e=this.length;d=0;e--)if(d[e].elem===this){b&&d[e](true);d.splice(e,1)}});b||this.dequeue();return this}});c.each({slideDown:S("show",1),slideUp:S("hide",1),slideToggle:S("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){c.fn[a]=function(d,e,f){return this.animate(b, -d,e,f)}});c.extend({speed:function(a,b,d){var e=a&&typeof a==="object"?c.extend({},a):{complete:d||!d&&b||c.isFunction(a)&&a,duration:a,easing:d&&b||b&&!c.isFunction(b)&&b};e.duration=c.fx.off?0:typeof e.duration==="number"?e.duration:e.duration in c.fx.speeds?c.fx.speeds[e.duration]:c.fx.speeds._default;e.old=e.complete;e.complete=function(){e.queue!==false&&c(this).dequeue();c.isFunction(e.old)&&e.old.call(this)};return e},easing:{linear:function(a,b,d,e){return d+e*a},swing:function(a,b,d,e){return(-Math.cos(a* -Math.PI)/2+0.5)*e+d}},timers:[],fx:function(a,b,d){this.options=b;this.elem=a;this.prop=d;if(!b.orig)b.orig={}}});c.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this);(c.fx.step[this.prop]||c.fx.step._default)(this)},cur:function(){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];var a=parseFloat(c.css(this.elem,this.prop));return a&&a>-1E4?a:0},custom:function(a,b,d){function e(l){return f.step(l)} -var f=this,h=c.fx;this.startTime=c.now();this.start=a;this.end=b;this.unit=d||this.unit||"px";this.now=this.start;this.pos=this.state=0;e.elem=this.elem;if(e()&&c.timers.push(e)&&!ba)ba=setInterval(h.tick,h.interval)},show:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.show=true;this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur());c(this.elem).show()},hide:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.hide=true; -this.custom(this.cur(),0)},step:function(a){var b=c.now(),d=true;if(a||b>=this.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;for(var e in this.options.curAnim)if(this.options.curAnim[e]!==true)d=false;if(d){if(this.options.overflow!=null&&!c.support.shrinkWrapBlocks){var f=this.elem,h=this.options;c.each(["","X","Y"],function(k,o){f.style["overflow"+o]=h.overflow[k]})}this.options.hide&&c(this.elem).hide();if(this.options.hide|| -this.options.show)for(var l in this.options.curAnim)c.style(this.elem,l,this.options.orig[l]);this.options.complete.call(this.elem)}return false}else{a=b-this.startTime;this.state=a/this.options.duration;b=this.options.easing||(c.easing.swing?"swing":"linear");this.pos=c.easing[this.options.specialEasing&&this.options.specialEasing[this.prop]||b](this.state,a,0,1,this.options.duration);this.now=this.start+(this.end-this.start)*this.pos;this.update()}return true}};c.extend(c.fx,{tick:function(){for(var a= -c.timers,b=0;b-1;e={};var x={};if(o)x=f.position();l=o?x.top:parseInt(l,10)||0;k=o?x.left:parseInt(k,10)||0;if(c.isFunction(b))b=b.call(a,d,h);if(b.top!=null)e.top=b.top-h.top+l;if(b.left!=null)e.left=b.left-h.left+k;"using"in b?b.using.call(a, -e):f.css(e)}};c.fn.extend({position:function(){if(!this[0])return null;var 
a=this[0],b=this.offsetParent(),d=this.offset(),e=Ia.test(b[0].nodeName)?{top:0,left:0}:b.offset();d.top-=parseFloat(c.css(a,"marginTop"))||0;d.left-=parseFloat(c.css(a,"marginLeft"))||0;e.top+=parseFloat(c.css(b[0],"borderTopWidth"))||0;e.left+=parseFloat(c.css(b[0],"borderLeftWidth"))||0;return{top:d.top-e.top,left:d.left-e.left}},offsetParent:function(){return this.map(function(){for(var a=this.offsetParent||t.body;a&&!Ia.test(a.nodeName)&& -c.css(a,"position")==="static";)a=a.offsetParent;return a})}});c.each(["Left","Top"],function(a,b){var d="scroll"+b;c.fn[d]=function(e){var f=this[0],h;if(!f)return null;if(e!==B)return this.each(function(){if(h=fa(this))h.scrollTo(!a?e:c(h).scrollLeft(),a?e:c(h).scrollTop());else this[d]=e});else return(h=fa(f))?"pageXOffset"in h?h[a?"pageYOffset":"pageXOffset"]:c.support.boxModel&&h.document.documentElement[d]||h.document.body[d]:f[d]}});c.each(["Height","Width"],function(a,b){var d=b.toLowerCase(); -c.fn["inner"+b]=function(){return this[0]?parseFloat(c.css(this[0],d,"padding")):null};c.fn["outer"+b]=function(e){return this[0]?parseFloat(c.css(this[0],d,e?"margin":"border")):null};c.fn[d]=function(e){var f=this[0];if(!f)return e==null?null:this;if(c.isFunction(e))return this.each(function(l){var k=c(this);k[d](e.call(this,l,k[d]()))});if(c.isWindow(f))return f.document.compatMode==="CSS1Compat"&&f.document.documentElement["client"+b]||f.document.body["client"+b];else if(f.nodeType===9)return Math.max(f.documentElement["client"+ -b],f.body["scroll"+b],f.documentElement["scroll"+b],f.body["offset"+b],f.documentElement["offset"+b]);else if(e===B){f=c.css(f,d);var h=parseFloat(f);return c.isNaN(h)?f:h}else return this.css(d,typeof e==="string"?e:e+"px")}})})(window); diff --git a/src/lib/werkzeug/exceptions.py b/src/lib/werkzeug/exceptions.py deleted file mode 100644 index b9b43c9..0000000 --- a/src/lib/werkzeug/exceptions.py +++ /dev/null @@ -1,536 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.exceptions - ~~~~~~~~~~~~~~~~~~~ - - This module implements a number of Python exceptions you can raise from - within your views to trigger a standard non-200 response. - - - Usage Example - ------------- - - :: - - from werkzeug.wrappers import BaseRequest - from werkzeug.wsgi import responder - from werkzeug.exceptions import HTTPException, NotFound - - def view(request): - raise NotFound() - - @responder - def application(environ, start_response): - request = BaseRequest(environ) - try: - return view(request) - except HTTPException, e: - return e - - - As you can see from this example those exceptions are callable WSGI - applications. Because of Python 2.4 compatibility those do not extend - from the response objects but only from the python exception class. - - As a matter of fact they are not Werkzeug response objects. However you - can get a response object by calling ``get_response()`` on a HTTP - exception. - - Keep in mind that you have to pass an environment to ``get_response()`` - because some errors fetch additional information from the WSGI - environment. - - If you want to hook in a different exception page to say, a 404 status - code, you can add a second except for a specific subclass of an error:: - - @responder - def application(environ, start_response): - request = BaseRequest(environ) - try: - return view(request) - except NotFound, e: - return not_found(request) - except HTTPException, e: - return e - - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. 
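-
-    If all you want is to abort request handling with a standard status
-    code, the ``abort`` helper defined at the bottom of this module raises
-    the matching exception for you. A minimal sketch, using only names
-    defined in this module::
-
-        from werkzeug.exceptions import abort
-
-        def view(request):
-            if request.path != '/':
-                abort(404)  # equivalent to: raise NotFound()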
-""" -import sys -from werkzeug._internal import HTTP_STATUS_CODES, _get_environ - - -class HTTPException(Exception): - """ - Baseclass for all HTTP exceptions. This exception can be called as WSGI - application to render a default error page or you can catch the subclasses - of it independently and render nicer error messages. - """ - - code = None - description = None - - def __init__(self, description=None): - Exception.__init__(self, '%d %s' % (self.code, self.name)) - if description is not None: - self.description = description - - @classmethod - def wrap(cls, exception, name=None): - """This method returns a new subclass of the exception provided that - also is a subclass of `BadRequest`. - """ - class newcls(cls, exception): - def __init__(self, arg=None, description=None): - cls.__init__(self, description) - exception.__init__(self, arg) - newcls.__module__ = sys._getframe(1).f_globals.get('__name__') - newcls.__name__ = name or cls.__name__ + exception.__name__ - return newcls - - @property - def name(self): - """The status name.""" - return HTTP_STATUS_CODES[self.code] - - def get_description(self, environ): - """Get the description.""" - environ = _get_environ(environ) - return self.description - - def get_body(self, environ): - """Get the HTML body.""" - return ( - '\n' - '%(code)s %(name)s\n' - '

%(name)s

\n' - '%(description)s\n' - ) % { - 'code': self.code, - 'name': escape(self.name), - 'description': self.get_description(environ) - } - - def get_headers(self, environ): - """Get a list of headers.""" - return [('Content-Type', 'text/html')] - - def get_response(self, environ): - """Get a response object. - - :param environ: the environ for the request. - :return: a :class:`BaseResponse` object or a subclass thereof. - """ - # lazily imported for various reasons. For one, we can use the exceptions - # with custom responses (testing exception instances against types) and - # so we don't ever have to import the wrappers, but also because there - # are circular dependencies when bootstrapping the module. - environ = _get_environ(environ) - from werkzeug.wrappers import BaseResponse - headers = self.get_headers(environ) - return BaseResponse(self.get_body(environ), self.code, headers) - - def __call__(self, environ, start_response): - """Call the exception as WSGI application. - - :param environ: the WSGI environment. - :param start_response: the response callable provided by the WSGI - server. - """ - response = self.get_response(environ) - return response(environ, start_response) - - def __str__(self): - return unicode(self).encode('utf-8') - - def __unicode__(self): - if 'description' in self.__dict__: - txt = self.description - else: - txt = self.name - return '%d: %s' % (self.code, txt) - - def __repr__(self): - return '<%s \'%s\'>' % (self.__class__.__name__, self) - - -class _ProxyException(HTTPException): - """An HTTP exception that expands renders a WSGI application on error.""" - - def __init__(self, response): - Exception.__init__(self, 'proxy exception for %r' % response) - self.response = response - - def get_response(self, environ): - return self.response - - -class BadRequest(HTTPException): - """*400* `Bad Request` - - Raise if the browser sends something to the application the application - or server cannot handle. - """ - code = 400 - description = ( - '

The browser (or proxy) sent a request that this server could ' - 'not understand.

' - ) - -
-class ClientDisconnected(BadRequest): - """Internal exception that is raised if Werkzeug detects a disconnected - client. Since the client is already gone at that point attempting to - send the error message to the client might not work and might ultimately - result in another exception in the server. Mainly this is here so that - it is silenced by default as far as Werkzeug is concerned. -
- Since disconnections cannot be reliably detected and are unspecified - by WSGI to a large extent this might or might not be raised if a client - is gone. -
- .. versionadded:: 0.8 - """ - -
-class Unauthorized(HTTPException): - """*401* `Unauthorized` - - Raise if the user is not authorized. Also used if you want to use HTTP - basic auth. - """ - code = 401 - description = ( - '
<p>The server could not verify that you are authorized to access ' - 'the URL requested. You either supplied the wrong credentials (e.g. ' - 'a bad password), or your browser doesn\'t understand how to supply ' - 'the credentials required.</p><p>In case you are allowed to request ' - 'the document, please check your user-id and password and try ' - 'again.</p>
' - ) - - -class Forbidden(HTTPException): - """*403* `Forbidden` - - Raise if the user doesn't have the permission for the requested resource - but was authenticated. - """ - code = 403 - description = ( - '
<p>You don\'t have the permission to access the requested resource. ' - 'It is either read-protected or not readable by the server.</p>
' - ) - - -class NotFound(HTTPException): - """*404* `Not Found` - - Raise if a resource does not exist and never existed. - """ - code = 404 - description = ( - '
<p>The requested URL was not found on the server.</p>' - '<p>If you entered the URL manually please check your spelling and ' - 'try again.</p>
' - ) - - -class MethodNotAllowed(HTTPException): - """*405* `Method Not Allowed` - - Raise if the server used a method the resource does not handle. For - example `POST` if the resource is view only. Especially useful for REST. - - The first argument for this exception should be a list of allowed methods. - Strictly speaking the response would be invalid if you don't provide valid - methods in the header which you can do with that list. - """ - code = 405 - - def __init__(self, valid_methods=None, description=None): - """Takes an optional list of valid http methods - starting with werkzeug 0.3 the list will be mandatory.""" - HTTPException.__init__(self, description) - self.valid_methods = valid_methods - - def get_headers(self, environ): - headers = HTTPException.get_headers(self, environ) - if self.valid_methods: - headers.append(('Allow', ', '.join(self.valid_methods))) - return headers - - def get_description(self, environ): - m = escape(environ.get('REQUEST_METHOD', 'GET')) - return '
<p>The method %s is not allowed for the requested URL.</p>
' % m - - -class NotAcceptable(HTTPException): - """*406* `Not Acceptable` - - Raise if the server can't return any content conforming to the - `Accept` headers of the client. - """ - code = 406 - - description = ( - '
<p>The resource identified by the request is only capable of ' - 'generating response entities which have content characteristics ' - 'not acceptable according to the accept headers sent in the ' - 'request.</p>
' - ) - -
-class RequestTimeout(HTTPException): - """*408* `Request Timeout` - - Raise to signal a timeout. - """ - code = 408 - description = ( - '
<p>The server closed the network connection because the browser ' - 'didn\'t finish the request within the specified time.</p>
' - ) - - -class Conflict(HTTPException): - """*409* `Conflict` - - Raise to signal that a request cannot be completed because it conflicts - with the current state on the server. - - .. versionadded:: 0.7 - """ - code = 409 - description = ( - '
<p>A conflict happened while processing the request. The resource ' - 'might have been modified while the request was being processed.' - ) - -
-class Gone(HTTPException): - """*410* `Gone` - - Raise if a resource existed previously and went away without a new location. - """ - code = 410 - description = ( - '
<p>The requested URL is no longer available on this server and ' - 'there is no forwarding address.</p><p>If you followed a link ' - 'from a foreign page, please contact the author of this page.' - ) - -
-class LengthRequired(HTTPException): - """*411* `Length Required` - - Raise if the browser submitted data but no ``Content-Length`` header which - is required for the kind of processing the server does. - """ - code = 411 - description = ( - '
<p>A request with this method requires a valid Content-' - 'Length header.</p>
' - ) - - -class PreconditionFailed(HTTPException): - """*412* `Precondition Failed` - - Status code used in combination with ``If-Match``, ``If-None-Match``, or - ``If-Unmodified-Since``. - """ - code = 412 - description = ( - '
<p>The precondition on the request for the URL failed positive ' - 'evaluation.</p>
' - ) - - -class RequestEntityTooLarge(HTTPException): - """*413* `Request Entity Too Large` - - The status code one should return if the data submitted exceeded a given - limit. - """ - code = 413 - description = ( - '
<p>The data value transmitted exceeds the capacity limit.</p>
' - ) - - -class RequestURITooLarge(HTTPException): - """*414* `Request URI Too Large` - - Like *413* but for too long URLs. - """ - code = 414 - description = ( - '
<p>The length of the requested URL exceeds the capacity limit ' - 'for this server. The request cannot be processed.</p>
' - ) - - -class UnsupportedMediaType(HTTPException): - """*415* `Unsupported Media Type` - - The status code returned if the server is unable to handle the media type - the client transmitted. - """ - code = 415 - description = ( - '
<p>The server does not support the media type transmitted in ' - 'the request.</p>
' - ) - - -class RequestedRangeNotSatisfiable(HTTPException): - """*416* `Requested Range Not Satisfiable` - - The client asked for a part of the file that lies beyond the end - of the file. - - .. versionadded:: 0.7 - """ - code = 416 - description = ( - '
<p>The server cannot provide the requested range.' - ) - -
-class ExpectationFailed(HTTPException): - """*417* `Expectation Failed` - - The server cannot meet the requirements of the Expect request-header. - - .. versionadded:: 0.7 - """ - code = 417 - description = ( - '
<p>The server could not meet the requirements of the Expect header' - ) - -
-class ImATeapot(HTTPException): - """*418* `I'm a teapot` - - The server should return this if it is a teapot and someone attempted - to brew coffee with it. - - .. versionadded:: 0.7 - """ - code = 418 - description = ( - '
<p>This server is a teapot, not a coffee machine' - ) - -
-class InternalServerError(HTTPException): - """*500* `Internal Server Error` - - Raise if an internal server error occurred. This is a good fallback if an - unknown error occurred in the dispatcher. - """ - code = 500 - description = ( - '
<p>The server encountered an internal error and was unable to ' - 'complete your request. Either the server is overloaded or there ' - 'is an error in the application.</p>
' - ) - - -class NotImplemented(HTTPException): - """*501* `Not Implemented` - - Raise if the application does not support the action requested by the - browser. - """ - code = 501 - description = ( - '
<p>The server does not support the action requested by the ' - 'browser.</p>
' - ) - - -class BadGateway(HTTPException): - """*502* `Bad Gateway` - - If you do proxying in your application you should return this status code - if you received an invalid response from the upstream server it accessed - in attempting to fulfill the request. - """ - code = 502 - description = ( - '
<p>The proxy server received an invalid response from an upstream ' - 'server.</p>
' - ) - - -class ServiceUnavailable(HTTPException): - """*503* `Service Unavailable` - - Status code you should return if a service is temporarily unavailable. - """ - code = 503 - description = ( - '
<p>The server is temporarily unable to service your request due to ' - 'maintenance downtime or capacity problems. Please try again ' - 'later.</p>
' - ) - - -default_exceptions = {} -__all__ = ['HTTPException'] - -def _find_exceptions(): - for name, obj in globals().iteritems(): - try: - if getattr(obj, 'code', None) is not None: - default_exceptions[obj.code] = obj - __all__.append(obj.__name__) - except TypeError: # pragma: no cover - continue -_find_exceptions() -del _find_exceptions - - -#: raised by the request functions if they were unable to decode the -#: incoming data properly. -HTTPUnicodeError = BadRequest.wrap(UnicodeError, 'HTTPUnicodeError') - - -class Aborter(object): - """ - When passed a dict of code -> exception items it can be used as - callable that raises exceptions. If the first argument to the - callable is an integer it will be looked up in the mapping, if it's - a WSGI application it will be raised in a proxy exception. - - The rest of the arguments are forwarded to the exception constructor. - """ - - def __init__(self, mapping=None, extra=None): - if mapping is None: - mapping = default_exceptions - self.mapping = dict(mapping) - if extra is not None: - self.mapping.update(extra) - - def __call__(self, code, *args, **kwargs): - if not args and not kwargs and not isinstance(code, (int, long)): - raise _ProxyException(code) - if code not in self.mapping: - raise LookupError('no exception for %r' % code) - raise self.mapping[code](*args, **kwargs) - -abort = Aborter() - - -#: an exception that is used internally to signal both a key error and a -#: bad request. Used by a lot of the datastructures. -BadRequestKeyError = BadRequest.wrap(KeyError) - - -# imported here because of circular dependencies of werkzeug.utils -from werkzeug.utils import escape diff --git a/src/lib/werkzeug/formparser.py b/src/lib/werkzeug/formparser.py deleted file mode 100644 index cd57620..0000000 --- a/src/lib/werkzeug/formparser.py +++ /dev/null @@ -1,463 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.formparser - ~~~~~~~~~~~~~~~~~~~ - - This module implements the form parsing. It supports url-encoded forms - as well as non-nested multipart uploads. - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import re -from cStringIO import StringIO -from tempfile import TemporaryFile -from itertools import chain, repeat -from functools import update_wrapper - -from werkzeug._internal import _decode_unicode, _empty_stream -from werkzeug.urls import url_decode_stream -from werkzeug.wsgi import LimitedStream, make_line_iter -from werkzeug.exceptions import RequestEntityTooLarge -from werkzeug.datastructures import Headers, FileStorage, MultiDict -from werkzeug.http import parse_options_header - - -#: an iterator that yields empty strings -_empty_string_iter = repeat('') - -#: a regular expression for multipart boundaries -_multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$') - -#: supported http encodings that are also available in python we support -#: for multipart messages. 
-_supported_multipart_encodings = frozenset(['base64', 'quoted-printable']) - -
-def default_stream_factory(total_content_length, filename, content_type, - content_length=None): - """The stream factory that is used by default.""" - if total_content_length > 1024 * 500: - return TemporaryFile('wb+') - return StringIO() - -
-def parse_form_data(environ, stream_factory=None, charset='utf-8', - errors='replace', max_form_memory_size=None, - max_content_length=None, cls=None, - silent=True): - """Parse the form data in the environ and return it as a tuple in the form - ``(stream, form, files)``. You should only call this method if the - transport method is `POST`, `PUT`, or `PATCH`. -
- If the mimetype of the data transmitted is `multipart/form-data` the - files multidict will be filled with `FileStorage` objects. If the - mimetype is unknown the input stream is wrapped and returned as first - argument, else the stream is empty. -
- This is a shortcut for the common usage of :class:`FormDataParser`. - - Have a look at :ref:`dealing-with-request-data` for more details. -
- .. versionadded:: 0.5 - The `max_form_memory_size`, `max_content_length` and - `cls` parameters were added. -
- .. versionadded:: 0.5.1 - The optional `silent` flag was added. -
- :param environ: the WSGI environment to be used for parsing. - :param stream_factory: An optional callable that returns a new read and - writeable file descriptor. This callable works - the same as :meth:`~BaseResponse._get_file_stream`. - :param charset: The character set for URL and url encoded form data. - :param errors: The encoding error behavior. - :param max_form_memory_size: the maximum number of bytes to be accepted for - in-memory stored form data. If the data - exceeds the value specified an - :exc:`~exceptions.RequestEntityTooLarge` - exception is raised. - :param max_content_length: If this is provided and the transmitted data - is longer than this value an - :exc:`~exceptions.RequestEntityTooLarge` - exception is raised. - :param cls: an optional dict class to use. If this is not specified - or `None` the default :class:`MultiDict` is used. - :param silent: If set to False parsing errors will not be caught. - :return: A tuple in the form ``(stream, form, files)``. - """ - return FormDataParser(stream_factory, charset, errors, - max_form_memory_size, max_content_length, - cls, silent).parse_from_environ(environ) - -
-def exhaust_stream(f): - """Helper decorator for methods that exhaust the stream on return.""" - def wrapper(self, stream, *args, **kwargs): - try: - return f(self, stream, *args, **kwargs) - finally: - stream.exhaust() - return update_wrapper(wrapper, f) - -
-class FormDataParser(object): - """This class implements parsing of form data for Werkzeug. By itself - it can parse multipart and url encoded form data. It can be subclassed - and extended but for most mimetypes it is a better idea to use the - untouched stream and expose it as separate attributes on a request - object. -
- .. versionadded:: 0.8 -
- :param stream_factory: An optional callable that returns a new read and - writeable file descriptor. This callable works - the same as :meth:`~BaseResponse._get_file_stream`. - :param charset: The character set for URL and url encoded form data. - :param errors: The encoding error behavior. - :param max_form_memory_size: the maximum number of bytes to be accepted for - in-memory stored form data. If the data - exceeds the value specified an - :exc:`~exceptions.RequestEntityTooLarge` - exception is raised.
- :param max_content_length: If this is provided and the transmitted data - is longer than this value an - :exc:`~exceptions.RequestEntityTooLarge` - exception is raised. - :param cls: an optional dict class to use. If this is not specified - or `None` the default :class:`MultiDict` is used. - :param silent: If set to False parsing errors will not be caught. - """ - - def __init__(self, stream_factory=None, charset='utf-8', - errors='replace', max_form_memory_size=None, - max_content_length=None, cls=None, - silent=True): - if stream_factory is None: - stream_factory = default_stream_factory - self.stream_factory = stream_factory - self.charset = charset - self.errors = errors - self.max_form_memory_size = max_form_memory_size - self.max_content_length = max_content_length - if cls is None: - cls = MultiDict - self.cls = cls - self.silent = silent - - def get_parse_func(self, mimetype, options): - return self.parse_functions.get(mimetype) - - def parse_from_environ(self, environ): - """Parses the information from the environment as form data. - - :param environ: the WSGI environment to be used for parsing. - :return: A tuple in the form ``(stream, form, files)``. - """ - content_type = environ.get('CONTENT_TYPE', '') - mimetype, options = parse_options_header(content_type) - try: - content_length = int(environ['CONTENT_LENGTH']) - except (KeyError, ValueError): - content_length = 0 - stream = environ['wsgi.input'] - return self.parse(stream, mimetype, content_length, options) - - def parse(self, stream, mimetype, content_length, options=None): - """Parses the information from the given stream, mimetype, - content length and mimetype parameters. - - :param stream: an input stream - :param mimetype: the mimetype of the data - :param content_length: the content length of the incoming data - :param options: optional mimetype parameters (used for - the multipart boundary for instance) - :return: A tuple in the form ``(stream, form, files)``. 
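-
-        A minimal sketch of driving the parser by hand, using only names
-        defined in this module (for a real request, ``parse_from_environ``
-        is the usual entry point)::
-
-            from cStringIO import StringIO
-
-            parser = FormDataParser()
-            data = 'a=1&b=2'
-            stream, form, files = parser.parse(
-                StringIO(data), 'application/x-www-form-urlencoded',
-                len(data))
-            # form is a MultiDict: form['a'] == '1'; files is empty here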
- """ - if self.max_content_length is not None and \ - content_length > self.max_content_length: - raise RequestEntityTooLarge() - if options is None: - options = {} - input_stream = LimitedStream(stream, content_length) - - parse_func = self.get_parse_func(mimetype, options) - if parse_func is not None: - try: - return parse_func(self, input_stream, mimetype, - content_length, options) - except ValueError: - if not self.silent: - raise - return input_stream, self.cls(), self.cls() - - @exhaust_stream - def _parse_multipart(self, stream, mimetype, content_length, options): - parser = MultiPartParser(self.stream_factory, self.charset, self.errors, - max_form_memory_size=self.max_form_memory_size, - cls=self.cls) - form, files = parser.parse(stream, options.get('boundary'), - content_length) - return _empty_stream, form, files - - @exhaust_stream - def _parse_urlencoded(self, stream, mimetype, content_length, options): - if self.max_form_memory_size is not None and \ - content_length > self.max_form_memory_size: - raise RequestEntityTooLarge() - form = url_decode_stream(stream, self.charset, - errors=self.errors, cls=self.cls) - return _empty_stream, form, self.cls() - - #: mapping of mimetypes to parsing functions - parse_functions = { - 'multipart/form-data': _parse_multipart, - 'application/x-www-form-urlencoded': _parse_urlencoded, - 'application/x-url-encoded': _parse_urlencoded - } - - -def is_valid_multipart_boundary(boundary): - """Checks if the string given is a valid multipart boundary.""" - return _multipart_boundary_re.match(boundary) is not None - - -def _line_parse(line): - """Removes line ending characters and returns a tuple (`stripped_line`, - `is_terminated`). - """ - if line[-2:] == '\r\n': - return line[:-2], True - elif line[-1:] in '\r\n': - return line[:-1], True - return line, False - - -def parse_multipart_headers(iterable): - """Parses multipart headers from an iterable that yields lines (including - the trailing newline symbol. The iterable has to be newline terminated: - - >>> parse_multipart_headers(['Foo: Bar\r\n', 'Test: Blub\r\n', - ... '\r\n', 'More data']) - Headers([('Foo', 'Bar'), ('Test', 'Blub')]) - - :param iterable: iterable of strings that are newline terminated - """ - result = [] - for line in iterable: - line, line_terminated = _line_parse(line) - if not line_terminated: - raise ValueError('unexpected end of line in multipart header') - if not line: - break - elif line[0] in ' \t' and result: - key, value = result[-1] - result[-1] = (key, value + '\n ' + line[1:]) - else: - parts = line.split(':', 1) - if len(parts) == 2: - result.append((parts[0].strip(), parts[1].strip())) - - # we link the list to the headers, no need to create a copy, the - # list was not shared anyways. 
-
-
-def parse_multipart_headers(iterable):
-    """Parses multipart headers from an iterable that yields lines (including
-    the trailing newline symbol).  The iterable has to be newline terminated:
-
-    >>> parse_multipart_headers(['Foo: Bar\r\n', 'Test: Blub\r\n',
-    ...                          '\r\n', 'More data'])
-    Headers([('Foo', 'Bar'), ('Test', 'Blub')])
-
-    :param iterable: iterable of strings that are newline terminated
-    """
-    result = []
-    for line in iterable:
-        line, line_terminated = _line_parse(line)
-        if not line_terminated:
-            raise ValueError('unexpected end of line in multipart header')
-        if not line:
-            break
-        elif line[0] in ' \t' and result:
-            key, value = result[-1]
-            result[-1] = (key, value + '\n ' + line[1:])
-        else:
-            parts = line.split(':', 1)
-            if len(parts) == 2:
-                result.append((parts[0].strip(), parts[1].strip()))
-
-    # we link the list to the headers, no need to create a copy, the
-    # list was not shared anyway.
-    return Headers.linked(result)
-
-
-class MultiPartParser(object):
-
-    def __init__(self, stream_factory=None, charset='utf-8', errors='replace',
-                 max_form_memory_size=None, cls=None, buffer_size=10 * 1024):
-        self.stream_factory = stream_factory
-        self.charset = charset
-        self.errors = errors
-        self.max_form_memory_size = max_form_memory_size
-        if stream_factory is None:
-            stream_factory = default_stream_factory
-        if cls is None:
-            cls = MultiDict
-        self.cls = cls
-
-        # make sure the buffer size is divisible by four so that we can base64
-        # decode chunk by chunk
-        assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
-        # also the buffer size has to be at least 1024 bytes long or long headers
-        # will freak out the system
-        assert buffer_size >= 1024, 'buffer size has to be at least 1KB'
-
-        self.buffer_size = buffer_size
-
-    def _fix_ie_filename(self, filename):
-        """Internet Explorer 6 transmits the full file name if a file is
-        uploaded.  This function strips the full path if it thinks the
-        filename is Windows-like absolute.
-        """
-        if filename[1:3] == ':\\' or filename[:2] == '\\\\':
-            return filename.split('\\')[-1]
-        return filename
-
-    def _find_terminator(self, iterator):
-        """The terminator might have some additional newlines before it.
-        There is at least one application that sends additional newlines
-        before headers (the python setuptools package).
-        """
-        for line in iterator:
-            if not line:
-                break
-            line = line.strip()
-            if line:
-                return line
-        return ''
-
-    def fail(self, message):
-        raise ValueError(message)
-
-    def get_part_encoding(self, headers):
-        transfer_encoding = headers.get('content-transfer-encoding')
-        if transfer_encoding is not None and \
-           transfer_encoding in _supported_multipart_encodings:
-            return transfer_encoding
-
-    def get_part_charset(self, headers):
-        # Figure out input charset for current part
-        content_type = headers.get('content-type')
-        if content_type:
-            mimetype, ct_params = parse_options_header(content_type)
-            return ct_params.get('charset', self.charset)
-        return self.charset
-
-    def start_file_streaming(self, filename, headers, total_content_length):
-        filename = _decode_unicode(filename, self.charset, self.errors)
-        filename = self._fix_ie_filename(filename)
-        content_type = headers.get('content-type')
-        try:
-            content_length = int(headers['content-length'])
-        except (KeyError, ValueError):
-            content_length = 0
-        container = self.stream_factory(total_content_length, content_type,
-                                        filename, content_length)
-        return filename, container
-
-    def in_memory_threshold_reached(self, bytes):
-        raise RequestEntityTooLarge()
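The `parse` method that follows consumes a boundary-delimited stream. A
hedged sketch of feeding it directly (the boundary and payload are made up,
and the import path is the one shown in this diff):

.. code:: python

    from StringIO import StringIO
    from werkzeug.formparser import MultiPartParser

    body = ('--frontier\r\n'
            'Content-Disposition: form-data; name="field"\r\n'
            '\r\n'
            'value\r\n'
            '--frontier--\r\n')

    parser = MultiPartParser()
    form, files = parser.parse(StringIO(body), 'frontier', len(body))
    assert form['field'] == 'value'   # plain parts land in `form`
    assert not files                  # no filename, so no file parts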
-
-    def validate_boundary(self, boundary):
-        if not boundary:
-            self.fail('Missing boundary')
-        if not is_valid_multipart_boundary(boundary):
-            self.fail('Invalid boundary: %s' % boundary)
-        if len(boundary) > self.buffer_size:  # pragma: no cover
-            # this should never happen because we check for a minimum size
-            # of 1024 and boundaries may not be longer than 200.  The only
-            # situation when this happens is for non-debug builds where
-            # the assert is skipped.
-            self.fail('Boundary longer than buffer size')
-
-    def parse(self, file, boundary, content_length):
-        next_part = '--' + boundary
-        last_part = next_part + '--'
-
-        form = []
-        files = []
-        in_memory = 0
-
-        iterator = chain(make_line_iter(file, limit=content_length,
-                                        buffer_size=self.buffer_size),
-                         _empty_string_iter)
-
-        terminator = self._find_terminator(iterator)
-        if terminator != next_part:
-            self.fail('Expected boundary at start of multipart data')
-
-        while terminator != last_part:
-            headers = parse_multipart_headers(iterator)
-
-            disposition = headers.get('content-disposition')
-            if disposition is None:
-                self.fail('Missing Content-Disposition header')
-            disposition, extra = parse_options_header(disposition)
-            transfer_encoding = self.get_part_encoding(headers)
-            name = extra.get('name')
-            filename = extra.get('filename')
-            part_charset = self.get_part_charset(headers)
-
-            # if no content type is given we stream into memory.  A list is
-            # used as a temporary container.
-            if filename is None:
-                is_file = False
-                container = []
-                _write = container.append
-                guard_memory = self.max_form_memory_size is not None
-
-            # otherwise we parse the rest of the headers and ask the stream
-            # factory for something we can write in.
-            else:
-                is_file = True
-                guard_memory = False
-                filename, container = self.start_file_streaming(
-                    filename, headers, content_length)
-                _write = container.write
-
-            buf = ''
-            for line in iterator:
-                if not line:
-                    self.fail('unexpected end of stream')
-
-                if line[:2] == '--':
-                    terminator = line.rstrip()
-                    if terminator in (next_part, last_part):
-                        break
-
-                if transfer_encoding is not None:
-                    try:
-                        line = line.decode(transfer_encoding)
-                    except Exception:
-                        self.fail('could not decode transfer encoded chunk')
-
-                # we have something in the buffer from the last iteration.
-                # this is usually a newline delimiter.
-                if buf:
-                    _write(buf)
-                    buf = ''
-
-                # If the line ends with Windows CRLF we write everything except
-                # the last two bytes.  In all other cases however we write
-                # everything except the last byte.  If it was a newline, that's
-                # fine, otherwise it does not matter because we will write it
-                # the next iteration.  This ensures we do not write the
-                # final newline into the stream.  That way we do not have to
-                # truncate the stream.  However we do have to make sure that
-                # if something other than a newline is in there we write it
-                # out.
-                if line[-2:] == '\r\n':
-                    buf = '\r\n'
-                    cutoff = -2
-                else:
-                    buf = line[-1]
-                    cutoff = -1
-                _write(line[:cutoff])
-
-                # if we write into memory and there is a memory size limit we
-                # count the number of bytes in memory and raise an exception if
-                # there is too much data in memory.
-                if guard_memory:
-                    in_memory += len(line)
-                    if in_memory > self.max_form_memory_size:
-                        self.in_memory_threshold_reached(in_memory)
-            else:  # pragma: no cover
-                raise ValueError('unexpected end of part')
-
-            # if we have a leftover in the buffer that is not a newline
-            # character we have to flush it, otherwise we will chop off
-            # certain values.
- if buf not in ('', '\r', '\n', '\r\n'): - _write(buf) - - if is_file: - container.seek(0) - files.append((name, FileStorage(container, filename, name, - headers=headers))) - else: - form.append((name, _decode_unicode(''.join(container), - part_charset, self.errors))) - - return self.cls(form), self.cls(files) diff --git a/src/lib/werkzeug/http.py b/src/lib/werkzeug/http.py deleted file mode 100644 index 23e1328..0000000 --- a/src/lib/werkzeug/http.py +++ /dev/null @@ -1,841 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.http - ~~~~~~~~~~~~~ - - Werkzeug comes with a bunch of utilities that help Werkzeug to deal with - HTTP data. Most of the classes and functions provided by this module are - used by the wrappers, but they are useful on their own, too, especially if - the response and request objects are not used. - - This covers some of the more HTTP centric features of WSGI, some other - utilities such as cookie handling are documented in the `werkzeug.utils` - module. - - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import re -from time import time -try: - from email.utils import parsedate_tz -except ImportError: # pragma: no cover - from email.Utils import parsedate_tz -from urllib2 import parse_http_list as _parse_list_header -from datetime import datetime, timedelta -try: - from hashlib import md5 -except ImportError: # pragma: no cover - from md5 import new as md5 - - -#: HTTP_STATUS_CODES is "exported" from this module. -#: XXX: move to werkzeug.consts or something -from werkzeug._internal import HTTP_STATUS_CODES, _dump_date, \ - _ExtendedCookie, _ExtendedMorsel, _decode_unicode - - -_accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?') -_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" - '^_`abcdefghijklmnopqrstuvwxyz|~') -_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)') -_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t') -_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' -_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*(?:=\s*([^;]+|%s))?\s*' % - (_quoted_string_re, _quoted_string_re)) - -_entity_headers = frozenset([ - 'allow', 'content-encoding', 'content-language', 'content-length', - 'content-location', 'content-md5', 'content-range', 'content-type', - 'expires', 'last-modified' -]) -_hop_by_hop_headers = frozenset([ - 'connection', 'keep-alive', 'proxy-authenticate', - 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', - 'upgrade' -]) - - -def quote_header_value(value, extra_chars='', allow_token=True): - """Quote a header value if necessary. - - .. versionadded:: 0.5 - - :param value: the value to quote. - :param extra_chars: a list of extra characters to skip quoting. - :param allow_token: if this is enabled token values are returned - unchanged. - """ - value = str(value) - if allow_token: - token_chars = _token_chars | set(extra_chars) - if set(value).issubset(token_chars): - return value - return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"') - - -def unquote_header_value(value, is_filename=False): - r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). - This does not use the real unquoting but what browsers are actually - using for quoting. - - .. versionadded:: 0.5 - - :param value: the header value to unquote. 
- """ - if value and value[0] == value[-1] == '"': - # this is not the real unquoting, but fixing this so that the - # RFC is met will result in bugs with internet explorer and - # probably some other browsers as well. IE for example is - # uploading files with "C:\foo\bar.txt" as filename - value = value[1:-1] - - # if this is a filename and the starting characters look like - # a UNC path, then just return the value without quotes. Using the - # replace sequence below on a UNC path has the effect of turning - # the leading double slash into a single slash and then - # _fix_ie_filename() doesn't work correctly. See #458. - if not is_filename or value[:2] != '\\\\': - return value.replace('\\\\', '\\').replace('\\"', '"') - return value - - -def dump_options_header(header, options): - """The reverse function to :func:`parse_options_header`. - - :param header: the header to dump - :param options: a dict of options to append. - """ - segments = [] - if header is not None: - segments.append(header) - for key, value in options.iteritems(): - if value is None: - segments.append(key) - else: - segments.append('%s=%s' % (key, quote_header_value(value))) - return '; '.join(segments) - - -def dump_header(iterable, allow_token=True): - """Dump an HTTP header again. This is the reversal of - :func:`parse_list_header`, :func:`parse_set_header` and - :func:`parse_dict_header`. This also quotes strings that include an - equals sign unless you pass it as dict of key, value pairs. - - >>> dump_header({'foo': 'bar baz'}) - 'foo="bar baz"' - >>> dump_header(('foo', 'bar baz')) - 'foo, "bar baz"' - - :param iterable: the iterable or dict of values to quote. - :param allow_token: if set to `False` tokens as values are disallowed. - See :func:`quote_header_value` for more details. - """ - if isinstance(iterable, dict): - items = [] - for key, value in iterable.iteritems(): - if value is None: - items.append(key) - else: - items.append('%s=%s' % ( - key, - quote_header_value(value, allow_token=allow_token) - )) - else: - items = [quote_header_value(x, allow_token=allow_token) - for x in iterable] - return ', '.join(items) - - -def parse_list_header(value): - """Parse lists as described by RFC 2068 Section 2. - - In particular, parse comma-separated lists where the elements of - the list may include quoted-strings. A quoted-string could - contain a comma. A non-quoted string could have quotes in the - middle. Quotes are removed automatically after parsing. - - It basically works like :func:`parse_set_header` just that items - may appear multiple times and case sensitivity is preserved. - - The return value is a standard :class:`list`: - - >>> parse_list_header('token, "quoted value"') - ['token', 'quoted value'] - - To create a header from the :class:`list` again, use the - :func:`dump_header` function. - - :param value: a string with a list header. 
- :return: :class:`list` - """ - result = [] - for item in _parse_list_header(value): - if item[:1] == item[-1:] == '"': - item = unquote_header_value(item[1:-1]) - result.append(item) - return result - - -def parse_dict_header(value): - """Parse lists of key, value pairs as described by RFC 2068 Section 2 and - convert them into a python dict: - - >>> d = parse_dict_header('foo="is a fish", bar="as well"') - >>> type(d) is dict - True - >>> sorted(d.items()) - [('bar', 'as well'), ('foo', 'is a fish')] - - If there is no value for a key it will be `None`: - - >>> parse_dict_header('key_without_value') - {'key_without_value': None} - - To create a header from the :class:`dict` again, use the - :func:`dump_header` function. - - :param value: a string with a dict header. - :return: :class:`dict` - """ - result = {} - for item in _parse_list_header(value): - if '=' not in item: - result[item] = None - continue - name, value = item.split('=', 1) - if value[:1] == value[-1:] == '"': - value = unquote_header_value(value[1:-1]) - result[name] = value - return result - - -def parse_options_header(value): - """Parse a ``Content-Type`` like header into a tuple with the content - type and the options: - - >>> parse_options_header('Content-Type: text/html; mimetype=text/html') - ('Content-Type:', {'mimetype': 'text/html'}) - - This should not be used to parse ``Cache-Control`` like headers that use - a slightly different format. For these headers use the - :func:`parse_dict_header` function. - - .. versionadded:: 0.5 - - :param value: the header to parse. - :return: (str, options) - """ - def _tokenize(string): - for match in _option_header_piece_re.finditer(string): - key, value = match.groups() - key = unquote_header_value(key) - if value is not None: - value = unquote_header_value(value, key == 'filename') - yield key, value - - if not value: - return '', {} - - parts = _tokenize(';' + value) - name = parts.next()[0] - extra = dict(parts) - return name, extra - - -def parse_accept_header(value, cls=None): - """Parses an HTTP Accept-* header. This does not implement a complete - valid algorithm but one that supports at least value and quality - extraction. - - Returns a new :class:`Accept` object (basically a list of ``(value, quality)`` - tuples sorted by the quality with some additional accessor methods). - - The second parameter can be a subclass of :class:`Accept` that is created - with the parsed values and returned. - - :param value: the accept header string to be parsed. - :param cls: the wrapper class for the return value (can be - :class:`Accept` or a subclass thereof) - :return: an instance of `cls`. - """ - if cls is None: - cls = Accept - - if not value: - return cls(None) - - result = [] - for match in _accept_re.finditer(value): - quality = match.group(2) - if not quality: - quality = 1 - else: - quality = max(min(float(quality), 1), 0) - result.append((match.group(1), quality)) - return cls(result) - - -def parse_cache_control_header(value, on_update=None, cls=None): - """Parse a cache control header. The RFC differs between response and - request cache control, this method does not. It's your responsibility - to not use the wrong control statements. - - .. versionadded:: 0.5 - The `cls` was added. If not specified an immutable - :class:`~werkzeug.datastructures.RequestCacheControl` is returned. - - :param value: a cache control header to be parsed. 
- :param on_update: an optional callable that is called every time a value - on the :class:`~werkzeug.datastructures.CacheControl` - object is changed. - :param cls: the class for the returned object. By default - :class:`~werkzeug.datastructures.RequestCacheControl` is used. - :return: a `cls` object. - """ - if cls is None: - cls = RequestCacheControl - if not value: - return cls(None, on_update) - return cls(parse_dict_header(value), on_update) - - -def parse_set_header(value, on_update=None): - """Parse a set-like header and return a - :class:`~werkzeug.datastructures.HeaderSet` object: - - >>> hs = parse_set_header('token, "quoted value"') - - The return value is an object that treats the items case-insensitively - and keeps the order of the items: - - >>> 'TOKEN' in hs - True - >>> hs.index('quoted value') - 1 - >>> hs - HeaderSet(['token', 'quoted value']) - - To create a header from the :class:`HeaderSet` again, use the - :func:`dump_header` function. - - :param value: a set header to be parsed. - :param on_update: an optional callable that is called every time a - value on the :class:`~werkzeug.datastructures.HeaderSet` - object is changed. - :return: a :class:`~werkzeug.datastructures.HeaderSet` - """ - if not value: - return HeaderSet(None, on_update) - return HeaderSet(parse_list_header(value), on_update) - - -def parse_authorization_header(value): - """Parse an HTTP basic/digest authorization header transmitted by the web - browser. The return value is either `None` if the header was invalid or - not given, otherwise an :class:`~werkzeug.datastructures.Authorization` - object. - - :param value: the authorization header to parse. - :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`. - """ - if not value: - return - try: - auth_type, auth_info = value.split(None, 1) - auth_type = auth_type.lower() - except ValueError: - return - if auth_type == 'basic': - try: - username, password = auth_info.decode('base64').split(':', 1) - except Exception, e: - return - return Authorization('basic', {'username': username, - 'password': password}) - elif auth_type == 'digest': - auth_map = parse_dict_header(auth_info) - for key in 'username', 'realm', 'nonce', 'uri', 'response': - if not key in auth_map: - return - if 'qop' in auth_map: - if not auth_map.get('nc') or not auth_map.get('cnonce'): - return - return Authorization('digest', auth_map) - - -def parse_www_authenticate_header(value, on_update=None): - """Parse an HTTP WWW-Authenticate header into a - :class:`~werkzeug.datastructures.WWWAuthenticate` object. - - :param value: a WWW-Authenticate header to parse. - :param on_update: an optional callable that is called every time a value - on the :class:`~werkzeug.datastructures.WWWAuthenticate` - object is changed. - :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object. - """ - if not value: - return WWWAuthenticate(on_update=on_update) - try: - auth_type, auth_info = value.split(None, 1) - auth_type = auth_type.lower() - except (ValueError, AttributeError): - return WWWAuthenticate(value.strip().lower(), on_update=on_update) - return WWWAuthenticate(auth_type, parse_dict_header(auth_info), - on_update) - - -def parse_if_range_header(value): - """Parses an if-range header which can be an etag or a date. Returns - a :class:`~werkzeug.datastructures.IfRange` object. - - .. 
versionadded:: 0.7 - """ - if not value: - return IfRange() - date = parse_date(value) - if date is not None: - return IfRange(date=date) - # drop weakness information - return IfRange(unquote_etag(value)[0]) - - -def parse_range_header(value, make_inclusive=True): - """Parses a range header into a :class:`~werkzeug.datastructures.Range` - object. If the header is missing or malformed `None` is returned. - `ranges` is a list of ``(start, stop)`` tuples where the ranges are - non-inclusive. - - .. versionadded:: 0.7 - """ - if not value or '=' not in value: - return None - - ranges = [] - last_end = 0 - units, rng = value.split('=', 1) - units = units.strip().lower() - - for item in rng.split(','): - item = item.strip() - if '-' not in item: - return None - if item.startswith('-'): - if last_end < 0: - return None - begin = int(item) - end = None - last_end = -1 - elif '-' in item: - begin, end = item.split('-', 1) - begin = int(begin) - if begin < last_end or last_end < 0: - return None - if end: - end = int(end) + 1 - if begin >= end: - return None - else: - end = None - last_end = end - ranges.append((begin, end)) - - return Range(units, ranges) - - -def parse_content_range_header(value, on_update=None): - """Parses a range header into a - :class:`~werkzeug.datastructures.ContentRange` object or `None` if - parsing is not possible. - - .. versionadded:: 0.7 - - :param value: a content range header to be parsed. - :param on_update: an optional callable that is called every time a value - on the :class:`~werkzeug.datastructures.ContentRange` - object is changed. - """ - if value is None: - return None - try: - units, rangedef = (value or '').strip().split(None, 1) - except ValueError: - return None - - if '/' not in rangedef: - return None - rng, length = rangedef.split('/', 1) - if length == '*': - length = None - elif length.isdigit(): - length = int(length) - else: - return None - - if rng == '*': - return ContentRange(units, None, None, length, on_update=on_update) - elif '-' not in rng: - return None - - start, stop = rng.split('-', 1) - try: - start = int(start) - stop = int(stop) + 1 - except ValueError: - return None - - if is_byte_range_valid(start, stop, length): - return ContentRange(units, start, stop, length, on_update=on_update) - - -def quote_etag(etag, weak=False): - """Quote an etag. - - :param etag: the etag to quote. - :param weak: set to `True` to tag it "weak". - """ - if '"' in etag: - raise ValueError('invalid etag') - etag = '"%s"' % etag - if weak: - etag = 'w/' + etag - return etag - - -def unquote_etag(etag): - """Unquote a single etag: - - >>> unquote_etag('w/"bar"') - ('bar', True) - >>> unquote_etag('"bar"') - ('bar', False) - - :param etag: the etag identifier to unquote. - :return: a ``(etag, weak)`` tuple. - """ - if not etag: - return None, None - etag = etag.strip() - weak = False - if etag[:2] in ('w/', 'W/'): - weak = True - etag = etag[2:] - if etag[:1] == etag[-1:] == '"': - etag = etag[1:-1] - return etag, weak - - -def parse_etags(value): - """Parse an etag header. - - :param value: the tag header to parse - :return: an :class:`~werkzeug.datastructures.ETags` object. 
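A few one-line sketches of the parsers shown above (assuming the
Werkzeug-0.7-era `werkzeug.http` module deleted in this diff):

.. code:: python

    from werkzeug.http import (parse_options_header, parse_range_header,
                               unquote_etag)

    # option headers split into a primary value plus a dict of options
    value, options = parse_options_header(
        'form-data; name="avatar"; filename="me.png"')
    assert value == 'form-data'
    assert options == {'name': 'avatar', 'filename': 'me.png'}

    # ranges come back as non-inclusive (start, stop) tuples
    rng = parse_range_header('bytes=0-499')
    assert rng.ranges == [(0, 500)]

    # etags carry their weakness flag separately
    assert unquote_etag('W/"bar"') == ('bar', True)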
- """ - if not value: - return ETags() - strong = [] - weak = [] - end = len(value) - pos = 0 - while pos < end: - match = _etag_re.match(value, pos) - if match is None: - break - is_weak, quoted, raw = match.groups() - if raw == '*': - return ETags(star_tag=True) - elif quoted: - raw = quoted - if is_weak: - weak.append(raw) - else: - strong.append(raw) - pos = match.end() - return ETags(strong, weak) - - -def generate_etag(data): - """Generate an etag for some data.""" - return md5(data).hexdigest() - - -def parse_date(value): - """Parse one of the following date formats into a datetime object: - - .. sourcecode:: text - - Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 - Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 - Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format - - If parsing fails the return value is `None`. - - :param value: a string with a supported date format. - :return: a :class:`datetime.datetime` object. - """ - if value: - t = parsedate_tz(value.strip()) - if t is not None: - try: - year = t[0] - # unfortunately that function does not tell us if two digit - # years were part of the string, or if they were prefixed - # with two zeroes. So what we do is to assume that 69-99 - # refer to 1900, and everything below to 2000 - if year >= 0 and year <= 68: - year += 2000 - elif year >= 69 and year <= 99: - year += 1900 - return datetime(*((year,) + t[1:7])) - \ - timedelta(seconds=t[-1] or 0) - except (ValueError, OverflowError): - return None - - -def cookie_date(expires=None): - """Formats the time to ensure compatibility with Netscape's cookie - standard. - - Accepts a floating point number expressed in seconds since the epoch in, a - datetime object or a timetuple. All times in UTC. The :func:`parse_date` - function can be used to parse such a date. - - Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``. - - :param expires: If provided that date is used, otherwise the current. - """ - return _dump_date(expires, '-') - - -def http_date(timestamp=None): - """Formats the time to match the RFC1123 date format. - - Accepts a floating point number expressed in seconds since the epoch in, a - datetime object or a timetuple. All times in UTC. The :func:`parse_date` - function can be used to parse such a date. - - Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``. - - :param timestamp: If provided that date is used, otherwise the current. - """ - return _dump_date(timestamp, ' ') - - -def is_resource_modified(environ, etag=None, data=None, last_modified=None): - """Convenience method for conditional requests. - - :param environ: the WSGI environment of the request to be checked. - :param etag: the etag for the response for comparison. - :param data: or alternatively the data of the response to automatically - generate an etag using :func:`generate_etag`. - :param last_modified: an optional date of the last modification. - :return: `True` if the resource was modified, otherwise `False`. - """ - if etag is None and data is not None: - etag = generate_etag(data) - elif data is not None: - raise TypeError('both data and etag given') - if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'): - return False - - unmodified = False - if isinstance(last_modified, basestring): - last_modified = parse_date(last_modified) - - # ensure that microsecond is zero because the HTTP spec does not transmit - # that either and we might have some false positives. 
See issue #39 - if last_modified is not None: - last_modified = last_modified.replace(microsecond=0) - - modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE')) - - if modified_since and last_modified and last_modified <= modified_since: - unmodified = True - if etag: - if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH')) - if if_none_match: - unmodified = if_none_match.contains_raw(etag) - - return not unmodified - - -def remove_entity_headers(headers, allowed=('expires', 'content-location')): - """Remove all entity headers from a list or :class:`Headers` object. This - operation works in-place. `Expires` and `Content-Location` headers are - by default not removed. The reason for this is :rfc:`2616` section - 10.3.5 which specifies some entity headers that should be sent. - - .. versionchanged:: 0.5 - added `allowed` parameter. - - :param headers: a list or :class:`Headers` object. - :param allowed: a list of headers that should still be allowed even though - they are entity headers. - """ - allowed = set(x.lower() for x in allowed) - headers[:] = [(key, value) for key, value in headers if - not is_entity_header(key) or key.lower() in allowed] - - -def remove_hop_by_hop_headers(headers): - """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or - :class:`Headers` object. This operation works in-place. - - .. versionadded:: 0.5 - - :param headers: a list or :class:`Headers` object. - """ - headers[:] = [(key, value) for key, value in headers if - not is_hop_by_hop_header(key)] - - -def is_entity_header(header): - """Check if a header is an entity header. - - .. versionadded:: 0.5 - - :param header: the header to test. - :return: `True` if it's an entity header, `False` otherwise. - """ - return header.lower() in _entity_headers - - -def is_hop_by_hop_header(header): - """Check if a header is an HTTP/1.1 "Hop-by-Hop" header. - - .. versionadded:: 0.5 - - :param header: the header to test. - :return: `True` if it's an entity header, `False` otherwise. - """ - return header.lower() in _hop_by_hop_headers - - -def parse_cookie(header, charset='utf-8', errors='replace', - cls=None): - """Parse a cookie. Either from a string or WSGI environ. - - Per default encoding errors are ignored. If you want a different behavior - you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a - :exc:`HTTPUnicodeError` is raised. - - .. versionchanged:: 0.5 - This function now returns a :class:`TypeConversionDict` instead of a - regular dict. The `cls` parameter was added. - - :param header: the header to be used to parse the cookie. Alternatively - this can be a WSGI environment. - :param charset: the charset for the cookie values. - :param errors: the error behavior for the charset decoding. - :param cls: an optional dict class to use. If this is not specified - or `None` the default :class:`TypeConversionDict` is - used. - """ - if isinstance(header, dict): - header = header.get('HTTP_COOKIE', '') - if cls is None: - cls = TypeConversionDict - cookie = _ExtendedCookie() - cookie.load(header) - result = {} - - # decode to unicode and skip broken items. Our extended morsel - # and extended cookie will catch CookieErrors and convert them to - # `None` items which we have to skip here. 
-    for key, value in cookie.iteritems():
-        if value.value is not None:
-            result[key] = _decode_unicode(unquote_header_value(value.value),
-                                          charset, errors)
-
-    return cls(result)
-
-
-def dump_cookie(key, value='', max_age=None, expires=None, path='/',
-                domain=None, secure=None, httponly=False, charset='utf-8',
-                sync_expires=True):
-    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix.
-    The parameters are the same as in the cookie Morsel object in the
-    Python standard library but it accepts unicode data, too.
-
-    :param max_age: should be a number of seconds, or `None` (default) if
-                    the cookie should last only as long as the client's
-                    browser session.  Additionally `timedelta` objects
-                    are accepted, too.
-    :param expires: should be a `datetime` object or unix timestamp.
-    :param path: limits the cookie to a given path, per default it will
-                 span the whole domain.
-    :param domain: Use this if you want to set a cross-domain cookie. For
-                   example, ``domain=".example.com"`` will set a cookie
-                   that is readable by the domain ``www.example.com``,
-                   ``foo.example.com`` etc.  Otherwise, a cookie will only
-                   be readable by the domain that set it.
-    :param secure: The cookie will only be available via HTTPS.
-    :param httponly: disallow JavaScript to access the cookie.  This is an
-                     extension to the cookie standard and probably not
-                     supported by all browsers.
-    :param charset: the encoding for unicode values.
-    :param sync_expires: automatically set expires if max_age is defined
-                         but expires not.
-    """
-    try:
-        key = str(key)
-    except UnicodeError:
-        raise TypeError('invalid key %r' % key)
-    if isinstance(value, unicode):
-        value = value.encode(charset)
-    value = quote_header_value(value)
-    morsel = _ExtendedMorsel(key, value)
-    if isinstance(max_age, timedelta):
-        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
-    if expires is not None:
-        if not isinstance(expires, basestring):
-            expires = cookie_date(expires)
-        morsel['expires'] = expires
-    elif max_age is not None and sync_expires:
-        morsel['expires'] = cookie_date(time() + max_age)
-    if domain and ':' in domain:
-        # The port part of the domain should NOT be used. Strip it
-        domain = domain.split(':', 1)[0]
-    if domain:
-        assert '.' in domain, (
-            "Setting \"domain\" for a cookie on a server running locally (ex: "
-            "localhost) is not supported by complying browsers. You should "
-            "have something like: \"127.0.0.1 localhost dev.localhost\" on "
-            "your hosts file and then point your server to run on "
-            "\"dev.localhost\" and also set \"domain\" for \"dev.localhost\""
-        )
-    for k, v in (('path', path), ('domain', domain), ('secure', secure),
-                 ('max-age', max_age), ('httponly', httponly)):
-        if v is not None and v is not False:
-            morsel[k] = str(v)
-    return morsel.output(header='').lstrip()
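Two short sketches of the cookie and conditional-request helpers above
(the values are illustrative and the exact attribute order of a dumped
cookie header may vary):

.. code:: python

    from werkzeug.http import (dump_cookie, parse_cookie, generate_etag,
                               is_resource_modified)

    # round-trip a cookie
    header = dump_cookie('session', 'a b c', max_age=3600, httponly=True)
    cookies = parse_cookie('session="a b c"; theme=dark')
    assert cookies['session'] == 'a b c'
    assert cookies['theme'] == 'dark'

    # conditional GET: the client already holds the current
    # representation, so a 304 Not Modified would be appropriate
    etag = generate_etag('hello world')
    environ = {
        'REQUEST_METHOD': 'GET',
        'HTTP_IF_NONE_MATCH': '"%s"' % etag,
    }
    assert not is_resource_modified(environ, etag=etag)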
-
-
-def is_byte_range_valid(start, stop, length):
-    """Checks if a given byte content range is valid for the given length.
-
-    .. versionadded:: 0.7
-    """
-    if (start is None) != (stop is None):
-        return False
-    elif start is None:
-        return length is None or length >= 0
-    elif length is None:
-        return 0 <= start < stop
-    elif start >= stop:
-        return False
-    return 0 <= start < length
-
-
-# circular dependency fun
-from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \
-     WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \
-     RequestCacheControl
-
-
-# DEPRECATED
-# backwards compatible imports
-from werkzeug.datastructures import MIMEAccept, CharsetAccept, \
-     LanguageAccept, Headers
diff --git a/src/lib/werkzeug/local.py b/src/lib/werkzeug/local.py
deleted file mode 100644
index c5507bb..0000000
--- a/src/lib/werkzeug/local.py
+++ /dev/null
@@ -1,394 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    werkzeug.local
-    ~~~~~~~~~~~~~~
-
-    This module implements context-local objects.
-
-    :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
-    :license: BSD, see LICENSE for more details.
-"""
-from werkzeug.wsgi import ClosingIterator
-from werkzeug._internal import _patch_wrapper
-
-# since each thread has its own greenlet we can just use those as identifiers
-# for the context.  If greenlets are not available we fall back to the
-# current thread ident.
-try:
-    from greenlet import getcurrent as get_ident
-except ImportError:  # pragma: no cover
-    try:
-        from thread import get_ident
-    except ImportError:  # pragma: no cover
-        from dummy_thread import get_ident
-
-
-def release_local(local):
-    """Releases the contents of the local for the current context.
-    This makes it possible to use locals without a manager.
-
-    Example::
-
-        >>> loc = Local()
-        >>> loc.foo = 42
-        >>> release_local(loc)
-        >>> hasattr(loc, 'foo')
-        False
-
-    With this function one can release :class:`Local` objects as well
-    as :class:`LocalStack` objects.  However it is not possible to
-    release data held by proxies that way, one always has to retain
-    a reference to the underlying local object in order to be able
-    to release it.
-
-    .. versionadded:: 0.6.1
-    """
-    local.__release_local__()
-
-
-class Local(object):
-    __slots__ = ('__storage__', '__ident_func__')
-
-    def __init__(self):
-        object.__setattr__(self, '__storage__', {})
-        object.__setattr__(self, '__ident_func__', get_ident)
-
-    def __iter__(self):
-        return iter(self.__storage__.items())
-
-    def __call__(self, proxy):
-        """Create a proxy for a name."""
-        return LocalProxy(self, proxy)
-
-    def __release_local__(self):
-        self.__storage__.pop(self.__ident_func__(), None)
-
-    def __getattr__(self, name):
-        try:
-            return self.__storage__[self.__ident_func__()][name]
-        except KeyError:
-            raise AttributeError(name)
-
-    def __setattr__(self, name, value):
-        ident = self.__ident_func__()
-        storage = self.__storage__
-        try:
-            storage[ident][name] = value
-        except KeyError:
-            storage[ident] = {name: value}
-
-    def __delattr__(self, name):
-        try:
-            del self.__storage__[self.__ident_func__()][name]
-        except KeyError:
-            raise AttributeError(name)
-
-
-class LocalStack(object):
-    """This class works similar to a :class:`Local` but keeps a stack
-    of objects instead.  This is best explained with an example::
-
-        >>> ls = LocalStack()
-        >>> ls.push(42)
-        >>> ls.top
-        42
-        >>> ls.push(23)
-        >>> ls.top
-        23
-        >>> ls.pop()
-        23
-        >>> ls.top
-        42
-
-    They can be force released by using a :class:`LocalManager` or with
-    the :func:`release_local` function but the correct way is to pop the
-    item from the stack after using.  When the stack is empty it will
-    no longer be bound to the current context (and as such released).
-
-    By calling the stack without arguments it returns a proxy that resolves to
-    the topmost item on the stack.
-
-    .. versionadded:: 0.6.1
-    """
-
-    def __init__(self):
-        self._local = Local()
-
-    def __release_local__(self):
-        self._local.__release_local__()
-
-    def _get__ident_func__(self):
-        return self._local.__ident_func__
-    def _set__ident_func__(self, value):
-        object.__setattr__(self._local, '__ident_func__', value)
-    __ident_func__ = property(_get__ident_func__, _set__ident_func__)
-    del _get__ident_func__, _set__ident_func__
-
-    def __call__(self):
-        def _lookup():
-            rv = self.top
-            if rv is None:
-                raise RuntimeError('object unbound')
-            return rv
-        return LocalProxy(_lookup)
-
-    def push(self, obj):
-        """Pushes a new item to the stack"""
-        rv = getattr(self._local, 'stack', None)
-        if rv is None:
-            self._local.stack = rv = []
-        rv.append(obj)
-        return rv
-
-    def pop(self):
-        """Removes the topmost item from the stack, will return the
-        old value or `None` if the stack was already empty.
-        """
-        stack = getattr(self._local, 'stack', None)
-        if stack is None:
-            return None
-        elif len(stack) == 1:
-            release_local(self._local)
-            return stack[-1]
-        else:
-            return stack.pop()
-
-    @property
-    def top(self):
-        """The topmost item on the stack.  If the stack is empty,
-        `None` is returned.
-        """
-        try:
-            return self._local.stack[-1]
-        except (AttributeError, IndexError):
-            return None
-
-
-class LocalManager(object):
-    """Local objects cannot manage themselves.  For that you need a local
-    manager.  You can pass a local manager multiple locals or add them later
-    by appending them to `manager.locals`.  Every time the manager cleans up,
-    it will clean up all the data left in the locals for this context.
-
-    The `ident_func` parameter can be added to override the default ident
-    function for the wrapped locals.
-
-    .. versionchanged:: 0.6.1
-       Instead of a manager the :func:`release_local` function can be used
-       as well.
-
-    .. versionchanged:: 0.7
-       `ident_func` was added.
-    """
-
-    def __init__(self, locals=None, ident_func=None):
-        if locals is None:
-            self.locals = []
-        elif isinstance(locals, Local):
-            self.locals = [locals]
-        else:
-            self.locals = list(locals)
-        if ident_func is not None:
-            self.ident_func = ident_func
-            for local in self.locals:
-                object.__setattr__(local, '__ident_func__', ident_func)
-        else:
-            self.ident_func = get_ident
-
-    def get_ident(self):
-        """Return the context identifier the local objects use internally for
-        this context.  You cannot override this method to change the behavior
-        but use it to link other context local objects (such as SQLAlchemy's
-        scoped sessions) to the Werkzeug locals.
-
-        .. versionchanged:: 0.7
-           You can pass a different ident function to the local manager that
-           will then be propagated to all the locals passed to the
-           constructor.
-        """
-        return self.ident_func()
-
-    def cleanup(self):
-        """Manually clean up the data in the locals for this context.  Call
-        this at the end of the request or use `make_middleware()`.
-        """
-        for local in self.locals:
-            release_local(local)
-
-    def make_middleware(self, app):
-        """Wrap a WSGI application so that cleaning up happens after
-        request end.
-        """
-        def application(environ, start_response):
-            return ClosingIterator(app(environ, start_response), self.cleanup)
-        return application
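A hedged sketch of the intended wiring (the `application` function is
illustrative): locals are bound per context and released by the middleware
after each request:

.. code:: python

    from werkzeug.local import Local, LocalManager

    local = Local()
    local_manager = LocalManager([local])

    def application(environ, start_response):
        local.request = environ   # bound for this context only
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['ok']

    # release the per-context data after every request
    application = local_manager.make_middleware(application)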
-
-    def middleware(self, func):
-        """Like `make_middleware` but for decorating functions.
-
-        Example usage::
-
-            @manager.middleware
-            def application(environ, start_response):
-                ...
-
-        The difference to `make_middleware` is that the function passed
-        will have all the arguments copied from the inner application
-        (name, docstring, module).
-        """
-        return _patch_wrapper(func, self.make_middleware(func))
-
-    def __repr__(self):
-        return '<%s storages: %d>' % (
-            self.__class__.__name__,
-            len(self.locals)
-        )
-
-
-class LocalProxy(object):
-    """Acts as a proxy for a werkzeug local.  Forwards all operations to
-    a proxied object.  The only operations not supported for forwarding
-    are right handed operands and any kind of assignment.
-
-    Example usage::
-
-        from werkzeug.local import Local
-        l = Local()
-
-        # these are proxies
-        request = l('request')
-        user = l('user')
-
-
-        from werkzeug.local import LocalStack
-        _response_local = LocalStack()
-
-        # this is a proxy
-        response = _response_local()
-
-    Whenever something is bound to l.user / l.request the proxy objects
-    will forward all operations.  If no object is bound a :exc:`RuntimeError`
-    will be raised.
-
-    To create proxies to :class:`Local` or :class:`LocalStack` objects,
-    call the object as shown above.  If you want to have a proxy to an
-    object looked up by a function, you can (as of Werkzeug 0.6.1) pass
-    a function to the :class:`LocalProxy` constructor::
-
-        session = LocalProxy(lambda: get_current_request().session)
-
-    .. versionchanged:: 0.6.1
-       The class can be instantiated with a callable as well now.
-    """
-    __slots__ = ('__local', '__dict__', '__name__')
-
-    def __init__(self, local, name=None):
-        object.__setattr__(self, '_LocalProxy__local', local)
-        object.__setattr__(self, '__name__', name)
-
-    def _get_current_object(self):
-        """Return the current object.  This is useful if you want the real
-        object behind the proxy at a time for performance reasons or because
-        you want to pass the object into a different context.
- """ - if not hasattr(self.__local, '__release_local__'): - return self.__local() - try: - return getattr(self.__local, self.__name__) - except AttributeError: - raise RuntimeError('no object bound to %s' % self.__name__) - - @property - def __dict__(self): - try: - return self._get_current_object().__dict__ - except RuntimeError: - raise AttributeError('__dict__') - - def __repr__(self): - try: - obj = self._get_current_object() - except RuntimeError: - return '<%s unbound>' % self.__class__.__name__ - return repr(obj) - - def __nonzero__(self): - try: - return bool(self._get_current_object()) - except RuntimeError: - return False - - def __unicode__(self): - try: - return unicode(self._get_current_object()) - except RuntimeError: - return repr(self) - - def __dir__(self): - try: - return dir(self._get_current_object()) - except RuntimeError: - return [] - - def __getattr__(self, name): - if name == '__members__': - return dir(self._get_current_object()) - return getattr(self._get_current_object(), name) - - def __setitem__(self, key, value): - self._get_current_object()[key] = value - - def __delitem__(self, key): - del self._get_current_object()[key] - - def __setslice__(self, i, j, seq): - self._get_current_object()[i:j] = seq - - def __delslice__(self, i, j): - del self._get_current_object()[i:j] - - __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v) - __delattr__ = lambda x, n: delattr(x._get_current_object(), n) - __str__ = lambda x: str(x._get_current_object()) - __lt__ = lambda x, o: x._get_current_object() < o - __le__ = lambda x, o: x._get_current_object() <= o - __eq__ = lambda x, o: x._get_current_object() == o - __ne__ = lambda x, o: x._get_current_object() != o - __gt__ = lambda x, o: x._get_current_object() > o - __ge__ = lambda x, o: x._get_current_object() >= o - __cmp__ = lambda x, o: cmp(x._get_current_object(), o) - __hash__ = lambda x: hash(x._get_current_object()) - __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) - __len__ = lambda x: len(x._get_current_object()) - __getitem__ = lambda x, i: x._get_current_object()[i] - __iter__ = lambda x: iter(x._get_current_object()) - __contains__ = lambda x, i: i in x._get_current_object() - __getslice__ = lambda x, i, j: x._get_current_object()[i:j] - __add__ = lambda x, o: x._get_current_object() + o - __sub__ = lambda x, o: x._get_current_object() - o - __mul__ = lambda x, o: x._get_current_object() * o - __floordiv__ = lambda x, o: x._get_current_object() // o - __mod__ = lambda x, o: x._get_current_object() % o - __divmod__ = lambda x, o: x._get_current_object().__divmod__(o) - __pow__ = lambda x, o: x._get_current_object() ** o - __lshift__ = lambda x, o: x._get_current_object() << o - __rshift__ = lambda x, o: x._get_current_object() >> o - __and__ = lambda x, o: x._get_current_object() & o - __xor__ = lambda x, o: x._get_current_object() ^ o - __or__ = lambda x, o: x._get_current_object() | o - __div__ = lambda x, o: x._get_current_object().__div__(o) - __truediv__ = lambda x, o: x._get_current_object().__truediv__(o) - __neg__ = lambda x: -(x._get_current_object()) - __pos__ = lambda x: +(x._get_current_object()) - __abs__ = lambda x: abs(x._get_current_object()) - __invert__ = lambda x: ~(x._get_current_object()) - __complex__ = lambda x: complex(x._get_current_object()) - __int__ = lambda x: int(x._get_current_object()) - __long__ = lambda x: long(x._get_current_object()) - __float__ = lambda x: float(x._get_current_object()) - __oct__ = lambda x: oct(x._get_current_object()) - 
__hex__ = lambda x: hex(x._get_current_object())
-    __index__ = lambda x: x._get_current_object().__index__()
-    __coerce__ = lambda x, o: x.__coerce__(x, o)
-    __enter__ = lambda x: x.__enter__()
-    __exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
diff --git a/src/lib/werkzeug/routing.py b/src/lib/werkzeug/routing.py
deleted file mode 100644
index 7b11b14..0000000
--- a/src/lib/werkzeug/routing.py
+++ /dev/null
@@ -1,1622 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    werkzeug.routing
-    ~~~~~~~~~~~~~~~~
-
-    When it comes to combining multiple controller or view functions (however
-    you want to call them) you need a dispatcher.  A simple way would be
-    applying regular expression tests on the ``PATH_INFO`` and calling
-    registered callback functions that return the value then.
-
-    This module implements a much more powerful system than simple regular
-    expression matching because it can also convert values in the URLs and
-    build URLs.
-
-    Here is a simple example that creates a URL map for an application with
-    two subdomains (www and kb) and some URL rules:
-
-    >>> m = Map([
-    ...     # Static URLs
-    ...     Rule('/', endpoint='static/index'),
-    ...     Rule('/about', endpoint='static/about'),
-    ...     Rule('/help', endpoint='static/help'),
-    ...     # Knowledge Base
-    ...     Subdomain('kb', [
-    ...         Rule('/', endpoint='kb/index'),
-    ...         Rule('/browse/', endpoint='kb/browse'),
-    ...         Rule('/browse/<int:id>/', endpoint='kb/browse'),
-    ...         Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
-    ...     ])
-    ... ], default_subdomain='www')
-
-    If the application doesn't use subdomains it's perfectly fine to not set
-    the default subdomain and not use the `Subdomain` rule factory.  The endpoint
-    in the rules can be anything, for example import paths or unique
-    identifiers.  The WSGI application can use those endpoints to get the
-    handler for that URL.  It doesn't have to be a string at all but it's
-    recommended.
-
-    Now it's possible to create a URL adapter for one of the subdomains and
-    build URLs:
-
-    >>> c = m.bind('example.com')
-    >>> c.build("kb/browse", dict(id=42))
-    'http://kb.example.com/browse/42/'
-    >>> c.build("kb/browse", dict())
-    'http://kb.example.com/browse/'
-    >>> c.build("kb/browse", dict(id=42, page=3))
-    'http://kb.example.com/browse/42/3'
-    >>> c.build("static/about")
-    '/about'
-    >>> c.build("static/index", force_external=True)
-    'http://www.example.com/'
-
-    >>> c = m.bind('example.com', subdomain='kb')
-    >>> c.build("static/about")
-    'http://www.example.com/about'
-
-    The first argument to bind is the server name *without* the subdomain.
-    Per default it will assume that the script is mounted on the root, but
-    often that's not the case so you can provide the real mount point as
-    second argument:
-
-    >>> c = m.bind('example.com', '/applications/example')
-
-    The third argument can be the subdomain, if not given the default
-    subdomain is used.  For more details about binding have a look at the
-    documentation of the `MapAdapter`.
-
-    And here is how you can match URLs:
-
-    >>> c = m.bind('example.com')
-    >>> c.match("/")
-    ('static/index', {})
-    >>> c.match("/about")
-    ('static/about', {})
-    >>> c = m.bind('example.com', '/', 'kb')
-    >>> c.match("/")
-    ('kb/index', {})
-    >>> c.match("/browse/42/23")
-    ('kb/browse', {'id': 42, 'page': 23})
-
-    If matching fails you get a `NotFound` exception, if the rule thinks
-    it's a good idea to redirect (for example because the URL was defined
-    to have a slash at the end but the request was missing that slash) it
-    will raise a `RequestRedirect` exception.  Both are subclasses of the
-    `HTTPException` so you can use those errors as responses in the
-    application.
-
-    If matching succeeded but the URL rule was incompatible with the given
-    method (for example there were only rules for `GET` and `HEAD` and the
-    routing system tried to match a `POST` request) a `MethodNotAllowed`
-    exception is raised.
-
-
-    :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
-    :license: BSD, see LICENSE for more details.
-"""
-import re
-import posixpath
-from pprint import pformat
-from urlparse import urljoin
-
-from werkzeug.urls import url_encode, url_decode, url_quote
-from werkzeug.utils import redirect, format_string
-from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed
-from werkzeug._internal import _get_environ
-from werkzeug.datastructures import ImmutableDict, MultiDict
-
-
-_rule_re = re.compile(r'''
-    (?P<static>[^<]*)                           # static rule data
-    <
-    (?:
-        (?P<converter>[a-zA-Z_][a-zA-Z0-9_]*)   # converter name
-        (?:\((?P<args>.*?)\))?                  # converter arguments
-        \:                                      # variable delimiter
-    )?
-    (?P<variable>[a-zA-Z_][a-zA-Z0-9_]*)        # variable name
-    >
-''', re.VERBOSE)
-_simple_rule_re = re.compile(r'<([^>]+)>')
-_converter_args_re = re.compile(r'''
-    ((?P<name>\w+)\s*=\s*)?
-    (?P<value>
-        True|False|
-        \d+\.\d+|
-        \d+\.|
-        \d+|
-        \w+|
-        [urUR]?(?P<stringval>"[^"]*?"|'[^']*')
-    )\s*,
-''', re.VERBOSE|re.UNICODE)
-
-
-_PYTHON_CONSTANTS = {
-    'None': None,
-    'True': True,
-    'False': False
-}
-
-
-def _pythonize(value):
-    if value in _PYTHON_CONSTANTS:
-        return _PYTHON_CONSTANTS[value]
-    for convert in int, float:
-        try:
-            return convert(value)
-        except ValueError:
-            pass
-    if value[:1] == value[-1:] and value[0] in '"\'':
-        value = value[1:-1]
-    return unicode(value)
-
-
-def parse_converter_args(argstr):
-    argstr += ','
-    args = []
-    kwargs = {}
-
-    for item in _converter_args_re.finditer(argstr):
-        value = item.group('stringval')
-        if value is None:
-            value = item.group('value')
-        value = _pythonize(value)
-        if not item.group('name'):
-            args.append(value)
-        else:
-            name = item.group('name')
-            kwargs[name] = value
-
-    return tuple(args), kwargs
-
-
-def parse_rule(rule):
-    """Parse a rule and return it as a generator.  Each iteration yields tuples
-    in the form ``(converter, arguments, variable)``.  If the converter is
-    `None` it's a static url part, otherwise it's a dynamic one.
-
-    :internal:
-    """
-    pos = 0
-    end = len(rule)
-    do_match = _rule_re.match
-    used_names = set()
-    while pos < end:
-        m = do_match(rule, pos)
-        if m is None:
-            break
-        data = m.groupdict()
-        if data['static']:
-            yield None, None, data['static']
-        variable = data['variable']
-        converter = data['converter'] or 'default'
-        if variable in used_names:
-            raise ValueError('variable name %r used twice.' % variable)
-        used_names.add(variable)
-        yield converter, data['args'] or None, variable
-        pos = m.end()
-    if pos < end:
-        remaining = rule[pos:]
-        if '>' in remaining or '<' in remaining:
-            raise ValueError('malformed url rule: %r' % rule)
-        yield None, None, remaining
-
-
-def get_converter(map, name, args):
-    """Create a new converter for the given arguments or raise an
-    exception if the converter does not exist.
-
-    :internal:
-    """
-    if not name in map.converters:
-        raise LookupError('the converter %r does not exist' % name)
-    if args:
-        args, kwargs = parse_converter_args(args)
-    else:
-        args = ()
-        kwargs = {}
-    return map.converters[name](map, *args, **kwargs)
-
-
-class RoutingException(Exception):
-    """Special exceptions that require the application to redirect, notifying
-    about missing urls, etc.
-
-    :internal:
-    """
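As the module docstring above suggests, the routing exceptions double as
responses. A sketch of a dispatcher built on that idea (the endpoints and
rules are made up, and `Map.bind_to_environ` is assumed from the same
module):

.. code:: python

    from werkzeug.routing import Map, Rule
    from werkzeug.exceptions import HTTPException

    url_map = Map([Rule('/', endpoint='index'),
                   Rule('/user/<int:id>', endpoint='user')])

    def application(environ, start_response):
        adapter = url_map.bind_to_environ(environ)
        try:
            endpoint, values = adapter.match()
        except HTTPException, e:
            # NotFound, MethodNotAllowed and RequestRedirect are all
            # HTTPExceptions and therefore valid WSGI responses
            return e(environ, start_response)
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['%s -> %r' % (endpoint, values)]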
-
-
-class RequestRedirect(HTTPException, RoutingException):
-    """Raise if the map requests a redirect.  This is for example the case if
-    `strict_slashes` are activated and a URL that requires a trailing slash
-    was requested without it.
-
-    The attribute `new_url` contains the absolute destination url.
-    """
-    code = 301
-
-    def __init__(self, new_url):
-        RoutingException.__init__(self, new_url)
-        self.new_url = new_url
-
-    def get_response(self, environ):
-        return redirect(self.new_url, self.code)
-
-
-class RequestSlash(RoutingException):
-    """Internal exception."""
-
-
-class RequestAliasRedirect(RoutingException):
-    """This rule is an alias and wants to redirect to the canonical URL."""
-
-    def __init__(self, matched_values):
-        self.matched_values = matched_values
-
-
-class BuildError(RoutingException, LookupError):
-    """Raised if the build system cannot find a URL for an endpoint with the
-    values provided.
-    """
-
-    def __init__(self, endpoint, values, method):
-        LookupError.__init__(self, endpoint, values, method)
-        self.endpoint = endpoint
-        self.values = values
-        self.method = method
-
-
-class ValidationError(ValueError):
-    """Validation error.  If a rule converter raises this exception the rule
-    does not match the current URL and the next URL is tried.
-    """
-
-
-class RuleFactory(object):
-    """As soon as you have more complex URL setups it's a good idea to use rule
-    factories to avoid repetitive tasks.  Some of them are builtin, others can
-    be added by subclassing `RuleFactory` and overriding `get_rules`.
-    """
-
-    def get_rules(self, map):
-        """Subclasses of `RuleFactory` have to override this method and return
-        an iterable of rules."""
-        raise NotImplementedError()
-
-
-class Subdomain(RuleFactory):
-    """All URLs provided by this factory have the subdomain set to a
-    specific domain.  For example if you want to use the subdomain for
-    the current language this can be a good setup::
-
-        url_map = Map([
-            Rule('/', endpoint='#select_language'),
-            Subdomain('<string(length=2):lang_code>', [
-                Rule('/', endpoint='index'),
-                Rule('/about', endpoint='about'),
-                Rule('/help', endpoint='help')
-            ])
-        ])
-
-    All the rules except for the ``'#select_language'`` endpoint will now
-    listen on a two letter long subdomain that holds the language code
-    for the current request.
-    """
-
-    def __init__(self, subdomain, rules):
-        self.subdomain = subdomain
-        self.rules = rules
-
-    def get_rules(self, map):
-        for rulefactory in self.rules:
-            for rule in rulefactory.get_rules(map):
-                rule = rule.empty()
-                rule.subdomain = self.subdomain
-                yield rule
-
-
-class Submount(RuleFactory):
-    """Like `Subdomain` but prefixes the URL rule with a given string::
-
-        url_map = Map([
-            Rule('/', endpoint='index'),
-            Submount('/blog', [
-                Rule('/', endpoint='blog/index'),
-                Rule('/entry/<entry_name>', endpoint='blog/show')
-            ])
-        ])
-
-    Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_name>``.
-    """
-
-    def __init__(self, path, rules):
-        self.path = path.rstrip('/')
-        self.rules = rules
-
-    def get_rules(self, map):
-        for rulefactory in self.rules:
-            for rule in rulefactory.get_rules(map):
-                rule = rule.empty()
-                rule.rule = self.path + rule.rule
-                yield rule
-
-
-class EndpointPrefix(RuleFactory):
-    """Prefixes all endpoints (which must be strings for this factory) with
-    another string.  This can be useful for sub applications::
-
-        url_map = Map([
-            Rule('/', endpoint='index'),
-            EndpointPrefix('blog/', [Submount('/blog', [
-                Rule('/', endpoint='index'),
-                Rule('/entry/<entry_name>', endpoint='show')
-            ])])
-        ])
-    """
-
-    def __init__(self, prefix, rules):
-        self.prefix = prefix
-        self.rules = rules
-
-    def get_rules(self, map):
-        for rulefactory in self.rules:
-            for rule in rulefactory.get_rules(map):
-                rule = rule.empty()
-                rule.endpoint = self.prefix + rule.endpoint
-                yield rule
-
-
-class RuleTemplate(object):
-    """Returns copies of the rules wrapped and expands string templates in
-    the endpoint, rule, defaults or subdomain sections.
-
-    Here is a small example for such a rule template::
-
-        from werkzeug.routing import Map, Rule, RuleTemplate
-
-        resource = RuleTemplate([
-            Rule('/$name/', endpoint='$name.list'),
-            Rule('/$name/<int:id>', endpoint='$name.show')
-        ])
-
-        url_map = Map([resource(name='user'), resource(name='page')])
-
-    When a rule template is called the keyword arguments are used to
-    replace the placeholders in all the string parameters.
-    """
-
-    def __init__(self, rules):
-        self.rules = list(rules)
-
-    def __call__(self, *args, **kwargs):
-        return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
-
-
-class RuleTemplateFactory(RuleFactory):
-    """A factory that fills in template variables into rules.  Used by
-    `RuleTemplate` internally.
-
-    :internal:
-    """
-
-    def __init__(self, rules, context):
-        self.rules = rules
-        self.context = context
-
-    def get_rules(self, map):
-        for rulefactory in self.rules:
-            for rule in rulefactory.get_rules(map):
-                new_defaults = subdomain = None
-                if rule.defaults:
-                    new_defaults = {}
-                    for key, value in rule.defaults.iteritems():
-                        if isinstance(value, basestring):
-                            value = format_string(value, self.context)
-                        new_defaults[key] = value
-                if rule.subdomain is not None:
-                    subdomain = format_string(rule.subdomain, self.context)
-                new_endpoint = rule.endpoint
-                if isinstance(new_endpoint, basestring):
-                    new_endpoint = format_string(new_endpoint, self.context)
-                yield Rule(
-                    format_string(rule.rule, self.context),
-                    new_defaults,
-                    subdomain,
-                    rule.methods,
-                    rule.build_only,
-                    new_endpoint,
-                    rule.strict_slashes
-                )
-
-
-class Rule(RuleFactory):
-    """A Rule represents one URL pattern.  There are some options for `Rule`
-    that change the way it behaves and are passed to the `Rule` constructor.
-    Note that besides the rule-string all arguments *must* be keyword arguments
-    in order to not break the application on Werkzeug upgrades.
-
-    `string`
-        Rule strings basically are just normal URL paths with placeholders in
-        the format ``<converter(arguments):name>`` where the converter and the
-        arguments are optional.  If no converter is defined the `default`
-        converter is used which means `string` in the normal configuration.
-
-        URL rules that end with a slash are branch URLs, others are leaves.
-        If you have `strict_slashes` enabled (which is the default), all
-        branch URLs that are matched without a trailing slash will trigger a
-        redirect to the same URL with the missing slash appended.
-
-        The converters are defined on the `Map`.
-
-    `endpoint`
-        The endpoint for this rule.  This can be anything.  A reference to a
-        function, a string, a number etc.  The preferred way is using a string
-        because the endpoint is used for URL generation.
-
-    `defaults`
-        An optional dict with defaults for other rules with the same endpoint.
- This is a bit tricky but useful if you want to have unique URLs:: - - url_map = Map([ - Rule('/all/', defaults={'page': 1}, endpoint='all_entries'), - Rule('/all/page/', endpoint='all_entries') - ]) - - If a user now visits ``http://example.com/all/page/1`` he will be - redirected to ``http://example.com/all/``. If `redirect_defaults` is - disabled on the `Map` instance this will only affect the URL - generation. - - `subdomain` - The subdomain rule string for this rule. If not specified the rule - only matches for the `default_subdomain` of the map. If the map is - not bound to a subdomain this feature is disabled. - - Can be useful if you want to have user profiles on different subdomains - and all subdomains are forwarded to your application:: - - url_map = Map([ - Rule('/', subdomain='', endpoint='user/homepage'), - Rule('/stats', subdomain='', endpoint='user/stats') - ]) - - `methods` - A sequence of http methods this rule applies to. If not specified, all - methods are allowed. For example this can be useful if you want different - endpoints for `POST` and `GET`. If methods are defined and the path - matches but the method matched against is not in this list or in the - list of another rule for that path the error raised is of the type - `MethodNotAllowed` rather than `NotFound`. If `GET` is present in the - list of methods and `HEAD` is not, `HEAD` is added automatically. - - .. versionchanged:: 0.6.1 - `HEAD` is now automatically added to the methods if `GET` is - present. The reason for this is that existing code often did not - work properly in servers not rewriting `HEAD` to `GET` - automatically and it was not documented how `HEAD` should be - treated. This was considered a bug in Werkzeug because of that. - - `strict_slashes` - Override the `Map` setting for `strict_slashes` only for this rule. If - not specified the `Map` setting is used. - - `build_only` - Set this to True and the rule will never match but will create a URL - that can be build. This is useful if you have resources on a subdomain - or folder that are not handled by the WSGI application (like static data) - - `redirect_to` - If given this must be either a string or callable. In case of a - callable it's called with the url adapter that triggered the match and - the values of the URL as keyword arguments and has to return the target - for the redirect, otherwise it has to be a string with placeholders in - rule syntax:: - - def foo_with_slug(adapter, id): - # ask the database for the slug for the old id. this of - # course has nothing to do with werkzeug. - return 'foo/' + Foo.get_slug_for_id(id) - - url_map = Map([ - Rule('/foo/', endpoint='foo'), - Rule('/some/old/url/', redirect_to='foo/'), - Rule('/other/old/url/', redirect_to=foo_with_slug) - ]) - - When the rule is matched the routing system will raise a - `RequestRedirect` exception with the target for the redirect. - - Keep in mind that the URL will be joined against the URL root of the - script so don't use a leading slash on the target URL unless you - really mean root of that domain. - - `alias` - If enabled this rule serves as an alias for another rule with the same - endpoint and arguments. - - `host` - If provided and the URL map has host matching enabled this can be - used to provide a match rule for the whole host. This also means - that the subdomain feature is disabled. - - .. versionadded:: 0.7 - The `alias` and `host` parameters were added. 
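# Illustrative sketch, not part of the deleted file: the `methods` option
# described above, including the automatic HEAD handling. Endpoint names
# are invented.
from werkzeug.routing import Map, Rule

url_map = Map([
    Rule('/comments', endpoint='comments.index', methods=['GET']),   # HEAD added automatically
    Rule('/comments', endpoint='comments.create', methods=['POST']),
])
# A PUT to /comments now raises MethodNotAllowed rather than NotFound,
# because the path matches but the method is in neither rule's list.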
- """ - - def __init__(self, string, defaults=None, subdomain=None, methods=None, - build_only=False, endpoint=None, strict_slashes=None, - redirect_to=None, alias=False, host=None): - if not string.startswith('/'): - raise ValueError('urls must start with a leading slash') - self.rule = string - self.is_leaf = not string.endswith('/') - - self.map = None - self.strict_slashes = strict_slashes - self.subdomain = subdomain - self.host = host - self.defaults = defaults - self.build_only = build_only - self.alias = alias - if methods is None: - self.methods = None - else: - self.methods = set([x.upper() for x in methods]) - if 'HEAD' not in self.methods and 'GET' in self.methods: - self.methods.add('HEAD') - self.endpoint = endpoint - self.redirect_to = redirect_to - - if defaults: - self.arguments = set(map(str, defaults)) - else: - self.arguments = set() - self._trace = self._converters = self._regex = self._weights = None - - def empty(self): - """Return an unbound copy of this rule. This can be useful if you - want to reuse an already bound URL for another map.""" - defaults = None - if self.defaults: - defaults = dict(self.defaults) - return Rule(self.rule, defaults, self.subdomain, self.methods, - self.build_only, self.endpoint, self.strict_slashes, - self.redirect_to, self.alias, self.host) - - def get_rules(self, map): - yield self - - def refresh(self): - """Rebinds and refreshes the URL. Call this if you modified the - rule in place. - - :internal: - """ - self.bind(self.map, rebind=True) - - def bind(self, map, rebind=False): - """Bind the url to a map and create a regular expression based on - the information from the rule itself and the defaults from the map. - - :internal: - """ - if self.map is not None and not rebind: - raise RuntimeError('url rule %r already bound to map %r' % - (self, self.map)) - self.map = map - if self.strict_slashes is None: - self.strict_slashes = map.strict_slashes - if self.subdomain is None: - self.subdomain = map.default_subdomain - self.compile() - - def compile(self): - """Compiles the regular expression and stores it.""" - assert self.map is not None, 'rule not bound' - - if self.map.host_matching: - domain_rule = self.host or '' - else: - domain_rule = self.subdomain or '' - - self._trace = [] - self._converters = {} - self._weights = [] - regex_parts = [] - - def _build_regex(rule): - for converter, arguments, variable in parse_rule(rule): - if converter is None: - regex_parts.append(re.escape(variable)) - self._trace.append((False, variable)) - for part in variable.split('/'): - if part: - self._weights.append((0, -len(part))) - else: - convobj = get_converter(self.map, converter, arguments) - regex_parts.append('(?P<%s>%s)' % (variable, convobj.regex)) - self._converters[variable] = convobj - self._trace.append((True, variable)) - self._weights.append((1, convobj.weight)) - self.arguments.add(str(variable)) - - _build_regex(domain_rule) - regex_parts.append('\\|') - self._trace.append((False, '|')) - _build_regex(self.is_leaf and self.rule or self.rule.rstrip('/')) - if not self.is_leaf: - self._trace.append((False, '/')) - - if self.build_only: - return - regex = r'^%s%s$' % ( - u''.join(regex_parts), - (not self.is_leaf or not self.strict_slashes) and \ - '(?/?)' or '' - ) - self._regex = re.compile(regex, re.UNICODE) - - def match(self, path): - """Check if the rule matches a given path. Path is a string in the - form ``"subdomain|/path(method)"`` and is assembled by the map. 
If - the map is doing host matching the subdomain part will be the host - instead. - - If the rule matches a dict with the converted values is returned, - otherwise the return value is `None`. - - :internal: - """ - if not self.build_only: - m = self._regex.search(path) - if m is not None: - groups = m.groupdict() - # we have a folder like part of the url without a trailing - # slash and strict slashes enabled. raise an exception that - # tells the map to redirect to the same url but with a - # trailing slash - if self.strict_slashes and not self.is_leaf and \ - not groups.pop('__suffix__'): - raise RequestSlash() - # if we are not in strict slashes mode we have to remove - # a __suffix__ - elif not self.strict_slashes: - del groups['__suffix__'] - - result = {} - for name, value in groups.iteritems(): - try: - value = self._converters[name].to_python(value) - except ValidationError: - return - result[str(name)] = value - if self.defaults: - result.update(self.defaults) - - if self.alias and self.map.redirect_defaults: - raise RequestAliasRedirect(result) - - return result - - def build(self, values, append_unknown=True): - """Assembles the relative url for that rule and the subdomain. - If building doesn't work for some reasons `None` is returned. - - :internal: - """ - tmp = [] - add = tmp.append - processed = set(self.arguments) - for is_dynamic, data in self._trace: - if is_dynamic: - try: - add(self._converters[data].to_url(values[data])) - except ValidationError: - return - processed.add(data) - else: - add(url_quote(data, self.map.charset, safe='/:|')) - domain_part, url = (u''.join(tmp)).split('|', 1) - - if append_unknown: - query_vars = MultiDict(values) - for key in processed: - if key in query_vars: - del query_vars[key] - - if query_vars: - url += '?' + url_encode(query_vars, self.map.charset, - sort=self.map.sort_parameters, - key=self.map.sort_key) - - return domain_part, url - - def provides_defaults_for(self, rule): - """Check if this rule has defaults for a given rule. - - :internal: - """ - return not self.build_only and self.defaults and \ - self.endpoint == rule.endpoint and self != rule and \ - self.arguments == rule.arguments - - def suitable_for(self, values, method=None): - """Check if the dict of values has enough data for url generation. - - :internal: - """ - # if a method was given explicitly and that method is not supported - # by this rule, this rule is not suitable. - if method is not None and self.methods is not None \ - and method not in self.methods: - return False - - defaults = self.defaults or () - - # all arguments required must be either in the defaults dict or - # the value dictionary otherwise it's not suitable - for key in self.arguments: - if key not in defaults and key not in values: - return False - - # in case defaults are given we ensure taht either the value was - # skipped or the value is the same as the default value. - if defaults: - for key, value in defaults.iteritems(): - if key in values and value != values[key]: - return False - - return True - - def match_compare_key(self): - """The match compare key for sorting. - - Current implementation: - - 1. rules without any arguments come first for performance - reasons only as we expect them to match faster and some - common ones usually don't have any arguments (index pages etc.) - 2. The more complex rules come first so the second argument is the - negative length of the number of weights. - 3. lastly we order by the actual weights. 
- - :internal: - """ - return bool(self.arguments), -len(self._weights), self._weights - - def build_compare_key(self): - """The build compare key for sorting. - - :internal: - """ - return self.alias and 1 or 0, -len(self.arguments), \ - -len(self.defaults or ()) - - def __eq__(self, other): - return self.__class__ is other.__class__ and \ - self._trace == other._trace - - def __ne__(self, other): - return not self.__eq__(other) - - def __unicode__(self): - return self.rule - - def __str__(self): - charset = self.map is not None and self.map.charset or 'utf-8' - return unicode(self).encode(charset) - - def __repr__(self): - if self.map is None: - return '<%s (unbound)>' % self.__class__.__name__ - charset = self.map is not None and self.map.charset or 'utf-8' - tmp = [] - for is_dynamic, data in self._trace: - if is_dynamic: - tmp.append('<%s>' % data) - else: - tmp.append(data) - return '<%s %r%s -> %s>' % ( - self.__class__.__name__, - (u''.join(tmp).encode(charset)).lstrip('|'), - self.methods is not None and ' (%s)' % \ - ', '.join(self.methods) or '', - self.endpoint - ) - - -class BaseConverter(object): - """Base class for all converters.""" - regex = '[^/]+' - weight = 100 - - def __init__(self, map): - self.map = map - - def to_python(self, value): - return value - - def to_url(self, value): - return url_quote(value, self.map.charset) - - -class UnicodeConverter(BaseConverter): - """This converter is the default converter and accepts any string but - only one path segment. Thus the string can not include a slash. - - This is the default validator. - - Example:: - - Rule('/pages/'), - Rule('/') - - :param map: the :class:`Map`. - :param minlength: the minimum length of the string. Must be greater - or equal 1. - :param maxlength: the maximum length of the string. - :param length: the exact length of the string. - """ - - def __init__(self, map, minlength=1, maxlength=None, length=None): - BaseConverter.__init__(self, map) - if length is not None: - length = '{%d}' % int(length) - else: - if maxlength is None: - maxlength = '' - else: - maxlength = int(maxlength) - length = '{%s,%s}' % ( - int(minlength), - maxlength - ) - self.regex = '[^/]' + length - - -class AnyConverter(BaseConverter): - """Matches one of the items provided. Items can either be Python - identifiers or strings:: - - Rule('/') - - :param map: the :class:`Map`. - :param items: this function accepts the possible items as positional - arguments. - """ - - def __init__(self, map, *items): - BaseConverter.__init__(self, map) - self.regex = '(?:%s)' % '|'.join([re.escape(x) for x in items]) - - -class PathConverter(BaseConverter): - """Like the default :class:`UnicodeConverter`, but it also matches - slashes. This is useful for wikis and similar applications:: - - Rule('/') - Rule('//edit') - - :param map: the :class:`Map`. - """ - regex = '[^/].*?' - weight = 200 - - -class NumberConverter(BaseConverter): - """Baseclass for `IntegerConverter` and `FloatConverter`. 
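# Illustrative sketch, not part of the deleted file: the built-in
# converters documented above as they would appear in rule strings
# (endpoint names invented).
from werkzeug.routing import Map, Rule

url_map = Map([
    Rule('/pages/<page>', endpoint='page'),              # default string converter
    Rule('/<any(about, help):topic>', endpoint='topic'),  # one of a fixed set
    Rule('/wiki/<path:name>', endpoint='wiki'),           # also matches slashes
    Rule('/page/<int:page_no>', endpoint='paged'),
])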
- - :internal: - """ - weight = 50 - - def __init__(self, map, fixed_digits=0, min=None, max=None): - BaseConverter.__init__(self, map) - self.fixed_digits = fixed_digits - self.min = min - self.max = max - - def to_python(self, value): - if (self.fixed_digits and len(value) != self.fixed_digits): - raise ValidationError() - value = self.num_convert(value) - if (self.min is not None and value < self.min) or \ - (self.max is not None and value > self.max): - raise ValidationError() - return value - - def to_url(self, value): - value = self.num_convert(value) - if self.fixed_digits: - value = ('%%0%sd' % self.fixed_digits) % value - return str(value) - - -class IntegerConverter(NumberConverter): - """This converter only accepts integer values:: - - Rule('/page/') - - This converter does not support negative values. - - :param map: the :class:`Map`. - :param fixed_digits: the number of fixed digits in the URL. If you set - this to ``4`` for example, the application will - only match if the url looks like ``/0001/``. The - default is variable length. - :param min: the minimal value. - :param max: the maximal value. - """ - regex = r'\d+' - num_convert = int - - -class FloatConverter(NumberConverter): - """This converter only accepts floating point values:: - - Rule('/probability/') - - This converter does not support negative values. - - :param map: the :class:`Map`. - :param min: the minimal value. - :param max: the maximal value. - """ - regex = r'\d+\.\d+' - num_convert = float - - def __init__(self, map, min=None, max=None): - NumberConverter.__init__(self, map, 0, min, max) - - -#: the default converter mapping for the map. -DEFAULT_CONVERTERS = { - 'default': UnicodeConverter, - 'string': UnicodeConverter, - 'any': AnyConverter, - 'path': PathConverter, - 'int': IntegerConverter, - 'float': FloatConverter -} - - -class Map(object): - """The map class stores all the URL rules and some configuration - parameters. Some of the configuration values are only stored on the - `Map` instance since those affect all rules, others are just defaults - and can be overridden for each rule. Note that you have to specify all - arguments besides the `rules` as keyword arguments! - - :param rules: sequence of url rules for this map. - :param default_subdomain: The default subdomain for rules without a - subdomain defined. - :param charset: charset of the url. defaults to ``"utf-8"`` - :param strict_slashes: Take care of trailing slashes. - :param redirect_defaults: This will redirect to the default rule if it - wasn't visited that way. This helps creating - unique URLs. - :param converters: A dict of converters that adds additional converters - to the list of converters. If you redefine one - converter this will override the original one. - :param sort_parameters: If set to `True` the url parameters are sorted. - See `url_encode` for more details. - :param sort_key: The sort key function for `url_encode`. - :param encoding_errors: the error method to use for decoding - :param host_matching: if set to `True` it enables the host matching - feature and disables the subdomain one. If - enabled the `host` parameter to rules is used - instead of the `subdomain` one. - - .. versionadded:: 0.5 - `sort_parameters` and `sort_key` was added. - - .. versionadded:: 0.7 - `encoding_errors` and `host_matching` was added. - """ - - #: .. versionadded:: 0.6 - #: a dict of default converters to be used. 
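# Illustrative sketch, not part of the deleted file: a hypothetical custom
# converter registered through the `converters` parameter of Map described
# above. `regex`, `to_python` and `to_url` are the extension points shown
# on BaseConverter.
from werkzeug.routing import BaseConverter, Map, Rule

class BooleanConverter(BaseConverter):
    regex = '(?:yes|no)'

    def to_python(self, value):
        return value == 'yes'

    def to_url(self, value):
        return value and 'yes' or 'no'

url_map = Map([Rule('/flag/<bool:value>', endpoint='flag')],
              converters={'bool': BooleanConverter})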
- default_converters = ImmutableDict(DEFAULT_CONVERTERS) - - def __init__(self, rules=None, default_subdomain='', charset='utf-8', - strict_slashes=True, redirect_defaults=True, - converters=None, sort_parameters=False, sort_key=None, - encoding_errors='replace', host_matching=False): - self._rules = [] - self._rules_by_endpoint = {} - self._remap = True - - self.default_subdomain = default_subdomain - self.charset = charset - self.encoding_errors = encoding_errors - self.strict_slashes = strict_slashes - self.redirect_defaults = redirect_defaults - self.host_matching = host_matching - - self.converters = self.default_converters.copy() - if converters: - self.converters.update(converters) - - self.sort_parameters = sort_parameters - self.sort_key = sort_key - - for rulefactory in rules or (): - self.add(rulefactory) - - def is_endpoint_expecting(self, endpoint, *arguments): - """Iterate over all rules and check if the endpoint expects - the arguments provided. This is for example useful if you have - some URLs that expect a language code and others that do not and - you want to wrap the builder a bit so that the current language - code is automatically added if not provided but endpoints expect - it. - - :param endpoint: the endpoint to check. - :param arguments: this function accepts one or more arguments - as positional arguments. Each one of them is - checked. - """ - self.update() - arguments = set(arguments) - for rule in self._rules_by_endpoint[endpoint]: - if arguments.issubset(rule.arguments): - return True - return False - - def iter_rules(self, endpoint=None): - """Iterate over all rules or the rules of an endpoint. - - :param endpoint: if provided only the rules for that endpoint - are returned. - :return: an iterator - """ - self.update() - if endpoint is not None: - return iter(self._rules_by_endpoint[endpoint]) - return iter(self._rules) - - def add(self, rulefactory): - """Add a new rule or factory to the map and bind it. Requires that the - rule is not bound to another map. - - :param rulefactory: a :class:`Rule` or :class:`RuleFactory` - """ - for rule in rulefactory.get_rules(self): - rule.bind(self) - self._rules.append(rule) - self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule) - self._remap = True - - def bind(self, server_name, script_name=None, subdomain=None, - url_scheme='http', default_method='GET', path_info=None, - query_args=None): - """Return a new :class:`MapAdapter` with the details specified to the - call. Note that `script_name` will default to ``'/'`` if not further - specified or `None`. The `server_name` at least is a requirement - because the HTTP RFC requires absolute URLs for redirects and so all - redirect exceptions raised by Werkzeug will contain the full canonical - URL. - - If no path_info is passed to :meth:`match` it will use the default path - info passed to bind. While this doesn't really make sense for - manual bind calls, it's useful if you bind a map to a WSGI - environment which already contains the path info. - - `subdomain` will default to the `default_subdomain` for this map if - no defined. If there is no `default_subdomain` you cannot use the - subdomain feature. - - .. versionadded:: 0.7 - `query_args` added - - .. versionadded:: 0.8 - `query_args` can now also be a string. 
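# Illustrative sketch, not part of the deleted file: the language-code use
# case described for is_endpoint_expecting() above. `urls` is a bound
# MapAdapter; `current_lang` and the argument name are invented.
def build_url(urls, endpoint, values, current_lang):
    # inject the current language code only where the endpoint expects one
    if urls.map.is_endpoint_expecting(endpoint, 'lang_code'):
        values.setdefault('lang_code', current_lang)
    return urls.build(endpoint, values)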
- """ - server_name = server_name.lower() - if self.host_matching: - if subdomain is not None: - raise RuntimeError('host matching enabled and a ' - 'subdomain was provided') - elif subdomain is None: - subdomain = self.default_subdomain - if script_name is None: - script_name = '/' - if isinstance(server_name, unicode): - server_name = server_name.encode('idna') - return MapAdapter(self, server_name, script_name, subdomain, - url_scheme, path_info, default_method, query_args) - - def bind_to_environ(self, environ, server_name=None, subdomain=None): - """Like :meth:`bind` but you can pass it an WSGI environment and it - will fetch the information from that dictionary. Note that because of - limitations in the protocol there is no way to get the current - subdomain and real `server_name` from the environment. If you don't - provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or - `HTTP_HOST` if provided) as used `server_name` with disabled subdomain - feature. - - If `subdomain` is `None` but an environment and a server name is - provided it will calculate the current subdomain automatically. - Example: `server_name` is ``'example.com'`` and the `SERVER_NAME` - in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated - subdomain will be ``'staging.dev'``. - - If the object passed as environ has an environ attribute, the value of - this attribute is used instead. This allows you to pass request - objects. Additionally `PATH_INFO` added as a default of the - :class:`MapAdapter` so that you don't have to pass the path info to - the match method. - - .. versionchanged:: 0.5 - previously this method accepted a bogus `calculate_subdomain` - parameter that did not have any effect. It was removed because - of that. - - .. versionchanged:: 0.8 - This will no longer raise a ValueError when an unexpected server - name was passed. - - :param environ: a WSGI environment. - :param server_name: an optional server name hint (see above). - :param subdomain: optionally the current subdomain (see above). - """ - environ = _get_environ(environ) - if server_name is None: - if 'HTTP_HOST' in environ: - server_name = environ['HTTP_HOST'] - else: - server_name = environ['SERVER_NAME'] - if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \ - in (('https', '443'), ('http', '80')): - server_name += ':' + environ['SERVER_PORT'] - elif subdomain is None and not self.host_matching: - server_name = server_name.lower() - if 'HTTP_HOST' in environ: - wsgi_server_name = environ.get('HTTP_HOST') - else: - wsgi_server_name = environ.get('SERVER_NAME') - if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \ - in (('https', '443'), ('http', '80')): - wsgi_server_name += ':' + environ['SERVER_PORT'] - wsgi_server_name = wsgi_server_name.lower() - cur_server_name = wsgi_server_name.split('.') - real_server_name = server_name.split('.') - offset = -len(real_server_name) - if cur_server_name[offset:] != real_server_name: - # This can happen even with valid configs if the server was - # accesssed directly by IP address under some situations. - # Instead of raising an exception like in Werkzeug 0.7 or - # earlier we go by an invalid subdomain which will result - # in a 404 error on matching. 
- subdomain = '' - else: - subdomain = '.'.join(filter(None, cur_server_name[:offset])) - return Map.bind(self, server_name, environ.get('SCRIPT_NAME'), - subdomain, environ['wsgi.url_scheme'], - environ['REQUEST_METHOD'], environ.get('PATH_INFO'), - query_args=environ.get('QUERY_STRING', '')) - - def update(self): - """Called before matching and building to keep the compiled rules - in the correct order after things changed. - """ - if self._remap: - self._rules.sort(key=lambda x: x.match_compare_key()) - for rules in self._rules_by_endpoint.itervalues(): - rules.sort(key=lambda x: x.build_compare_key()) - self._remap = False - - def __repr__(self): - rules = self.iter_rules() - return '%s(%s)' % (self.__class__.__name__, pformat(list(rules))) - - -class MapAdapter(object): - """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does - the URL matching and building based on runtime information. - """ - - def __init__(self, map, server_name, script_name, subdomain, - url_scheme, path_info, default_method, query_args=None): - self.map = map - self.server_name = server_name - if not script_name.endswith('/'): - script_name += '/' - self.script_name = script_name - self.subdomain = subdomain - self.url_scheme = url_scheme - self.path_info = path_info or u'' - self.default_method = default_method - self.query_args = query_args - - def dispatch(self, view_func, path_info=None, method=None, - catch_http_exceptions=False): - """Does the complete dispatching process. `view_func` is called with - the endpoint and a dict with the values for the view. It should - look up the view function, call it, and return a response object - or WSGI application. http exceptions are not caught by default - so that applications can display nicer error messages by just - catching them by hand. If you want to stick with the default - error messages you can pass it ``catch_http_exceptions=True`` and - it will catch the http exceptions. - - Here a small example for the dispatch usage:: - - from werkzeug.wrappers import Request, Response - from werkzeug.wsgi import responder - from werkzeug.routing import Map, Rule - - def on_index(request): - return Response('Hello from the index') - - url_map = Map([Rule('/', endpoint='index')]) - views = {'index': on_index} - - @responder - def application(environ, start_response): - request = Request(environ) - urls = url_map.bind_to_environ(environ) - return urls.dispatch(lambda e, v: views[e](request, **v), - catch_http_exceptions=True) - - Keep in mind that this method might return exception objects, too, so - use :class:`Response.force_type` to get a response object. - - :param view_func: a function that is called with the endpoint as - first argument and the value dict as second. Has - to dispatch to the actual view function with this - information. (see above) - :param path_info: the path info to use for matching. Overrides the - path info specified on binding. - :param method: the HTTP method used for matching. Overrides the - method specified on binding. - :param catch_http_exceptions: set to `True` to catch any of the - werkzeug :class:`HTTPException`\s. 
- """ - try: - try: - endpoint, args = self.match(path_info, method) - except RequestRedirect, e: - return e - return view_func(endpoint, args) - except HTTPException, e: - if catch_http_exceptions: - return e - raise - - def match(self, path_info=None, method=None, return_rule=False, - query_args=None): - """The usage is simple: you just pass the match method the current - path info as well as the method (which defaults to `GET`). The - following things can then happen: - - - you receive a `NotFound` exception that indicates that no URL is - matching. A `NotFound` exception is also a WSGI application you - can call to get a default page not found page (happens to be the - same object as `werkzeug.exceptions.NotFound`) - - - you receive a `MethodNotAllowed` exception that indicates that there - is a match for this URL but not for the current request method. - This is useful for RESTful applications. - - - you receive a `RequestRedirect` exception with a `new_url` - attribute. This exception is used to notify you about a request - Werkzeug requests from your WSGI application. This is for example the - case if you request ``/foo`` although the correct URL is ``/foo/`` - You can use the `RequestRedirect` instance as response-like object - similar to all other subclasses of `HTTPException`. - - - you get a tuple in the form ``(endpoint, arguments)`` if there is - a match (unless `return_rule` is True, in which case you get a tuple - in the form ``(rule, arguments)``) - - If the path info is not passed to the match method the default path - info of the map is used (defaults to the root URL if not defined - explicitly). - - All of the exceptions raised are subclasses of `HTTPException` so they - can be used as WSGI responses. The will all render generic error or - redirect pages. - - Here is a small example for matching: - - >>> m = Map([ - ... Rule('/', endpoint='index'), - ... Rule('/downloads/', endpoint='downloads/index'), - ... Rule('/downloads/', endpoint='downloads/show') - ... ]) - >>> urls = m.bind("example.com", "/") - >>> urls.match("/", "GET") - ('index', {}) - >>> urls.match("/downloads/42") - ('downloads/show', {'id': 42}) - - And here is what happens on redirect and missing URLs: - - >>> urls.match("/downloads") - Traceback (most recent call last): - ... - RequestRedirect: http://example.com/downloads/ - >>> urls.match("/missing") - Traceback (most recent call last): - ... - NotFound: 404 Not Found - - :param path_info: the path info to use for matching. Overrides the - path info specified on binding. - :param method: the HTTP method used for matching. Overrides the - method specified on binding. - :param return_rule: return the rule that matched instead of just the - endpoint (defaults to `False`). - :param query_args: optional query arguments that are used for - automatic redirects as string or dictionary. It's - currently not possible to use the query arguments - for URL matching. - - .. versionadded:: 0.6 - `return_rule` was added. - - .. versionadded:: 0.7 - `query_args` was added. - - .. versionchanged:: 0.8 - `query_args` can now also be a string. 
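# Illustrative sketch, not part of the deleted file: the exception-based
# control flow documented above. All the routing exceptions raised by
# match() are HTTPException subclasses and therefore valid WSGI responses.
from werkzeug.exceptions import HTTPException

def match_or_respond(urls, path_info, method):
    try:
        return urls.match(path_info, method)
    except HTTPException, e:
        # RequestRedirect, MethodNotAllowed and NotFound all end up here
        # and can be handed back to the WSGI layer as the response.
        return e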
- """ - self.map.update() - if path_info is None: - path_info = self.path_info - if not isinstance(path_info, unicode): - path_info = path_info.decode(self.map.charset, - self.map.encoding_errors) - if query_args is None: - query_args = self.query_args - method = (method or self.default_method).upper() - - path = u'%s|/%s' % (self.map.host_matching and self.server_name or - self.subdomain, path_info.lstrip('/')) - - have_match_for = set() - for rule in self.map._rules: - try: - rv = rule.match(path) - except RequestSlash: - raise RequestRedirect(self.make_redirect_url( - path_info + '/', query_args)) - except RequestAliasRedirect, e: - raise RequestRedirect(self.make_alias_redirect_url( - path, rule.endpoint, e.matched_values, method, query_args)) - if rv is None: - continue - if rule.methods is not None and method not in rule.methods: - have_match_for.update(rule.methods) - continue - - if self.map.redirect_defaults: - redirect_url = self.get_default_redirect(rule, method, rv, - query_args) - if redirect_url is not None: - raise RequestRedirect(redirect_url) - - if rule.redirect_to is not None: - if isinstance(rule.redirect_to, basestring): - def _handle_match(match): - value = rv[match.group(1)] - return rule._converters[match.group(1)].to_url(value) - redirect_url = _simple_rule_re.sub(_handle_match, - rule.redirect_to) - else: - redirect_url = rule.redirect_to(self, **rv) - raise RequestRedirect(str(urljoin('%s://%s%s%s' % ( - self.url_scheme, - self.subdomain and self.subdomain + '.' or '', - self.server_name, - self.script_name - ), redirect_url))) - - if return_rule: - return rule, rv - else: - return rule.endpoint, rv - - if have_match_for: - raise MethodNotAllowed(valid_methods=list(have_match_for)) - raise NotFound() - - def test(self, path_info=None, method=None): - """Test if a rule would match. Works like `match` but returns `True` - if the URL matches, or `False` if it does not exist. - - :param path_info: the path info to use for matching. Overrides the - path info specified on binding. - :param method: the HTTP method used for matching. Overrides the - method specified on binding. - """ - try: - self.match(path_info, method) - except RequestRedirect: - pass - except HTTPException: - return False - return True - - def allowed_methods(self, path_info=None): - """Returns the valid methods that match for a given path. - - .. versionadded:: 0.7 - """ - try: - self.match(path_info, method='--') - except MethodNotAllowed, e: - return e.valid_methods - except HTTPException, e: - pass - return [] - - def get_host(self, domain_part): - """Figures out the full host name for the given domain part. The - domain part is a subdomain in case host matching is disabled or - a full host name. - """ - if self.map.host_matching: - if domain_part is None: - return self.server_name - return domain_part - subdomain = domain_part - if subdomain is None: - subdomain = self.subdomain - return (subdomain and subdomain + '.' or '') + self.server_name - - def get_default_redirect(self, rule, method, values, query_args): - """A helper that returns the URL to redirect to if it finds one. - This is used for default redirecting only. - - :internal: - """ - assert self.map.redirect_defaults - for r in self.map._rules_by_endpoint[rule.endpoint]: - # every rule that comes after this one, including ourself - # has a lower priority for the defaults. We order the ones - # with the highest priority up for building. 
- if r is rule: - break - if r.provides_defaults_for(rule) and \ - r.suitable_for(values, method): - values.update(r.defaults) - domain_part, path = r.build(values) - return self.make_redirect_url( - path, query_args, domain_part=domain_part) - - def encode_query_args(self, query_args): - if not isinstance(query_args, basestring): - query_args = url_encode(query_args, self.map.charset) - return query_args - - def make_redirect_url(self, path_info, query_args=None, domain_part=None): - """Creates a redirect URL. - - :internal: - """ - suffix = '' - if query_args: - suffix = '?' + self.encode_query_args(query_args) - return str('%s://%s/%s%s' % ( - self.url_scheme, - self.get_host(domain_part), - posixpath.join(self.script_name[:-1].lstrip('/'), - url_quote(path_info.lstrip('/'), self.map.charset)), - suffix - )) - - def make_alias_redirect_url(self, path, endpoint, values, method, query_args): - """Internally called to make an alias redirect URL.""" - url = self.build(endpoint, values, method, append_unknown=False, - force_external=True) - if query_args: - url += '?' + self.encode_query_args(query_args) - assert url != path, 'detected invalid alias setting. No canonical ' \ - 'URL found' - return url - - def _partial_build(self, endpoint, values, method, append_unknown): - """Helper for :meth:`build`. Returns subdomain and path for the - rule that accepts this endpoint, values and method. - - :internal: - """ - # in case the method is none, try with the default method first - if method is None: - rv = self._partial_build(endpoint, values, self.default_method, - append_unknown) - if rv is not None: - return rv - - # default method did not match or a specific method is passed, - # check all and go with first result. - for rule in self.map._rules_by_endpoint.get(endpoint, ()): - if rule.suitable_for(values, method): - rv = rule.build(values, append_unknown) - if rv is not None: - return rv - - def build(self, endpoint, values=None, method=None, force_external=False, - append_unknown=True): - """Building URLs works pretty much the other way round. Instead of - `match` you call `build` and pass it the endpoint and a dict of - arguments for the placeholders. - - The `build` function also accepts an argument called `force_external` - which, if you set it to `True` will force external URLs. Per default - external URLs (include the server name) will only be used if the - target URL is on a different subdomain. - - >>> m = Map([ - ... Rule('/', endpoint='index'), - ... Rule('/downloads/', endpoint='downloads/index'), - ... Rule('/downloads/', endpoint='downloads/show') - ... ]) - >>> urls = m.bind("example.com", "/") - >>> urls.build("index", {}) - '/' - >>> urls.build("downloads/show", {'id': 42}) - '/downloads/42' - >>> urls.build("downloads/show", {'id': 42}, force_external=True) - 'http://example.com/downloads/42' - - Because URLs cannot contain non ASCII data you will always get - bytestrings back. Non ASCII characters are urlencoded with the - charset defined on the map instance. - - Additional values are converted to unicode and appended to the URL as - URL querystring parameters: - - >>> urls.build("index", {'q': 'My Searchstring'}) - '/?q=My+Searchstring' - - If a rule does not exist when building a `BuildError` exception is - raised. - - The build method accepts an argument called `method` which allows you - to specify the method you want to have an URL built for if you have - different methods for the same endpoint specified. - - .. 
versionadded:: 0.6 - the `append_unknown` parameter was added. - - :param endpoint: the endpoint of the URL to build. - :param values: the values for the URL to build. Unhandled values are - appended to the URL as query parameters. - :param method: the HTTP method for the rule if there are different - URLs for different methods on the same endpoint. - :param force_external: enforce full canonical external URLs. - :param append_unknown: unknown parameters are appended to the generated - URL as query string argument. Disable this - if you want the builder to ignore those. - """ - self.map.update() - if values: - if isinstance(values, MultiDict): - valueiter = values.iteritems(multi=True) - else: - valueiter = values.iteritems() - values = dict((k, v) for k, v in valueiter if v is not None) - else: - values = {} - - rv = self._partial_build(endpoint, values, method, append_unknown) - if rv is None: - raise BuildError(endpoint, values, method) - domain_part, path = rv - - host = self.get_host(domain_part) - - # shortcut this. - if not force_external and ( - (self.map.host_matching and host == self.server_name) or - (not self.map.host_matching and domain_part == self.subdomain)): - return str(urljoin(self.script_name, './' + path.lstrip('/'))) - return str('%s://%s%s/%s' % ( - self.url_scheme, - host, - self.script_name[:-1], - path.lstrip('/') - )) diff --git a/src/lib/werkzeug/security.py b/src/lib/werkzeug/security.py deleted file mode 100644 index 5f1d7d4..0000000 --- a/src/lib/werkzeug/security.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.security - ~~~~~~~~~~~~~~~~~ - - Security related helpers such as secure password hashing tools. - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import os -import hmac -import posixpath -from itertools import izip -from random import SystemRandom - -# because the API of hmac changed with the introduction of the -# new hashlib module, we have to support both. This sets up a -# mapping to the digest factory functions and the digest modules -# (or factory functions with changed API) -try: - from hashlib import sha1, md5 - _hash_funcs = _hash_mods = {'sha1': sha1, 'md5': md5} - _sha1_mod = sha1 - _md5_mod = md5 -except ImportError: - import sha as _sha1_mod, md5 as _md5_mod - _hash_mods = {'sha1': _sha1_mod, 'md5': _md5_mod} - _hash_funcs = {'sha1': _sha1_mod.new, 'md5': _md5_mod.new} - - -SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' - - -_sys_rng = SystemRandom() -_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep] - if sep not in (None, '/')) - - -def safe_str_cmp(a, b): - """This function compares strings in somewhat constant time. This - requires that the length of at least one string is known in advance. - - Returns `True` if the two strings are equal or `False` if they are not. - - .. versionadded:: 0.7 - """ - if len(a) != len(b): - return False - rv = 0 - for x, y in izip(a, b): - rv |= ord(x) ^ ord(y) - return rv == 0 - - -def gen_salt(length): - """Generate a random string of SALT_CHARS with specified ``length``.""" - if length <= 0: - raise ValueError('requested salt of length <= 0') - return ''.join(_sys_rng.choice(SALT_CHARS) for _ in xrange(length)) - - -def _hash_internal(method, salt, password): - """Internal password hash helper. Supports plaintext without salt, - unsalted and salted passwords. In case salted passwords are used - hmac is used. 
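# Illustrative sketch, not part of the deleted file: the security helpers
# documented above. safe_str_cmp() compares in roughly constant time and
# gen_salt() draws from SALT_CHARS via SystemRandom; names are invented.
from werkzeug.security import gen_salt, safe_str_cmp

def verify_token(supplied, expected):
    # avoids leaking how many leading characters matched
    return safe_str_cmp(supplied, expected)

api_key = gen_salt(24)     # a 24 character random key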
- """ - if method == 'plain': - return password - if salt: - if method not in _hash_mods: - return None - if isinstance(salt, unicode): - salt = salt.encode('utf-8') - h = hmac.new(salt, None, _hash_mods[method]) - else: - if method not in _hash_funcs: - return None - h = _hash_funcs[method]() - if isinstance(password, unicode): - password = password.encode('utf-8') - h.update(password) - return h.hexdigest() - - -def generate_password_hash(password, method='sha1', salt_length=8): - """Hash a password with the given method and salt with with a string of - the given length. The format of the string returned includes the method - that was used so that :func:`check_password_hash` can check the hash. - - The format for the hashed string looks like this:: - - method$salt$hash - - This method can **not** generate unsalted passwords but it is possible - to set the method to plain to enforce plaintext passwords. If a salt - is used, hmac is used internally to salt the password. - - :param password: the password to hash - :param method: the hash method to use (``'md5'`` or ``'sha1'``) - :param salt_length: the lengt of the salt in letters - """ - salt = method != 'plain' and gen_salt(salt_length) or '' - h = _hash_internal(method, salt, password) - if h is None: - raise TypeError('invalid method %r' % method) - return '%s$%s$%s' % (method, salt, h) - - -def check_password_hash(pwhash, password): - """check a password against a given salted and hashed password value. - In order to support unsalted legacy passwords this method supports - plain text passwords, md5 and sha1 hashes (both salted and unsalted). - - Returns `True` if the password matched, `False` otherwise. - - :param pwhash: a hashed string like returned by - :func:`generate_password_hash` - :param password: the plaintext password to compare against the hash - """ - if pwhash.count('$') < 2: - return False - method, salt, hashval = pwhash.split('$', 2) - return safe_str_cmp(_hash_internal(method, salt, password), hashval) - - -def safe_join(directory, filename): - """Safely join `directory` and `filename`. If this cannot be done, - this function returns ``None``. - - :param directory: the base directory. - :param filename: the untrusted filename relative to that directory. - """ - filename = posixpath.normpath(filename) - for sep in _os_alt_seps: - if sep in filename: - return None - if os.path.isabs(filename) or filename.startswith('../'): - return None - return os.path.join(directory, filename) diff --git a/src/lib/werkzeug/serving.py b/src/lib/werkzeug/serving.py deleted file mode 100644 index abb9927..0000000 --- a/src/lib/werkzeug/serving.py +++ /dev/null @@ -1,617 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.serving - ~~~~~~~~~~~~~~~~ - - There are many ways to serve a WSGI application. While you're developing - it you usually don't want a full blown webserver like Apache but a simple - standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in - the standard library. If you're using older versions of Python you can - download the package from the cheeseshop. - - However there are some caveats. Sourcecode won't reload itself when - changed and each time you kill the server using ``^C`` you get an - `KeyboardInterrupt` error. While the latter is easy to solve the first - one can be a pain in the ass in some situations. 
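# Illustrative sketch, not part of the deleted file: the documented
# method$salt$hash round trip and the safe_join() guard against path
# traversal. The password and paths are invented.
from werkzeug.security import (check_password_hash,
                               generate_password_hash, safe_join)

pwhash = generate_password_hash('secret')     # e.g. 'sha1$P2kqZ4vX$...'
assert check_password_hash(pwhash, 'secret')

user_supplied_filename = 'docs/index.html'
path = safe_join('/var/www/pages', user_supplied_filename)
if path is None:
    pass  # reject: the filename tried to escape the base directory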
- - The easiest way is creating a small ``start-myproject.py`` that runs the - application:: - - #!/usr/bin/env python - # -*- coding: utf-8 -*- - from myproject import make_app - from werkzeug.serving import run_simple - - app = make_app(...) - run_simple('localhost', 8080, app, use_reloader=True) - - You can also pass it a `extra_files` keyword argument with a list of - additional files (like configuration files) you want to observe. - - For bigger applications you should consider using `werkzeug.script` - instead of a simple start file. - - - :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import os -import socket -import sys -import time -import thread -import signal -import subprocess -from urllib import unquote -from SocketServer import ThreadingMixIn, ForkingMixIn -from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler - -import werkzeug -from werkzeug._internal import _log -from werkzeug.exceptions import InternalServerError - - -class WSGIRequestHandler(BaseHTTPRequestHandler, object): - """A request handler that implements WSGI dispatching.""" - - @property - def server_version(self): - return 'Werkzeug/' + werkzeug.__version__ - - def make_environ(self): - if '?' in self.path: - path_info, query = self.path.split('?', 1) - else: - path_info = self.path - query = '' - - def shutdown_server(): - self.server.shutdown_signal = True - - url_scheme = self.server.ssl_context is None and 'http' or 'https' - environ = { - 'wsgi.version': (1, 0), - 'wsgi.url_scheme': url_scheme, - 'wsgi.input': self.rfile, - 'wsgi.errors': sys.stderr, - 'wsgi.multithread': self.server.multithread, - 'wsgi.multiprocess': self.server.multiprocess, - 'wsgi.run_once': False, - 'werkzeug.server.shutdown': - shutdown_server, - 'SERVER_SOFTWARE': self.server_version, - 'REQUEST_METHOD': self.command, - 'SCRIPT_NAME': '', - 'PATH_INFO': unquote(path_info), - 'QUERY_STRING': query, - 'CONTENT_TYPE': self.headers.get('Content-Type', ''), - 'CONTENT_LENGTH': self.headers.get('Content-Length', ''), - 'REMOTE_ADDR': self.client_address[0], - 'REMOTE_PORT': self.client_address[1], - 'SERVER_NAME': self.server.server_address[0], - 'SERVER_PORT': str(self.server.server_address[1]), - 'SERVER_PROTOCOL': self.request_version - } - - for key, value in self.headers.items(): - key = 'HTTP_' + key.upper().replace('-', '_') - if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): - environ[key] = value - - return environ - - def run_wsgi(self): - app = self.server.app - environ = self.make_environ() - headers_set = [] - headers_sent = [] - - def write(data): - assert headers_set, 'write() before start_response' - if not headers_sent: - status, response_headers = headers_sent[:] = headers_set - code, msg = status.split(None, 1) - self.send_response(int(code), msg) - header_keys = set() - for key, value in response_headers: - self.send_header(key, value) - key = key.lower() - header_keys.add(key) - if 'content-length' not in header_keys: - self.close_connection = True - self.send_header('Connection', 'close') - if 'server' not in header_keys: - self.send_header('Server', self.version_string()) - if 'date' not in header_keys: - self.send_header('Date', self.date_time_string()) - self.end_headers() - - assert type(data) is str, 'applications must write bytes' - self.wfile.write(data) - self.wfile.flush() - - def start_response(status, response_headers, exc_info=None): - if exc_info: - try: - if headers_sent: - raise exc_info[0], exc_info[1], 
exc_info[2] - finally: - exc_info = None - elif headers_set: - raise AssertionError('Headers already set') - headers_set[:] = [status, response_headers] - return write - - def execute(app): - application_iter = app(environ, start_response) - try: - for data in application_iter: - write(data) - # make sure the headers are sent - if not headers_sent: - write('') - finally: - if hasattr(application_iter, 'close'): - application_iter.close() - application_iter = None - - try: - execute(app) - except (socket.error, socket.timeout), e: - self.connection_dropped(e, environ) - except Exception: - if self.server.passthrough_errors: - raise - from werkzeug.debug.tbtools import get_current_traceback - traceback = get_current_traceback(ignore_system_exceptions=True) - try: - # if we haven't yet sent the headers but they are set - # we roll back to be able to set them again. - if not headers_sent: - del headers_set[:] - execute(InternalServerError()) - except Exception: - pass - self.server.log('error', 'Error on request:\n%s', - traceback.plaintext) - - def handle(self): - """Handles a request ignoring dropped connections.""" - rv = None - try: - rv = BaseHTTPRequestHandler.handle(self) - except (socket.error, socket.timeout), e: - self.connection_dropped(e) - except Exception: - if self.server.ssl_context is None or not is_ssl_error(): - raise - if self.server.shutdown_signal: - self.initiate_shutdown() - return rv - - def initiate_shutdown(self): - """A horrible, horrible way to kill the server for Python 2.6 and - later. It's the best we can do. - """ - # Windows does not provide SIGKILL, go with SIGTERM then. - sig = getattr(signal, 'SIGKILL', signal.SIGTERM) - # reloader active - if os.environ.get('WERKZEUG_RUN_MAIN') == 'true': - os.kill(os.getpid(), sig) - # python 2.7 - self.server._BaseServer__shutdown_request = True - # python 2.6 - self.server._BaseServer__serving = False - - def connection_dropped(self, error, environ=None): - """Called if the connection was closed by the client. By default - nothing happens. 
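# Illustrative sketch, not part of the deleted file: using the
# 'werkzeug.server.shutdown' callable that make_environ() above places in
# the WSGI environ. Only the development server provides this key.
def shutdown_view(environ):
    shutdown = environ.get('werkzeug.server.shutdown')
    if shutdown is None:
        raise RuntimeError('not running the werkzeug development server')
    shutdown()   # sets the server's shutdown_signal flag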
- """ - - def handle_one_request(self): - """Handle a single HTTP request.""" - self.raw_requestline = self.rfile.readline() - if not self.raw_requestline: - self.close_connection = 1 - elif self.parse_request(): - return self.run_wsgi() - - def send_response(self, code, message=None): - """Send the response header and log the response code.""" - self.log_request(code) - if message is None: - message = code in self.responses and self.responses[code][0] or '' - if self.request_version != 'HTTP/0.9': - self.wfile.write("%s %d %s\r\n" % - (self.protocol_version, code, message)) - - def version_string(self): - return BaseHTTPRequestHandler.version_string(self).strip() - - def address_string(self): - return self.client_address[0] - - def log_request(self, code='-', size='-'): - self.log('info', '"%s" %s %s', self.requestline, code, size) - - def log_error(self, *args): - self.log('error', *args) - - def log_message(self, format, *args): - self.log('info', format, *args) - - def log(self, type, message, *args): - _log(type, '%s - - [%s] %s\n' % (self.address_string(), - self.log_date_time_string(), - message % args)) - - -#: backwards compatible name if someone is subclassing it -BaseRequestHandler = WSGIRequestHandler - - -def generate_adhoc_ssl_context(): - """Generates an adhoc SSL context for the development server.""" - from random import random - from OpenSSL import crypto, SSL - - cert = crypto.X509() - cert.set_serial_number(int(random() * sys.maxint)) - cert.gmtime_adj_notBefore(0) - cert.gmtime_adj_notAfter(60 * 60 * 24 * 365) - - subject = cert.get_subject() - subject.CN = '*' - subject.O = 'Dummy Certificate' - - issuer = cert.get_issuer() - issuer.CN = 'Untrusted Authority' - issuer.O = 'Self-Signed' - - pkey = crypto.PKey() - pkey.generate_key(crypto.TYPE_RSA, 768) - cert.set_pubkey(pkey) - cert.sign(pkey, 'md5') - - ctx = SSL.Context(SSL.SSLv23_METHOD) - ctx.use_privatekey(pkey) - ctx.use_certificate(cert) - - return ctx - - -def is_ssl_error(error=None): - """Checks if the given error (or the current one) is an SSL error.""" - if error is None: - error = sys.exc_info()[1] - from OpenSSL import SSL - return isinstance(error, SSL.Error) - - -class _SSLConnectionFix(object): - """Wrapper around SSL connection to provide a working makefile().""" - - def __init__(self, con): - self._con = con - - def makefile(self, mode, bufsize): - return socket._fileobject(self._con, mode, bufsize) - - def __getattr__(self, attrib): - return getattr(self._con, attrib) - - -def select_ip_version(host, port): - """Returns AF_INET4 or AF_INET6 depending on where to connect to.""" - # disabled due to problems with current ipv6 implementations - # and various operating systems. Probably this code also is - # not supposed to work, but I can't come up with any other - # ways to implement this. 
- ##try: - ## info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, - ## socket.SOCK_STREAM, 0, - ## socket.AI_PASSIVE) - ## if info: - ## return info[0][0] - ##except socket.gaierror: - ## pass - if ':' in host and hasattr(socket, 'AF_INET6'): - return socket.AF_INET6 - return socket.AF_INET - - -class BaseWSGIServer(HTTPServer, object): - """Simple single-threaded, single-process WSGI server.""" - multithread = False - multiprocess = False - request_queue_size = 128 - - def __init__(self, host, port, app, handler=None, - passthrough_errors=False, ssl_context=None): - if handler is None: - handler = WSGIRequestHandler - self.address_family = select_ip_version(host, port) - HTTPServer.__init__(self, (host, int(port)), handler) - self.app = app - self.passthrough_errors = passthrough_errors - self.shutdown_signal = False - - if ssl_context is not None: - try: - from OpenSSL import tsafe - except ImportError: - raise TypeError('SSL is not available if the OpenSSL ' - 'library is not installed.') - if ssl_context == 'adhoc': - ssl_context = generate_adhoc_ssl_context() - self.socket = tsafe.Connection(ssl_context, self.socket) - self.ssl_context = ssl_context - else: - self.ssl_context = None - - def log(self, type, message, *args): - _log(type, message, *args) - - def serve_forever(self): - self.shutdown_signal = False - try: - HTTPServer.serve_forever(self) - except KeyboardInterrupt: - pass - - def handle_error(self, request, client_address): - if self.passthrough_errors: - raise - else: - return HTTPServer.handle_error(self, request, client_address) - - def get_request(self): - con, info = self.socket.accept() - if self.ssl_context is not None: - con = _SSLConnectionFix(con) - return con, info - - -class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer): - """A WSGI server that does threading.""" - multithread = True - - -class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer): - """A WSGI server that does forking.""" - multiprocess = True - - def __init__(self, host, port, app, processes=40, handler=None, - passthrough_errors=False, ssl_context=None): - BaseWSGIServer.__init__(self, host, port, app, handler, - passthrough_errors, ssl_context) - self.max_children = processes - - -def make_server(host, port, app=None, threaded=False, processes=1, - request_handler=None, passthrough_errors=False, - ssl_context=None): - """Create a new server instance that is either threaded, or forks - or just processes one request after another. - """ - if threaded and processes > 1: - raise ValueError("cannot have a multithreaded and " - "multi process server.") - elif threaded: - return ThreadedWSGIServer(host, port, app, request_handler, - passthrough_errors, ssl_context) - elif processes > 1: - return ForkingWSGIServer(host, port, app, processes, request_handler, - passthrough_errors, ssl_context) - else: - return BaseWSGIServer(host, port, app, request_handler, - passthrough_errors, ssl_context) - - -def _iter_module_files(): - for module in sys.modules.values(): - filename = getattr(module, '__file__', None) - if filename: - old = None - while not os.path.isfile(filename): - old = filename - filename = os.path.dirname(filename) - if filename == old: - break - else: - if filename[-4:] in ('.pyc', '.pyo'): - filename = filename[:-1] - yield filename - - -def _reloader_stat_loop(extra_files=None, interval=1): - """When this function is run from the main thread, it will force other - threads to exit when any modules currently loaded change. - - Copyright notice. 
This function is based on the autoreload.py from - the CherryPy trac which originated from WSGIKit which is now dead. - - :param extra_files: a list of additional files it should watch. - """ - from itertools import chain - mtimes = {} - while 1: - for filename in chain(_iter_module_files(), extra_files or ()): - try: - mtime = os.stat(filename).st_mtime - except OSError: - continue - - old_time = mtimes.get(filename) - if old_time is None: - mtimes[filename] = mtime - continue - elif mtime > old_time: - _log('info', ' * Detected change in %r, reloading' % filename) - sys.exit(3) - time.sleep(interval) - - -def _reloader_inotify(extra_files=None, interval=None): - # Mutated by inotify loop when changes occur. - changed = [False] - - # Setup inotify watches - from pyinotify import WatchManager, Notifier - - # this API changed at one point, support both - try: - from pyinotify import EventsCodes as ec - ec.IN_ATTRIB - except (ImportError, AttributeError): - import pyinotify as ec - - wm = WatchManager() - mask = ec.IN_DELETE_SELF | ec.IN_MOVE_SELF | ec.IN_MODIFY | ec.IN_ATTRIB - - def signal_changed(event): - if changed[0]: - return - _log('info', ' * Detected change in %r, reloading' % event.path) - changed[:] = [True] - - for fname in extra_files or (): - wm.add_watch(fname, mask, signal_changed) - - # ... And now we wait... - notif = Notifier(wm) - try: - while not changed[0]: - # always reiterate through sys.modules, adding them - for fname in _iter_module_files(): - wm.add_watch(fname, mask, signal_changed) - notif.process_events() - if notif.check_events(timeout=interval): - notif.read_events() - # TODO Set timeout to something small and check parent liveliness - finally: - notif.stop() - sys.exit(3) - - -# currently we always use the stat loop reloader for the simple reason -# that the inotify one does not respond to added files properly. Also -# it's quite buggy and the API is a mess. -reloader_loop = _reloader_stat_loop - - -def restart_with_reloader(): - """Spawn a new Python interpreter with the same arguments as this one, - but running the reloader thread. - """ - while 1: - _log('info', ' * Restarting with reloader') - args = [sys.executable] + sys.argv - new_environ = os.environ.copy() - new_environ['WERKZEUG_RUN_MAIN'] = 'true' - - # a weird bug on windows. sometimes unicode strings end up in the - # environment and subprocess.call does not like this, encode them - # to latin1 and continue. - if os.name == 'nt': - for key, value in new_environ.iteritems(): - if isinstance(value, unicode): - new_environ[key] = value.encode('iso-8859-1') - - exit_code = subprocess.call(args, env=new_environ) - if exit_code != 3: - return exit_code - - -def run_with_reloader(main_func, extra_files=None, interval=1): - """Run the given function in an independent python interpreter.""" - import signal - signal.signal(signal.SIGTERM, lambda *args: sys.exit(0)) - if os.environ.get('WERKZEUG_RUN_MAIN') == 'true': - thread.start_new_thread(main_func, ()) - try: - reloader_loop(extra_files, interval) - except KeyboardInterrupt: - return - try: - sys.exit(restart_with_reloader()) - except KeyboardInterrupt: - pass - - -def run_simple(hostname, port, application, use_reloader=False, - use_debugger=False, use_evalex=True, - extra_files=None, reloader_interval=1, threaded=False, - processes=1, request_handler=None, static_files=None, - passthrough_errors=False, ssl_context=None): - """Start an application using wsgiref and with an optional reloader. 
-def run_simple(hostname, port, application, use_reloader=False,
-               use_debugger=False, use_evalex=True,
-               extra_files=None, reloader_interval=1, threaded=False,
-               processes=1, request_handler=None, static_files=None,
-               passthrough_errors=False, ssl_context=None):
-    """Start an application using wsgiref and with an optional reloader.  This
-    wraps `wsgiref` to fix the wrong default reporting of the multithreaded
-    WSGI variable and adds optional multithreading and fork support.
-
-    .. versionadded:: 0.5
-       `static_files` was added to simplify serving of static files as well
-       as `passthrough_errors`.
-
-    .. versionadded:: 0.6
-       support for SSL was added.
-
-    :param hostname: The host for the application.  eg: ``'localhost'``
-    :param port: The port for the server.  eg: ``8080``
-    :param application: the WSGI application to execute
-    :param use_reloader: should the server automatically restart the python
-                         process if modules were changed?
-    :param use_debugger: should the werkzeug debugging system be used?
-    :param use_evalex: should the exception evaluation feature be enabled?
-    :param extra_files: a list of files the reloader should watch
-                        additionally to the modules.  For example configuration
-                        files.
-    :param reloader_interval: the interval for the reloader in seconds.
-    :param threaded: should the process handle each request in a separate
-                     thread?
-    :param processes: number of processes to spawn.
-    :param request_handler: optional parameter that can be used to replace
-                            the default one.  You can use this to replace it
-                            with a different
-                            :class:`~BaseHTTPServer.BaseHTTPRequestHandler`
-                            subclass.
-    :param static_files: a dict of paths for static files.  This works exactly
-                         like :class:`SharedDataMiddleware`, it's actually
-                         just wrapping the application in that middleware before
-                         serving.
-    :param passthrough_errors: set this to `True` to disable the error catching.
-                               This means that the server will die on errors but
-                               it can be useful to hook debuggers in (pdb etc.)
-    :param ssl_context: an SSL context for the connection. Either an OpenSSL
-                        context, the string ``'adhoc'`` if the server should
-                        automatically create one, or `None` to disable SSL
-                        (which is the default).
-    """
-    if use_debugger:
-        from werkzeug.debug import DebuggedApplication
-        application = DebuggedApplication(application, use_evalex)
-    if static_files:
-        from werkzeug.wsgi import SharedDataMiddleware
-        application = SharedDataMiddleware(application, static_files)
-
-    def inner():
-        make_server(hostname, port, application, threaded,
-                    processes, request_handler,
-                    passthrough_errors, ssl_context).serve_forever()
-
-    if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
-        display_hostname = hostname != '*' and hostname or 'localhost'
-        if ':' in display_hostname:
-            display_hostname = '[%s]' % display_hostname
-        _log('info', ' * Running on %s://%s:%d/', ssl_context is None
-             and 'http' or 'https', display_hostname, port)
-    if use_reloader:
-        # Create and destroy a socket so that any exceptions are raised before
-        # we spawn a separate Python interpreter and lose this ability.
-        address_family = select_ip_version(hostname, port)
-        test_socket = socket.socket(address_family, socket.SOCK_STREAM)
-        test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        test_socket.bind((hostname, port))
-        test_socket.close()
-        run_with_reloader(inner, extra_files, reloader_interval)
-    else:
-        inner()
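Editor's note: run_simple is the public entry point of this module. A typical development invocation looks like the following; the application itself is illustrative, only the run_simple call reflects the API defined above.

    from werkzeug.serving import run_simple

    def application(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello World!\n']

    if __name__ == '__main__':
        # Reloader and debugger enabled, as in a typical dev setup.
        run_simple('localhost', 5000, application,
                   use_reloader=True, use_debugger=True)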
-""" -import sys -import re -import __builtin__ as builtins -from compiler import ast, parse -from compiler.pycodegen import ModuleCodeGenerator -from tokenize import PseudoToken -from werkzeug import urls, utils -from werkzeug._internal import _decode_unicode -from werkzeug.datastructures import MultiDict - - -from warnings import warn -warn(DeprecationWarning('werkzeug.templates is deprecated and ' - 'will be removed in Werkzeug 1.0')) - - -# Copyright notice: The `parse_data` method uses the string interpolation -# algorithm by Ka-Ping Yee which originally was part of `Itpl20.py`_. -# -# .. _Itpl20.py: http://lfw.org/python/Itpl20.py - - -token_re = re.compile('%s|%s(?s)' % ( - r'[uU]?[rR]?("""|\'\'\')((?\n?(?s)') -escape_re = re.compile(r'\\\n|\\(\\|<%)') -namestart_chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_' -undefined = type('UndefinedType', (object,), { - '__iter__': lambda x: iter(()), - '__repr__': lambda x: 'Undefined', - '__str__': lambda x: '' -})() -runtime_vars = frozenset(['Undefined', '__to_unicode', '__context', - '__write', '__write_many']) - - -def call_stmt(func, args, lineno): - return ast.CallFunc(ast.Name(func, lineno=lineno), - args, lineno=lineno) - - -def tokenize(source, filename): - escape = escape_re.sub - escape_repl = lambda m: m.group(1) or '' - lineno = 1 - pos = 0 - - for match in directive_re.finditer(source): - start, end = match.span() - if start > pos: - data = source[pos:start] - yield lineno, 'data', escape(escape_repl, data) - lineno += data.count('\n') - is_comment, is_code, cmd, args = match.groups() - if is_code: - yield lineno, 'code', args - elif not is_comment: - yield lineno, 'cmd', (cmd, args) - lineno += source[start:end].count('\n') - pos = end - - if pos < len(source): - yield lineno, 'data', escape(escape_repl, source[pos:]) - - -def transform(node, filename): - root = ast.Module(None, node, lineno=1) - nodes = [root] - while nodes: - node = nodes.pop() - node.filename = filename - if node.__class__ in (ast.Printnl, ast.Print): - node.dest = ast.Name('__context') - elif node.__class__ is ast.Const and isinstance(node.value, str): - try: - node.value.decode('ascii') - except UnicodeError: - node.value = node.value.decode('utf-8') - nodes.extend(node.getChildNodes()) - return root - - -class TemplateSyntaxError(SyntaxError): - - def __init__(self, msg, filename, lineno): - from linecache import getline - l = getline(filename, lineno) - SyntaxError.__init__(self, msg, (filename, lineno, len(l) or 1, l)) - - -class Parser(object): - - def __init__(self, gen, filename): - self.gen = gen - self.filename = filename - self.lineno = 1 - - def fail(self, msg): - raise TemplateSyntaxError(msg, self.filename, self.lineno) - - def parse_python(self, expr, type='exec'): - if isinstance(expr, unicode): - expr = '\xef\xbb\xbf' + expr.encode('utf-8') - try: - node = parse(expr, type) - except SyntaxError, e: - raise TemplateSyntaxError(str(e), self.filename, - self.lineno + e.lineno - 1) - nodes = [node] - while nodes: - n = nodes.pop() - if hasattr(n, 'lineno'): - n.lineno = (n.lineno or 1) + self.lineno - 1 - nodes.extend(n.getChildNodes()) - return node.node - - def parse(self, needle=()): - start_lineno = self.lineno - result = [] - add = result.append - for self.lineno, token, value in self.gen: - if token == 'data': - add(self.parse_data(value)) - elif token == 'code': - add(self.parse_code(value.splitlines())) - elif token == 'cmd': - name, args = value - if name in needle: - return name, args, ast.Stmt(result, 
-class Parser(object):
-
-    def __init__(self, gen, filename):
-        self.gen = gen
-        self.filename = filename
-        self.lineno = 1
-
-    def fail(self, msg):
-        raise TemplateSyntaxError(msg, self.filename, self.lineno)
-
-    def parse_python(self, expr, type='exec'):
-        if isinstance(expr, unicode):
-            expr = '\xef\xbb\xbf' + expr.encode('utf-8')
-        try:
-            node = parse(expr, type)
-        except SyntaxError, e:
-            raise TemplateSyntaxError(str(e), self.filename,
-                                      self.lineno + e.lineno - 1)
-        nodes = [node]
-        while nodes:
-            n = nodes.pop()
-            if hasattr(n, 'lineno'):
-                n.lineno = (n.lineno or 1) + self.lineno - 1
-            nodes.extend(n.getChildNodes())
-        return node.node
-
-    def parse(self, needle=()):
-        start_lineno = self.lineno
-        result = []
-        add = result.append
-        for self.lineno, token, value in self.gen:
-            if token == 'data':
-                add(self.parse_data(value))
-            elif token == 'code':
-                add(self.parse_code(value.splitlines()))
-            elif token == 'cmd':
-                name, args = value
-                if name in needle:
-                    return name, args, ast.Stmt(result,
-                                                lineno=start_lineno)
-                if name in ('for', 'while'):
-                    add(self.parse_loop(args, name))
-                elif name == 'if':
-                    add(self.parse_if(args))
-                else:
-                    self.fail('unknown directive %s' % name)
-        if needle:
-            self.fail('unexpected end of template')
-        return ast.Stmt(result, lineno=start_lineno)
-
-    def parse_loop(self, args, type):
-        rv = self.parse_python('%s %s: pass' % (type, args), 'exec').nodes[0]
-        tag, value, rv.body = self.parse(('end' + type, 'else'))
-        if value:
-            self.fail('unexpected data after ' + tag)
-        if tag == 'else':
-            tag, value, rv.else_ = self.parse(('end' + type,))
-            if value:
-                self.fail('unexpected data after else')
-        return rv
-
-    def parse_if(self, args):
-        cond = self.parse_python('if %s: pass' % args).nodes[0]
-        tag, value, body = self.parse(('else', 'elif', 'endif'))
-        cond.tests[0] = (cond.tests[0][0], body)
-        while 1:
-            if tag == 'else':
-                if value:
-                    self.fail('unexpected data after else')
-                tag, value, cond.else_ = self.parse(('endif',))
-            elif tag == 'elif':
-                expr = self.parse_python(value, 'eval')
-                tag, value, body = self.parse(('else', 'elif', 'endif'))
-                cond.tests.append((expr, body))
-                continue
-            break
-        if value:
-            self.fail('unexpected data after endif')
-        return cond
-
-    def parse_code(self, lines):
-        margin = sys.maxint
-        for line in lines[1:]:
-            content = len(line.lstrip())
-            if content:
-                indent = len(line) - content
-                margin = min(margin, indent)
-        if lines:
-            lines[0] = lines[0].lstrip()
-        if margin < sys.maxint:
-            for i in xrange(1, len(lines)):
-                lines[i] = lines[i][margin:]
-        while lines and not lines[-1]:
-            lines.pop()
-        while lines and not lines[0]:
-            lines.pop(0)
-        return self.parse_python('\n'.join(lines))
-
-    def parse_data(self, text):
-        start_lineno = lineno = self.lineno
-        pos = 0
-        end = len(text)
-        nodes = []
-
-        def match_or_fail(pos):
-            match = token_re.match(text, pos)
-            if match is None:
-                self.fail('invalid syntax')
-            return match.group().strip(), match.end()
-
-        def write_expr(code):
-            node = self.parse_python(code, 'eval')
-            nodes.append(call_stmt('__to_unicode', [node], lineno))
-            return code.count('\n')
-
-        def write_data(value):
-            if value:
-                nodes.append(ast.Const(value, lineno=lineno))
-                return value.count('\n')
-            return 0
-
-        while 1:
-            offset = text.find('$', pos)
-            if offset < 0:
-                break
-            next = text[offset + 1]
-
-            if next == '{':
-                lineno += write_data(text[pos:offset])
-                pos = offset + 2
-                level = 1
-                while level:
-                    token, pos = match_or_fail(pos)
-                    if token in ('{', '}'):
-                        level += token == '{' and 1 or -1
-                lineno += write_expr(text[offset + 2:pos - 1])
-            elif next in namestart_chars:
-                lineno += write_data(text[pos:offset])
-                token, pos = match_or_fail(offset + 1)
-                while pos < end:
-                    if text[pos] == '.' and pos + 1 < end and \
-                       text[pos + 1] in namestart_chars:
-                        token, pos = match_or_fail(pos + 1)
-                    elif text[pos] in '([':
-                        pos += 1
-                        level = 1
-                        while level:
-                            token, pos = match_or_fail(pos)
-                            if token in ('(', ')', '[', ']'):
-                                level += token in '([' and 1 or -1
-                    else:
-                        break
-                lineno += write_expr(text[offset + 1:pos])
-            else:
-                lineno += write_data(text[pos:offset + 1])
-                pos = offset + 1 + (next == '$')
-
-        write_data(text[pos:])
-
-        return ast.Discard(call_stmt(len(nodes) == 1 and '__write' or
-                                     '__write_many', nodes, start_lineno),
-                           lineno=start_lineno)
-
-
-class Context(object):
-
-    def __init__(self, namespace, charset, errors):
-        self.charset = charset
-        self.errors = errors
-        self._namespace = namespace
-        self._buffer = []
-        self._write = self._buffer.append
-        _extend = self._buffer.extend
-        self.runtime = dict(
-            Undefined=undefined,
-            __to_unicode=self.to_unicode,
-            __context=self,
-            __write=self._write,
-            __write_many=lambda *a: _extend(a)
-        )
-
-    def write(self, value):
-        self._write(self.to_unicode(value))
-
-    def to_unicode(self, value):
-        if isinstance(value, str):
-            return _decode_unicode(value, self.charset, self.errors)
-        return unicode(value)
-
-    def get_value(self, as_unicode=True):
-        rv = u''.join(self._buffer)
-        if not as_unicode:
-            return rv.encode(self.charset, self.errors)
-        return rv
-
-    def __getitem__(self, key, default=undefined):
-        try:
-            return self._namespace[key]
-        except KeyError:
-            return getattr(builtins, key, default)
-
-    def get(self, key, default=None):
-        return self.__getitem__(key, default)
-
-    def __setitem__(self, key, value):
-        self._namespace[key] = value
-
-    def __delitem__(self, key):
-        del self._namespace[key]
-
-
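Editor's note: compiled templates never call Context.write directly; the generated bytecode resolves __write, __write_many, and __to_unicode through Context.runtime and appends onto the buffer. A conceptual illustration, not code from this diff:

    # '$x and ${x * 2}' compiles to roughly:
    #     __write_many(__to_unicode(x), u' and ', __to_unicode(x * 2))
    # where __write_many extends the Context buffer and get_value()
    # joins it into the final unicode string.
    ctx = Context({'x': 21}, 'utf-8', 'strict')
    ctx.runtime['__write_many'](u'21', u' and ', u'42')
    print ctx.get_value()  # prints: 21 and 42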
-class TemplateCodeGenerator(ModuleCodeGenerator):
-
-    def __init__(self, node, filename):
-        ModuleCodeGenerator.__init__(self, transform(node, filename))
-
-    def _nameOp(self, prefix, name):
-        if name in runtime_vars:
-            return self.emit(prefix + '_GLOBAL', name)
-        return ModuleCodeGenerator._nameOp(self, prefix, name)
-
-
-class Template(object):
-    """Represents a simple text based template.  It's a good idea to load such
-    templates from files on the file system to get better debug output.
-    """
-
-    default_context = {
-        'escape': utils.escape,
-        'url_quote': urls.url_quote,
-        'url_quote_plus': urls.url_quote_plus,
-        'url_encode': urls.url_encode
-    }
-
-    def __init__(self, source, filename='