First ansible commit
This commit is contained in:
BIN
.ve/lib/python2.7/site-packages/.libs_cffi_backend/libffi-45372312.so.6.0.4
Executable file
BIN
.ve/lib/python2.7/site-packages/.libs_cffi_backend/libffi-45372312.so.6.0.4
Executable file
Binary file not shown.
@@ -0,0 +1,37 @@
|
||||
|
||||
Jinja2
|
||||
~~~~~~
|
||||
|
||||
Jinja2 is a template engine written in pure Python. It provides a
|
||||
`Django`_ inspired non-XML syntax but supports inline expressions and
|
||||
an optional `sandboxed`_ environment.
|
||||
|
||||
Nutshell
|
||||
--------
|
||||
|
||||
Here a small example of a Jinja template::
|
||||
|
||||
{% extends 'base.html' %}
|
||||
{% block title %}Memberlist{% endblock %}
|
||||
{% block content %}
|
||||
<ul>
|
||||
{% for user in users %}
|
||||
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endblock %}
|
||||
|
||||
Philosophy
|
||||
----------
|
||||
|
||||
Application logic is for the controller but don't try to make the life
|
||||
for the template designer too hard by giving him too few functionality.
|
||||
|
||||
For more informations visit the new `Jinja2 webpage`_ and `documentation`_.
|
||||
|
||||
.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security)
|
||||
.. _Django: https://www.djangoproject.com/
|
||||
.. _Jinja2 webpage: http://jinja.pocoo.org/
|
||||
.. _documentation: http://jinja.pocoo.org/2/documentation/
|
||||
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
pip
|
||||
@@ -0,0 +1,31 @@
|
||||
Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details.
|
||||
|
||||
Some rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following
|
||||
disclaimer in the documentation and/or other materials provided
|
||||
with the distribution.
|
||||
|
||||
* The names of the contributors may not be used to endorse or
|
||||
promote products derived from this software without specific
|
||||
prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
@@ -0,0 +1,68 @@
|
||||
Metadata-Version: 2.0
|
||||
Name: Jinja2
|
||||
Version: 2.10
|
||||
Summary: A small but fast and easy to use stand-alone template engine written in pure python.
|
||||
Home-page: http://jinja.pocoo.org/
|
||||
Author: Armin Ronacher
|
||||
Author-email: armin.ronacher@active-4.com
|
||||
License: BSD
|
||||
Description-Content-Type: UNKNOWN
|
||||
Platform: UNKNOWN
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Environment :: Web Environment
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: License :: OSI Approved :: BSD License
|
||||
Classifier: Operating System :: OS Independent
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 2
|
||||
Classifier: Programming Language :: Python :: 2.6
|
||||
Classifier: Programming Language :: Python :: 2.7
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.3
|
||||
Classifier: Programming Language :: Python :: 3.4
|
||||
Classifier: Programming Language :: Python :: 3.5
|
||||
Classifier: Programming Language :: Python :: 3.6
|
||||
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
|
||||
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
||||
Classifier: Topic :: Text Processing :: Markup :: HTML
|
||||
Requires-Dist: MarkupSafe (>=0.23)
|
||||
Provides-Extra: i18n
|
||||
Requires-Dist: Babel (>=0.8); extra == 'i18n'
|
||||
|
||||
|
||||
Jinja2
|
||||
~~~~~~
|
||||
|
||||
Jinja2 is a template engine written in pure Python. It provides a
|
||||
`Django`_ inspired non-XML syntax but supports inline expressions and
|
||||
an optional `sandboxed`_ environment.
|
||||
|
||||
Nutshell
|
||||
--------
|
||||
|
||||
Here a small example of a Jinja template::
|
||||
|
||||
{% extends 'base.html' %}
|
||||
{% block title %}Memberlist{% endblock %}
|
||||
{% block content %}
|
||||
<ul>
|
||||
{% for user in users %}
|
||||
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endblock %}
|
||||
|
||||
Philosophy
|
||||
----------
|
||||
|
||||
Application logic is for the controller but don't try to make the life
|
||||
for the template designer too hard by giving him too few functionality.
|
||||
|
||||
For more informations visit the new `Jinja2 webpage`_ and `documentation`_.
|
||||
|
||||
.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security)
|
||||
.. _Django: https://www.djangoproject.com/
|
||||
.. _Jinja2 webpage: http://jinja.pocoo.org/
|
||||
.. _documentation: http://jinja.pocoo.org/2/documentation/
|
||||
|
||||
|
||||
61
.ve/lib/python2.7/site-packages/Jinja2-2.10.dist-info/RECORD
Normal file
61
.ve/lib/python2.7/site-packages/Jinja2-2.10.dist-info/RECORD
Normal file
@@ -0,0 +1,61 @@
|
||||
Jinja2-2.10.dist-info/DESCRIPTION.rst,sha256=b5ckFDoM7vVtz_mAsJD4OPteFKCqE7beu353g4COoYI,978
|
||||
Jinja2-2.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
Jinja2-2.10.dist-info/LICENSE.txt,sha256=JvzUNv3Io51EiWrAPm8d_SXjhJnEjyDYvB3Tvwqqils,1554
|
||||
Jinja2-2.10.dist-info/METADATA,sha256=18EgU8zR6-av-0-5y_gXebzK4GnBB_76lALUsl-6QHM,2258
|
||||
Jinja2-2.10.dist-info/RECORD,,
|
||||
Jinja2-2.10.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110
|
||||
Jinja2-2.10.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72
|
||||
Jinja2-2.10.dist-info/metadata.json,sha256=NPUJ9TMBxVQAv_kTJzvU8HwmP-4XZvbK9mz6_4YUVl4,1473
|
||||
Jinja2-2.10.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
|
||||
jinja2/__init__.py,sha256=xJHjaMoy51_KXn1wf0cysH6tUUifUxZCwSOfcJGEYZw,2614
|
||||
jinja2/__init__.pyc,,
|
||||
jinja2/_compat.py,sha256=xP60CE5Qr8FTYcDE1f54tbZLKGvMwYml4-8T7Q4KG9k,2596
|
||||
jinja2/_compat.pyc,,
|
||||
jinja2/_identifier.py,sha256=W1QBSY-iJsyt6oR_nKSuNNCzV95vLIOYgUNPUI1d5gU,1726
|
||||
jinja2/_identifier.pyc,,
|
||||
jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144
|
||||
jinja2/asyncsupport.py,sha256=UErQ3YlTLaSjFb94P4MVn08-aVD9jJxty2JVfMRb-1M,7878
|
||||
jinja2/bccache.py,sha256=nQldx0ZRYANMyfvOihRoYFKSlUdd5vJkS7BjxNwlOZM,12794
|
||||
jinja2/bccache.pyc,,
|
||||
jinja2/compiler.py,sha256=BqC5U6JxObSRhblyT_a6Tp5GtEU5z3US1a4jLQaxxgo,65386
|
||||
jinja2/compiler.pyc,,
|
||||
jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626
|
||||
jinja2/constants.pyc,,
|
||||
jinja2/debug.py,sha256=WTVeUFGUa4v6ReCsYv-iVPa3pkNB75OinJt3PfxNdXs,12045
|
||||
jinja2/debug.pyc,,
|
||||
jinja2/defaults.py,sha256=Em-95hmsJxIenDCZFB1YSvf9CNhe9rBmytN3yUrBcWA,1400
|
||||
jinja2/defaults.pyc,,
|
||||
jinja2/environment.py,sha256=VnkAkqw8JbjZct4tAyHlpBrka2vqB-Z58RAP-32P1ZY,50849
|
||||
jinja2/environment.pyc,,
|
||||
jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428
|
||||
jinja2/exceptions.pyc,,
|
||||
jinja2/ext.py,sha256=atMQydEC86tN1zUsdQiHw5L5cF62nDbqGue25Yiu3N4,24500
|
||||
jinja2/ext.pyc,,
|
||||
jinja2/filters.py,sha256=yOAJk0MsH-_gEC0i0U6NweVQhbtYaC-uE8xswHFLF4w,36528
|
||||
jinja2/filters.pyc,,
|
||||
jinja2/idtracking.py,sha256=2GbDSzIvGArEBGLkovLkqEfmYxmWsEf8c3QZwM4uNsw,9197
|
||||
jinja2/idtracking.pyc,,
|
||||
jinja2/lexer.py,sha256=ySEPoXd1g7wRjsuw23uimS6nkGN5aqrYwcOKxCaVMBQ,28559
|
||||
jinja2/lexer.pyc,,
|
||||
jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382
|
||||
jinja2/loaders.pyc,,
|
||||
jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340
|
||||
jinja2/meta.pyc,,
|
||||
jinja2/nativetypes.py,sha256=_sJhS8f-8Q0QMIC0dm1YEdLyxEyoO-kch8qOL5xUDfE,7308
|
||||
jinja2/nativetypes.pyc,,
|
||||
jinja2/nodes.py,sha256=L10L_nQDfubLhO3XjpF9qz46FSh2clL-3e49ogVlMmA,30853
|
||||
jinja2/nodes.pyc,,
|
||||
jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722
|
||||
jinja2/optimizer.pyc,,
|
||||
jinja2/parser.py,sha256=lPzTEbcpTRBLw8ii6OYyExHeAhaZLMA05Hpv4ll3ULk,35875
|
||||
jinja2/parser.pyc,,
|
||||
jinja2/runtime.py,sha256=DHdD38Pq8gj7uWQC5usJyWFoNWL317A9AvXOW_CLB34,27755
|
||||
jinja2/runtime.pyc,,
|
||||
jinja2/sandbox.py,sha256=TVyZHlNqqTzsv9fv2NvJNmSdWRHTguhyMHdxjWms32U,16708
|
||||
jinja2/sandbox.pyc,,
|
||||
jinja2/tests.py,sha256=iJQLwbapZr-EKquTG_fVOVdwHUUKf3SX9eNkjQDF8oU,4237
|
||||
jinja2/tests.pyc,,
|
||||
jinja2/utils.py,sha256=q24VupGZotQ-uOyrJxCaXtDWhZC1RgsQG7kcdmjck2Q,20629
|
||||
jinja2/utils.pyc,,
|
||||
jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316
|
||||
jinja2/visitor.pyc,,
|
||||
@@ -0,0 +1,6 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.30.0)
|
||||
Root-Is-Purelib: true
|
||||
Tag: py2-none-any
|
||||
Tag: py3-none-any
|
||||
|
||||
@@ -0,0 +1,4 @@
|
||||
|
||||
[babel.extractors]
|
||||
jinja2 = jinja2.ext:babel_extract[i18n]
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://jinja.pocoo.org/"}}, "python.exports": {"babel.extractors": {"jinja2": "jinja2.ext:babel_extract [i18n]"}}}, "extras": ["i18n"], "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "Jinja2", "run_requires": [{"extra": "i18n", "requires": ["Babel (>=0.8)"]}, {"requires": ["MarkupSafe (>=0.23)"]}], "summary": "A small but fast and easy to use stand-alone template engine written in pure python.", "version": "2.10"}
|
||||
@@ -0,0 +1 @@
|
||||
jinja2
|
||||
@@ -0,0 +1 @@
|
||||
pip
|
||||
@@ -0,0 +1,33 @@
|
||||
`BSD 3-Clause <https://opensource.org/licenses/BSD-3-Clause>`_
|
||||
|
||||
Copyright © 2010 by the Pallets team.
|
||||
|
||||
Some rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
- Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
- Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
- Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
|
||||
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
|
||||
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||||
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
|
||||
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGE.
|
||||
@@ -0,0 +1,103 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: MarkupSafe
|
||||
Version: 1.1.0
|
||||
Summary: Safely add untrusted strings to HTML/XML markup.
|
||||
Home-page: https://www.palletsprojects.com/p/markupsafe/
|
||||
Author: Armin Ronacher
|
||||
Author-email: armin.ronacher@active-4.com
|
||||
Maintainer: Pallets Team
|
||||
Maintainer-email: contact@palletsprojects.com
|
||||
License: BSD
|
||||
Project-URL: Code, https://github.com/pallets/markupsafe
|
||||
Project-URL: Issue tracker, https://github.com/pallets/markupsafe/issues
|
||||
Project-URL: Documentation, https://markupsafe.palletsprojects.com/
|
||||
Platform: UNKNOWN
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Environment :: Web Environment
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: License :: OSI Approved :: BSD License
|
||||
Classifier: Operating System :: OS Independent
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 2
|
||||
Classifier: Programming Language :: Python :: 2.7
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.4
|
||||
Classifier: Programming Language :: Python :: 3.5
|
||||
Classifier: Programming Language :: Python :: 3.6
|
||||
Classifier: Programming Language :: Python :: 3.7
|
||||
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
|
||||
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
||||
Classifier: Topic :: Text Processing :: Markup :: HTML
|
||||
Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*
|
||||
|
||||
MarkupSafe
|
||||
==========
|
||||
|
||||
MarkupSafe implements a text object that escapes characters so it is
|
||||
safe to use in HTML and XML. Characters that have special meanings are
|
||||
replaced so that they display as the actual characters. This mitigates
|
||||
injection attacks, meaning untrusted user input can safely be displayed
|
||||
on a page.
|
||||
|
||||
|
||||
Installing
|
||||
----------
|
||||
|
||||
Install and update using `pip`_:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
pip install -U MarkupSafe
|
||||
|
||||
.. _pip: https://pip.pypa.io/en/stable/quickstart/
|
||||
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
>>> from markupsafe import Markup, escape
|
||||
>>> # escape replaces special characters and wraps in Markup
|
||||
>>> escape('<script>alert(document.cookie);</script>')
|
||||
Markup(u'<script>alert(document.cookie);</script>')
|
||||
>>> # wrap in Markup to mark text "safe" and prevent escaping
|
||||
>>> Markup('<strong>Hello</strong>')
|
||||
Markup('<strong>hello</strong>')
|
||||
>>> escape(Markup('<strong>Hello</strong>'))
|
||||
Markup('<strong>hello</strong>')
|
||||
>>> # Markup is a text subclass (str on Python 3, unicode on Python 2)
|
||||
>>> # methods and operators escape their arguments
|
||||
>>> template = Markup("Hello <em>%s</em>")
|
||||
>>> template % '"World"'
|
||||
Markup('Hello <em>"World"</em>')
|
||||
|
||||
|
||||
Donate
|
||||
------
|
||||
|
||||
The Pallets organization develops and supports MarkupSafe and other
|
||||
libraries that use it. In order to grow the community of contributors
|
||||
and users, and allow the maintainers to devote more time to the
|
||||
projects, `please donate today`_.
|
||||
|
||||
.. _please donate today: https://psfmember.org/civicrm/contribute/transact?reset=1&id=20
|
||||
|
||||
|
||||
Links
|
||||
-----
|
||||
|
||||
* Website: https://www.palletsprojects.com/p/markupsafe/
|
||||
* Documentation: https://markupsafe.palletsprojects.com/
|
||||
* License: `BSD <https://github.com/pallets/markupsafe/blob/master/LICENSE.rst>`_
|
||||
* Releases: https://pypi.org/project/MarkupSafe/
|
||||
* Code: https://github.com/pallets/markupsafe
|
||||
* Issue tracker: https://github.com/pallets/markupsafe/issues
|
||||
* Test status:
|
||||
|
||||
* Linux, Mac: https://travis-ci.org/pallets/markupsafe
|
||||
* Windows: https://ci.appveyor.com/project/pallets/markupsafe
|
||||
|
||||
* Test coverage: https://codecov.io/gh/pallets/markupsafe
|
||||
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
MarkupSafe-1.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
MarkupSafe-1.1.0.dist-info/LICENSE.txt,sha256=7V249lpOdvRv2m6SF9gCDtq_nsg8tFpdeTdsWWM_g9M,1614
|
||||
MarkupSafe-1.1.0.dist-info/METADATA,sha256=TQpVUBLFsel2ZJb9pTbCUrLsM-SyNylHTrlhfyLkLs8,3585
|
||||
MarkupSafe-1.1.0.dist-info/RECORD,,
|
||||
MarkupSafe-1.1.0.dist-info/WHEEL,sha256=M5Ujap42zjfAFnpJOoFU72TFHuBKh-JF0Rqu5vZhkVE,110
|
||||
MarkupSafe-1.1.0.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
|
||||
markupsafe/__init__.py,sha256=T5J4pS7LRx1xRqfV3xz-QN_D9pSmfVDJnTrc2cTO4Ro,10164
|
||||
markupsafe/__init__.pyc,,
|
||||
markupsafe/_compat.py,sha256=3oSvQpEFzsJ29NKVy-Fqk6ZlRxmlCB5k0G21aN0zNtQ,596
|
||||
markupsafe/_compat.pyc,,
|
||||
markupsafe/_constants.py,sha256=ueEz1Jxdw5TKWBbhPr4Ad_2L2MSEh73AYiYe4l3cZy4,4728
|
||||
markupsafe/_constants.pyc,,
|
||||
markupsafe/_native.py,sha256=fUrjjbRXIpHM-8l9QXFJ2xg5rv_48U2aN99plyL0kfs,1911
|
||||
markupsafe/_native.pyc,,
|
||||
markupsafe/_speedups.c,sha256=VfElhhq9oulHEd2wBZ2MX9A80r4jFovsVGQD2zxmVk0,9883
|
||||
markupsafe/_speedups.so,sha256=M9jIOnbesM7g3fuHWHF516J7hLvJgsOXZ6Mhc1TIIi0,29204
|
||||
@@ -0,0 +1,5 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.31.1)
|
||||
Root-Is-Purelib: false
|
||||
Tag: cp27-cp27mu-manylinux1_x86_64
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
markupsafe
|
||||
@@ -0,0 +1 @@
|
||||
pip
|
||||
@@ -0,0 +1,174 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
196
.ve/lib/python2.7/site-packages/PyNaCl-1.3.0.dist-info/METADATA
Normal file
196
.ve/lib/python2.7/site-packages/PyNaCl-1.3.0.dist-info/METADATA
Normal file
@@ -0,0 +1,196 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: PyNaCl
|
||||
Version: 1.3.0
|
||||
Summary: Python binding to the Networking and Cryptography (NaCl) library
|
||||
Home-page: https://github.com/pyca/pynacl/
|
||||
Author: The PyNaCl developers
|
||||
Author-email: cryptography-dev@python.org
|
||||
License: Apache License 2.0
|
||||
Platform: UNKNOWN
|
||||
Classifier: Programming Language :: Python :: Implementation :: CPython
|
||||
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
||||
Classifier: Programming Language :: Python :: 2
|
||||
Classifier: Programming Language :: Python :: 2.7
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.4
|
||||
Classifier: Programming Language :: Python :: 3.5
|
||||
Classifier: Programming Language :: Python :: 3.6
|
||||
Classifier: Programming Language :: Python :: 3.7
|
||||
Provides-Extra: docs
|
||||
Provides-Extra: tests
|
||||
Requires-Dist: six
|
||||
Requires-Dist: cffi (>=1.4.1)
|
||||
Provides-Extra: docs
|
||||
Requires-Dist: sphinx (>=1.6.5); extra == 'docs'
|
||||
Requires-Dist: sphinx-rtd-theme; extra == 'docs'
|
||||
Provides-Extra: tests
|
||||
Requires-Dist: pytest (!=3.3.0,>=3.2.1); extra == 'tests'
|
||||
Requires-Dist: hypothesis (>=3.27.0); extra == 'tests'
|
||||
|
||||
===============================================
|
||||
PyNaCl: Python binding to the libsodium library
|
||||
===============================================
|
||||
|
||||
.. image:: https://img.shields.io/pypi/v/pynacl.svg
|
||||
:target: https://pypi.org/project/PyNaCl/
|
||||
:alt: Latest Version
|
||||
|
||||
.. image:: https://travis-ci.org/pyca/pynacl.svg?branch=master
|
||||
:target: https://travis-ci.org/pyca/pynacl
|
||||
|
||||
.. image:: https://codecov.io/github/pyca/pynacl/coverage.svg?branch=master
|
||||
:target: https://codecov.io/github/pyca/pynacl?branch=master
|
||||
|
||||
PyNaCl is a Python binding to `libsodium`_, which is a fork of the
|
||||
`Networking and Cryptography library`_. These libraries have a stated goal of
|
||||
improving usability, security and speed. It supports Python 2.7 and 3.4+ as
|
||||
well as PyPy 2.6+.
|
||||
|
||||
.. _libsodium: https://github.com/jedisct1/libsodium
|
||||
.. _Networking and Cryptography library: https://nacl.cr.yp.to/
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
* Digital signatures
|
||||
* Secret-key encryption
|
||||
* Public-key encryption
|
||||
* Hashing and message authentication
|
||||
* Password based key derivation and password hashing
|
||||
|
||||
Installation
|
||||
============
|
||||
|
||||
Binary wheel install
|
||||
--------------------
|
||||
|
||||
PyNaCl ships as a binary wheel on OS X, Windows and Linux ``manylinux1`` [#many]_ ,
|
||||
so all dependencies are included. Make sure you have an up-to-date pip
|
||||
and run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ pip install pynacl
|
||||
|
||||
Linux source build
|
||||
------------------
|
||||
|
||||
PyNaCl relies on `libsodium`_, a portable C library. A copy is bundled
|
||||
with PyNaCl so to install you can run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ pip install pynacl
|
||||
|
||||
If you'd prefer to use the version of ``libsodium`` provided by your
|
||||
distribution, you can disable the bundled copy during install by running:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ SODIUM_INSTALL=system pip install pynacl
|
||||
|
||||
.. warning:: Usage of the legacy ``easy_install`` command provided by setuptools
|
||||
is generally discouraged, and is completely unsupported in PyNaCl's case.
|
||||
|
||||
.. _libsodium: https://github.com/jedisct1/libsodium
|
||||
|
||||
.. [#many] `manylinux1 wheels <https://www.python.org/dev/peps/pep-0513/>`_
|
||||
are built on a baseline linux environment based on Centos 5.11
|
||||
and should work on most x86 and x86_64 glibc based linux environments.
|
||||
|
||||
Changelog
|
||||
=========
|
||||
|
||||
1.3.0 2018-09-26
|
||||
----------------
|
||||
|
||||
* Added support for Python 3.7.
|
||||
* Update ``libsodium`` to 1.0.16.
|
||||
* Run and test all code examples in PyNaCl docs through sphinx's
|
||||
doctest builder.
|
||||
* Add low-level bindings for chacha20-poly1305 AEAD constructions.
|
||||
* Add low-level bindings for the chacha20-poly1305 secretstream constructions.
|
||||
* Add low-level bindings for ed25519ph pre-hashed signing construction.
|
||||
* Add low-level bindings for constant-time increment and addition
|
||||
on fixed-precision big integers represented as little-endian
|
||||
byte sequences.
|
||||
* Add low-level bindings for the ISO/IEC 7816-4 compatible padding API.
|
||||
* Add low-level bindings for libsodium's crypto_kx... key exchange
|
||||
construction.
|
||||
* Set hypothesis deadline to None in tests/test_pwhash.py to avoid
|
||||
incorrect test failures on slower processor architectures. GitHub
|
||||
issue #370
|
||||
|
||||
1.2.1 - 2017-12-04
|
||||
------------------
|
||||
|
||||
* Update hypothesis minimum allowed version.
|
||||
* Infrastructure: add proper configuration for readthedocs builder
|
||||
runtime environment.
|
||||
|
||||
1.2.0 - 2017-11-01
|
||||
------------------
|
||||
|
||||
* Update ``libsodium`` to 1.0.15.
|
||||
* Infrastructure: add jenkins support for automatic build of
|
||||
``manylinux1`` binary wheels
|
||||
* Added support for ``SealedBox`` construction.
|
||||
* Added support for ``argon2i`` and ``argon2id`` password hashing constructs
|
||||
and restructured high-level password hashing implementation to expose
|
||||
the same interface for all hashers.
|
||||
* Added support for 128 bit ``siphashx24`` variant of ``siphash24``.
|
||||
* Added support for ``from_seed`` APIs for X25519 keypair generation.
|
||||
* Dropped support for Python 3.3.
|
||||
|
||||
1.1.2 - 2017-03-31
|
||||
------------------
|
||||
|
||||
* reorder link time library search path when using bundled
|
||||
libsodium
|
||||
|
||||
1.1.1 - 2017-03-15
|
||||
------------------
|
||||
|
||||
* Fixed a circular import bug in ``nacl.utils``.
|
||||
|
||||
1.1.0 - 2017-03-14
|
||||
------------------
|
||||
|
||||
* Dropped support for Python 2.6.
|
||||
* Added ``shared_key()`` method on ``Box``.
|
||||
* You can now pass ``None`` to ``nonce`` when encrypting with ``Box`` or
|
||||
``SecretBox`` and it will automatically generate a random nonce.
|
||||
* Added support for ``siphash24``.
|
||||
* Added support for ``blake2b``.
|
||||
* Added support for ``scrypt``.
|
||||
* Update ``libsodium`` to 1.0.11.
|
||||
* Default to the bundled ``libsodium`` when compiling.
|
||||
* All raised exceptions are defined mixing-in
|
||||
``nacl.exceptions.CryptoError``
|
||||
|
||||
1.0.1 - 2016-01-24
|
||||
------------------
|
||||
|
||||
* Fix an issue with absolute paths that prevented the creation of wheels.
|
||||
|
||||
1.0 - 2016-01-23
|
||||
----------------
|
||||
|
||||
* PyNaCl has been ported to use the new APIs available in cffi 1.0+.
|
||||
Due to this change we no longer support PyPy releases older than 2.6.
|
||||
* Python 3.2 support has been dropped.
|
||||
* Functions to convert between Ed25519 and Curve25519 keys have been added.
|
||||
|
||||
0.3.0 - 2015-03-04
|
||||
------------------
|
||||
|
||||
* The low-level API (`nacl.c.*`) has been changed to match the
|
||||
upstream NaCl C/C++ conventions (as well as those of other NaCl bindings).
|
||||
The order of arguments and return values has changed significantly. To
|
||||
avoid silent failures, `nacl.c` has been removed, and replaced with
|
||||
`nacl.bindings` (with the new argument ordering). If you have code which
|
||||
calls these functions (e.g. `nacl.c.crypto_box_keypair()`), you must review
|
||||
the new docstrings and update your code/imports to match the new
|
||||
conventions.
|
||||
|
||||
|
||||
@@ -0,0 +1,65 @@
|
||||
PyNaCl-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
PyNaCl-1.3.0.dist-info/LICENSE.txt,sha256=0xdK1j5yHUydzLitQyCEiZLTFDabxGMZcgtYAskVP-k,9694
|
||||
PyNaCl-1.3.0.dist-info/METADATA,sha256=pl3lEk7jyuFK7MNHloWA3m28tYU6xHI-S4le43id_x8,6632
|
||||
PyNaCl-1.3.0.dist-info/RECORD,,
|
||||
PyNaCl-1.3.0.dist-info/WHEEL,sha256=M5Ujap42zjfAFnpJOoFU72TFHuBKh-JF0Rqu5vZhkVE,110
|
||||
PyNaCl-1.3.0.dist-info/top_level.txt,sha256=wfdEOI_G2RIzmzsMyhpqP17HUh6Jcqi99to9aHLEslo,13
|
||||
nacl/__init__.py,sha256=PS9BuXZoCwSvrDpB8HXldTHnA6lb4y00IRi3uqdW5_E,1170
|
||||
nacl/__init__.pyc,,
|
||||
nacl/_sodium.so,sha256=Y91KhpxJJwnncNk__ClJgn5ksyrtGqqtyKy_DXInjNE,2491323
|
||||
nacl/bindings/__init__.py,sha256=dNH1zFjW87qszsld5oy6xMf2S1w2v_qshQwYHp66pz4,14943
|
||||
nacl/bindings/__init__.pyc,,
|
||||
nacl/bindings/crypto_aead.py,sha256=DE5zdi09GeHZxvmrhHtxVuTqF61y1cs8trTGh_6uP8Q,17335
|
||||
nacl/bindings/crypto_aead.pyc,,
|
||||
nacl/bindings/crypto_box.py,sha256=hbHJetr9id5OvkbJwJoeqRQAhqSIGwWC2aXRAF5oPE4,9708
|
||||
nacl/bindings/crypto_box.pyc,,
|
||||
nacl/bindings/crypto_generichash.py,sha256=-e4b4DaopLBQHhEjLSjEoumy5fOs4QdTb-hou1S34C4,8010
|
||||
nacl/bindings/crypto_generichash.pyc,,
|
||||
nacl/bindings/crypto_hash.py,sha256=7Xp4mpXr4cpn-hAOU66KlYVUCVHP6deT0v_eW4UZZXo,2243
|
||||
nacl/bindings/crypto_hash.pyc,,
|
||||
nacl/bindings/crypto_kx.py,sha256=2Gjxu5c7IKAwW2MOJa9zEn1EgpIVQ0tbZQs33REZb38,6937
|
||||
nacl/bindings/crypto_kx.pyc,,
|
||||
nacl/bindings/crypto_pwhash.py,sha256=lWhEFKmXzFhKnzzxtWDwozs0CseZDkGgTJaI4YQ5rak,16898
|
||||
nacl/bindings/crypto_pwhash.pyc,,
|
||||
nacl/bindings/crypto_scalarmult.py,sha256=VA2khmlUrnR24KK0CAdDw2dQ0jiYkku9-_NA-f1p21c,1803
|
||||
nacl/bindings/crypto_scalarmult.pyc,,
|
||||
nacl/bindings/crypto_secretbox.py,sha256=luvzB3lwBwXxKm63e9nA2neGtOXeeG8R9SyWEckIqdI,2864
|
||||
nacl/bindings/crypto_secretbox.pyc,,
|
||||
nacl/bindings/crypto_secretstream.py,sha256=gdKinW10jP3CZ51hanE40s6e39rz8iuajdXTSBSKVcM,10474
|
||||
nacl/bindings/crypto_secretstream.pyc,,
|
||||
nacl/bindings/crypto_shorthash.py,sha256=eVUE8byB1RjI0AoHib5BdZSSLtSqtdIcHgPCPWf2OZM,2189
|
||||
nacl/bindings/crypto_shorthash.pyc,,
|
||||
nacl/bindings/crypto_sign.py,sha256=uA0RdHM4vsBDNhph2f7fcuI_9K8vvW-4hNHjajTIVU0,9641
|
||||
nacl/bindings/crypto_sign.pyc,,
|
||||
nacl/bindings/randombytes.py,sha256=eThts6s-9xBXOl3GNzT57fV1dZUhzPjjAmAVIUHfcrc,988
|
||||
nacl/bindings/randombytes.pyc,,
|
||||
nacl/bindings/sodium_core.py,sha256=52z0K7y6Ge6IlXcysWDVN7UdYcTOij6v0Cb0OLo8_Qc,1079
|
||||
nacl/bindings/sodium_core.pyc,,
|
||||
nacl/bindings/utils.py,sha256=jOKsDbsjxN9v_HI8DOib72chyU3byqbynXxbiV909-g,4420
|
||||
nacl/bindings/utils.pyc,,
|
||||
nacl/encoding.py,sha256=tOiyIQVVpGU6A4Lzr0tMuqomhc_Aj0V_c1t56a-ZtPw,1928
|
||||
nacl/encoding.pyc,,
|
||||
nacl/exceptions.py,sha256=SG0BNtXnzmppI9in6xMTSizh1ryfgUIvIVMQv_A0bs8,1858
|
||||
nacl/exceptions.pyc,,
|
||||
nacl/hash.py,sha256=4DKlmqpWOZJLhzTPk7_JSGXQ32lJULsS3AzJCGsibus,5928
|
||||
nacl/hash.pyc,,
|
||||
nacl/hashlib.py,sha256=gMxOu-lIlKYr3ywSCjsJRBksYgpU2dvXgaAEfQz7PEg,3909
|
||||
nacl/hashlib.pyc,,
|
||||
nacl/public.py,sha256=-nwQof5ov-wSSdvvoXh-FavTtjfpRnYykZkatNKyLd0,13442
|
||||
nacl/public.pyc,,
|
||||
nacl/pwhash/__init__.py,sha256=CN0mP6yteSYp3ui-DyWR1vjULNrXVN_gQ72CmTPoao0,2695
|
||||
nacl/pwhash/__init__.pyc,,
|
||||
nacl/pwhash/_argon2.py,sha256=Eu3-juLws3_v1gNy5aeSVPEwuRVFdGOrfeF0wPH9VHA,1878
|
||||
nacl/pwhash/_argon2.pyc,,
|
||||
nacl/pwhash/argon2i.py,sha256=EpheK0UHJvZYca_EMhhOcX5GXaOr0xCjFDTIgmSCSDo,4598
|
||||
nacl/pwhash/argon2i.pyc,,
|
||||
nacl/pwhash/argon2id.py,sha256=IqNm5RQNEd1Z9F-bEWT-_Y9noU26QoTR5YdWONg1uuI,4610
|
||||
nacl/pwhash/argon2id.pyc,,
|
||||
nacl/pwhash/scrypt.py,sha256=F9iUKbzZUMG2ZXuuk70p4KXI_nItue3VA39zmwOESE8,6025
|
||||
nacl/pwhash/scrypt.pyc,,
|
||||
nacl/secret.py,sha256=jf4WuUjnnXTekZ2elGgQozZl6zGzxGY_0Nw0fwehUlg,5430
|
||||
nacl/secret.pyc,,
|
||||
nacl/signing.py,sha256=ZwA1l31ZgOIw_sAjiUPkzEo07uYYi8SE7Ni0G_R8ksQ,7302
|
||||
nacl/signing.pyc,,
|
||||
nacl/utils.py,sha256=hhmIriBM7Bwyh3beTrqVqDDucai5gXlSliAMVrxIHPI,1691
|
||||
nacl/utils.pyc,,
|
||||
@@ -0,0 +1,5 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.31.1)
|
||||
Root-Is-Purelib: false
|
||||
Tag: cp27-cp27mu-manylinux1_x86_64
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
_sodium
|
||||
nacl
|
||||
@@ -0,0 +1 @@
|
||||
pip
|
||||
@@ -0,0 +1,20 @@
|
||||
Copyright (c) 2017-2018 Ingy döt Net
|
||||
Copyright (c) 2006-2016 Kirill Simonov
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -0,0 +1,35 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: PyYAML
|
||||
Version: 3.13
|
||||
Summary: YAML parser and emitter for Python
|
||||
Home-page: http://pyyaml.org/wiki/PyYAML
|
||||
Author: Kirill Simonov
|
||||
Author-email: xi@resolvent.net
|
||||
License: MIT
|
||||
Download-URL: http://pyyaml.org/download/pyyaml/PyYAML-3.13.tar.gz
|
||||
Platform: Any
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: License :: OSI Approved :: MIT License
|
||||
Classifier: Operating System :: OS Independent
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 2
|
||||
Classifier: Programming Language :: Python :: 2.7
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.4
|
||||
Classifier: Programming Language :: Python :: 3.5
|
||||
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
||||
Classifier: Topic :: Text Processing :: Markup
|
||||
|
||||
YAML is a data serialization format designed for human readability
|
||||
and interaction with scripting languages. PyYAML is a YAML parser
|
||||
and emitter for Python.
|
||||
|
||||
PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
|
||||
support, capable extension API, and sensible error messages. PyYAML
|
||||
supports standard YAML tags and provides Python-specific tags that
|
||||
allow to represent an arbitrary Python object.
|
||||
|
||||
PyYAML is applicable for a broad range of tasks from complex
|
||||
configuration files to object serialization and persistance.
|
||||
|
||||
40
.ve/lib/python2.7/site-packages/PyYAML-3.13.dist-info/RECORD
Normal file
40
.ve/lib/python2.7/site-packages/PyYAML-3.13.dist-info/RECORD
Normal file
@@ -0,0 +1,40 @@
|
||||
PyYAML-3.13.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
PyYAML-3.13.dist-info/LICENSE,sha256=v8tqffv9MODMyLs0q3Er0VaFhq2MWweMyuRDsE55dJ4,1101
|
||||
PyYAML-3.13.dist-info/METADATA,sha256=CYonKobrYoECdL2rhy8TUdOptU02BZL8BQaOGbxuudM,1424
|
||||
PyYAML-3.13.dist-info/RECORD,,
|
||||
PyYAML-3.13.dist-info/WHEEL,sha256=-Crjs1WwpTj5CCeFg4GKXWPpZsiCLs9UbQGH1WBfXpw,105
|
||||
PyYAML-3.13.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11
|
||||
yaml/__init__.py,sha256=Qz7WIGATMtHvmu_vLmCcFTaiyZn5ptv2rsNGsdzlnbc,9776
|
||||
yaml/__init__.pyc,,
|
||||
yaml/composer.py,sha256=pOjZ5afqNfH22WXyS6xlQCB2PbSrFPjK-qFPOEI76fw,4921
|
||||
yaml/composer.pyc,,
|
||||
yaml/constructor.py,sha256=S_Pux76-hgmgtJeJVtSvQ9ynmtEIR2jAx2ljAochKU0,25145
|
||||
yaml/constructor.pyc,,
|
||||
yaml/cyaml.py,sha256=xK_IxkrRcetZeNwB_wzDAHYCWsumOFfsTlk3CeoM5kQ,3290
|
||||
yaml/cyaml.pyc,,
|
||||
yaml/dumper.py,sha256=ONPYNHirnLm-qCm-h9swnMWzZhncilexboIPRoNdcq4,2719
|
||||
yaml/dumper.pyc,,
|
||||
yaml/emitter.py,sha256=Xya7zhTX3ykxMAdAgDIedejmLb1Q71W2G4yt4nTSMIM,43298
|
||||
yaml/emitter.pyc,,
|
||||
yaml/error.py,sha256=7K-NdIv0qNKPKbnXxEg0L_b9K7nYDORr3rzm8_b-iBY,2559
|
||||
yaml/error.pyc,,
|
||||
yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445
|
||||
yaml/events.pyc,,
|
||||
yaml/loader.py,sha256=t_WLbw1-iWQ4KT_FUppJu30cFIU-l8NCb7bjoXJoV6A,1132
|
||||
yaml/loader.pyc,,
|
||||
yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440
|
||||
yaml/nodes.pyc,,
|
||||
yaml/parser.py,sha256=sgXahZA3DkySYnaC4D_zcl3l2y4Y5R40icWtdwkF_NE,25542
|
||||
yaml/parser.pyc,,
|
||||
yaml/reader.py,sha256=hKuxSbid1rSlfKBsshf5qaPwVduaCJA5t5S9Jum6CAA,6746
|
||||
yaml/reader.pyc,,
|
||||
yaml/representer.py,sha256=x3F9vDF4iiPit8sR8tgR-kjtotWTzH_Zv9moq0fMtlY,17711
|
||||
yaml/representer.pyc,,
|
||||
yaml/resolver.py,sha256=5Z3boiMikL6Qt6fS5Mt8fHym0GxbW7CMT2f2fnD1ZPQ,9122
|
||||
yaml/resolver.pyc,,
|
||||
yaml/scanner.py,sha256=ft5i4fP9m0MrpKY9N8Xa24H1LqKhwGQXLG1Hd9gCSsk,52446
|
||||
yaml/scanner.pyc,,
|
||||
yaml/serializer.py,sha256=tRsRwfu5E9fpLU7LY3vBQf2prt77hwnYlMt5dnBJLig,4171
|
||||
yaml/serializer.pyc,,
|
||||
yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573
|
||||
yaml/tokens.pyc,,
|
||||
@@ -0,0 +1,5 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.32.2)
|
||||
Root-Is-Purelib: false
|
||||
Tag: cp27-cp27mu-linux_x86_64
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
_yaml
|
||||
yaml
|
||||
BIN
.ve/lib/python2.7/site-packages/_cffi_backend.so
Executable file
BIN
.ve/lib/python2.7/site-packages/_cffi_backend.so
Executable file
Binary file not shown.
675
.ve/lib/python2.7/site-packages/ansible-2.7.1.dist-info/COPYING
Normal file
675
.ve/lib/python2.7/site-packages/ansible-2.7.1.dist-info/COPYING
Normal file
@@ -0,0 +1,675 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
pip
|
||||
194
.ve/lib/python2.7/site-packages/ansible-2.7.1.dist-info/METADATA
Normal file
194
.ve/lib/python2.7/site-packages/ansible-2.7.1.dist-info/METADATA
Normal file
@@ -0,0 +1,194 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: ansible
|
||||
Version: 2.7.1
|
||||
Summary: Radically simple IT automation
|
||||
Home-page: https://ansible.com/
|
||||
Author: Ansible, Inc.
|
||||
Author-email: info@ansible.com
|
||||
License: GPLv3+
|
||||
Project-URL: Source Code, https://github.com/ansible/ansible
|
||||
Project-URL: CI: Shippable, https://app.shippable.com/github/ansible/ansible
|
||||
Project-URL: Code of Conduct, https://docs.ansible.com/ansible/latest/community/code_of_conduct.html
|
||||
Project-URL: Documentation, https://docs.ansible.com/ansible/
|
||||
Project-URL: Bug Tracker, https://github.com/ansible/ansible/issues
|
||||
Project-URL: Mailing lists, https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information
|
||||
Platform: UNKNOWN
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Environment :: Console
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: Intended Audience :: Information Technology
|
||||
Classifier: Intended Audience :: System Administrators
|
||||
Classifier: License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)
|
||||
Classifier: Natural Language :: English
|
||||
Classifier: Operating System :: POSIX
|
||||
Classifier: Programming Language :: Python :: 2
|
||||
Classifier: Programming Language :: Python :: 2.7
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.5
|
||||
Classifier: Programming Language :: Python :: 3.6
|
||||
Classifier: Programming Language :: Python :: 3.7
|
||||
Classifier: Topic :: System :: Installation/Setup
|
||||
Classifier: Topic :: System :: Systems Administration
|
||||
Classifier: Topic :: Utilities
|
||||
Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*
|
||||
Requires-Dist: jinja2
|
||||
Requires-Dist: PyYAML
|
||||
Requires-Dist: paramiko
|
||||
Requires-Dist: cryptography
|
||||
Requires-Dist: setuptools
|
||||
Provides-Extra: azure
|
||||
Requires-Dist: packaging; extra == 'azure'
|
||||
Requires-Dist: requests[security]; extra == 'azure'
|
||||
Requires-Dist: azure-cli-core (==2.0.35); extra == 'azure'
|
||||
Requires-Dist: azure-cli-nspkg (==3.0.2); extra == 'azure'
|
||||
Requires-Dist: azure-common (==1.1.11); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-batch (==4.1.0); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-compute (==2.1.0); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-containerinstance (==0.4.0); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-containerregistry (==2.0.0); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-containerservice (==3.0.1); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-dns (==1.2.0); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-keyvault (==0.40.0); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-marketplaceordering (==0.1.0); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-monitor (==0.5.2); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-network (==1.7.1); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-nspkg (==2.0.0); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-rdbms (==1.2.0); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-resource (==1.2.2); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-sql (==0.7.1); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-storage (==1.5.0); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-trafficmanager (==0.50.0); extra == 'azure'
|
||||
Requires-Dist: azure-mgmt-web (==0.32.0); extra == 'azure'
|
||||
Requires-Dist: azure-nspkg (==2.0.0); extra == 'azure'
|
||||
Requires-Dist: azure-storage (==0.35.1); extra == 'azure'
|
||||
Requires-Dist: msrest (==0.4.29); extra == 'azure'
|
||||
Requires-Dist: msrestazure (==0.4.31); extra == 'azure'
|
||||
Requires-Dist: azure-keyvault (==1.0.0a1); extra == 'azure'
|
||||
Requires-Dist: azure-graphrbac (==0.40.0); extra == 'azure'
|
||||
|
||||
|PyPI version| |Docs badge| |Build Status|
|
||||
|
||||
*******
|
||||
Ansible
|
||||
*******
|
||||
|
||||
Ansible is a radically simple IT automation system. It handles
|
||||
configuration-management, application deployment, cloud provisioning,
|
||||
ad-hoc task-execution, and multinode orchestration -- including
|
||||
trivializing things like zero-downtime rolling updates with load
|
||||
balancers.
|
||||
|
||||
Read the documentation and more at https://ansible.com/
|
||||
|
||||
You can find installation instructions
|
||||
`here <https://docs.ansible.com/intro_getting_started.html>`_ for a
|
||||
variety of platforms.
|
||||
|
||||
Most users should probably install a released version of Ansible from ``pip``, a package manager or
|
||||
our `release repository <https://releases.ansible.com/ansible/>`_. `Officially supported
|
||||
<https://www.ansible.com/ansible-engine>`_ builds of Ansible are also available. Some power users
|
||||
run directly from the development branch - while significant efforts are made to ensure that
|
||||
``devel`` is reasonably stable, you're more likely to encounter breaking changes when running
|
||||
Ansible this way.
|
||||
|
||||
Design Principles
|
||||
=================
|
||||
|
||||
* Have a dead simple setup process and a minimal learning curve
|
||||
* Manage machines very quickly and in parallel
|
||||
* Avoid custom-agents and additional open ports, be agentless by
|
||||
leveraging the existing SSH daemon
|
||||
* Describe infrastructure in a language that is both machine and human
|
||||
friendly
|
||||
* Focus on security and easy auditability/review/rewriting of content
|
||||
* Manage new remote machines instantly, without bootstrapping any
|
||||
software
|
||||
* Allow module development in any dynamic language, not just Python
|
||||
* Be usable as non-root
|
||||
* Be the easiest IT automation system to use, ever.
|
||||
|
||||
Get Involved
|
||||
============
|
||||
|
||||
* Read `Community
|
||||
Information <https://docs.ansible.com/community.html>`_ for all
|
||||
kinds of ways to contribute to and interact with the project,
|
||||
including mailing list information and how to submit bug reports and
|
||||
code to Ansible.
|
||||
* All code submissions are done through pull requests. Take care to
|
||||
make sure no merge commits are in the submission, and use
|
||||
``git rebase`` vs ``git merge`` for this reason. If submitting a
|
||||
large code change (other than modules), it's probably a good idea to
|
||||
join ansible-devel and talk about what you would like to do or add
|
||||
first to avoid duplicate efforts. This not only helps everyone
|
||||
know what's going on, it also helps save time and effort if we decide
|
||||
some changes are needed.
|
||||
* Users list:
|
||||
`ansible-project <https://groups.google.com/group/ansible-project>`_
|
||||
* Development list:
|
||||
`ansible-devel <https://groups.google.com/group/ansible-devel>`_
|
||||
* Announcement list:
|
||||
`ansible-announce <https://groups.google.com/group/ansible-announce>`_
|
||||
-- read only
|
||||
* irc.freenode.net: #ansible
|
||||
|
||||
Branch Info
|
||||
===========
|
||||
|
||||
* Releases are named after Led Zeppelin songs. (Releases prior to 2.0
|
||||
were named after Van Halen songs.)
|
||||
* The devel branch corresponds to the release actively under
|
||||
development.
|
||||
* Various release-X.Y branches exist for previous releases.
|
||||
* We'd love to have your contributions, read `Community
|
||||
Information <https://docs.ansible.com/community.html>`_ for notes on
|
||||
how to get started.
|
||||
|
||||
Roadmap
|
||||
=======
|
||||
|
||||
Based on team and community feedback, an initial roadmap will be published for a major or minor version (ex: 2.0, 2.1).
|
||||
Subminor versions will generally not have roadmaps published.
|
||||
|
||||
Ansible 2.1 was the first release which published this and asked for feedback in this manner.
|
||||
Feedback on the roadmap and the new process is quite welcome.
|
||||
The team is aiming for further transparency and better inclusion of both community desires and submissions.
|
||||
|
||||
These are the team's *best guess* roadmaps based on the Ansible team's experience and are also based on requests and feedback from the community.
|
||||
There are things that may not make it due to time constraints, lack of community maintainers, etc.
|
||||
Each roadmap is published both as an idea of what is upcoming in Ansible, and as a medium for seeking further feedback from the community.
|
||||
|
||||
There are multiple places for you to submit feedback:
|
||||
|
||||
- Add to the agenda of an IRC `Core Team Meeting <https://github.com/ansible/community/blob/master/meetings/README.md>`_ (preferred)
|
||||
- Ansible's google-group: ansible-devel
|
||||
- AnsibleFest conferences
|
||||
- IRC Freenode channel: #ansible-devel (this one may have things lost in lots of conversation)
|
||||
|
||||
For additional details consult the published `Ansible Roadmap <https://docs.ansible.com/ansible/devel/roadmap/>`_.
|
||||
|
||||
Authors
|
||||
=======
|
||||
|
||||
Ansible was created by `Michael DeHaan <https://github.com/mpdehaan>`_
|
||||
(michael.dehaan/gmail/com) and has contributions from over 3700 users
|
||||
(and growing). Thanks everyone!
|
||||
|
||||
`Ansible <https://www.ansible.com>`_ is sponsored by `Red Hat, Inc.
|
||||
<https://www.redhat.com>`_
|
||||
|
||||
License
|
||||
=======
|
||||
|
||||
GNU General Public License v3.0
|
||||
|
||||
See `COPYING <COPYING>`_ to see the full text.
|
||||
|
||||
.. |PyPI version| image:: https://img.shields.io/pypi/v/ansible.svg
|
||||
:target: https://pypi.org/project/ansible
|
||||
.. |Docs badge| image:: https://img.shields.io/badge/docs-latest-brightgreen.svg
|
||||
:target: https://docs.ansible.com/ansible
|
||||
.. |Build Status| image:: https://api.shippable.com/projects/573f79d02a8192902e20e34b/badge?branch=devel
|
||||
:target: https://app.shippable.com/projects/573f79d02a8192902e20e34b
|
||||
|
||||
|
||||
6408
.ve/lib/python2.7/site-packages/ansible-2.7.1.dist-info/RECORD
Normal file
6408
.ve/lib/python2.7/site-packages/ansible-2.7.1.dist-info/RECORD
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,5 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.32.2)
|
||||
Root-Is-Purelib: true
|
||||
Tag: cp27-none-any
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
ansible
|
||||
28
.ve/lib/python2.7/site-packages/ansible/__init__.py
Normal file
28
.ve/lib/python2.7/site-packages/ansible/__init__.py
Normal file
@@ -0,0 +1,28 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
# Note: Do not add any code to this file. The ansible module may be
|
||||
# a namespace package when using Ansible-2.1+ Anything in this file may not be
|
||||
# available if one of the other packages in the namespace is loaded first.
|
||||
#
|
||||
# This is for backwards compat. Code should be ported to get these from
|
||||
# ansible.release instead of from here.
|
||||
from ansible.release import __version__, __author__
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/__init__.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/__init__.pyc
Normal file
Binary file not shown.
820
.ve/lib/python2.7/site-packages/ansible/cli/__init__.py
Normal file
820
.ve/lib/python2.7/site-packages/ansible/cli/__init__.py
Normal file
@@ -0,0 +1,820 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import getpass
|
||||
import operator
|
||||
import optparse
|
||||
import os
|
||||
import subprocess
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import yaml
|
||||
|
||||
from abc import ABCMeta, abstractmethod
|
||||
|
||||
import ansible
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleOptionsError, AnsibleError
|
||||
from ansible.inventory.manager import InventoryManager
|
||||
from ansible.module_utils.six import with_metaclass, string_types
|
||||
from ansible.module_utils._text import to_bytes, to_text
|
||||
from ansible.parsing.dataloader import DataLoader
|
||||
from ansible.release import __version__
|
||||
from ansible.utils.path import unfrackpath
|
||||
from ansible.utils.vars import load_extra_vars, load_options_vars
|
||||
from ansible.vars.manager import VariableManager
|
||||
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
|
||||
|
||||
# Reuse the display object already configured by the bin/ansible* entry-point
# script when one exists; otherwise fall back to a fresh Display instance.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class SortedOptParser(optparse.OptionParser):
    '''Optparser which sorts the options by opt before outputting --help'''

    def format_help(self, formatter=None, epilog=None):
        """Return the help text with the option list sorted alphabetically.

        :kwarg formatter: optional optparse help formatter to use.
        :kwarg epilog: accepted for interface compatibility; not used here.
        """
        # Sort in place by the canonical option string so help output is stable.
        self.option_list.sort(key=operator.methodcaller('get_opt_string'))
        # Bug fix: the original always passed ``formatter=None``, silently
        # ignoring a caller-supplied formatter.  Forward it instead; the
        # default of None preserves the previous behavior.
        return optparse.OptionParser.format_help(self, formatter=formatter)
# Note: Inherit from SortedOptParser so that we get our format_help method
|
||||
class InvalidOptsParser(SortedOptParser):
    '''Ignore invalid options.

    Meant for the special case where we need to take care of help and version
    but may not know the full range of options yet.  (See it in use in set_action)
    '''
    def __init__(self, parser):
        # Since this is special purposed to just handle help and version, we
        # take a pre-existing option parser here and set our options from
        # that.  This allows us to give accurate help based on the given
        # option parser.
        SortedOptParser.__init__(self, usage=parser.usage,
                                 option_list=parser.option_list,
                                 option_class=parser.option_class,
                                 conflict_handler=parser.conflict_handler,
                                 description=parser.description,
                                 formatter=parser.formatter,
                                 add_help_option=False,
                                 prog=parser.prog,
                                 epilog=parser.epilog)
        self.version = parser.version

    def _process_long_opt(self, rargs, values):
        # Swallow unknown --long options instead of erroring out.
        try:
            optparse.OptionParser._process_long_opt(self, rargs, values)
        except optparse.BadOptionError:
            pass

    def _process_short_opts(self, rargs, values):
        # Swallow unknown -s short options instead of erroring out.
        try:
            optparse.OptionParser._process_short_opts(self, rargs, values)
        except optparse.BadOptionError:
            pass
class CLI(with_metaclass(ABCMeta, object)):
|
||||
''' code behind bin/ansible* programs '''
|
||||
|
||||
    # Sub-command names a subclass accepts; consumed by set_action().
    VALID_ACTIONS = []

    # Regexes for the inline markup used in module documentation strings.
    _ITALIC = re.compile(r"I\(([^)]+)\)")
    _BOLD = re.compile(r"B\(([^)]+)\)")
    _MODULE = re.compile(r"M\(([^)]+)\)")
    _URL = re.compile(r"U\(([^)]+)\)")
    _CONST = re.compile(r"C\(([^)]+)\)")

    # Pager command used for long help/doc output.
    PAGER = 'less'

    # -F (quit-if-one-screen) -R (allow raw ansi control chars)
    # -S (chop long lines) -X (disable termcap init and de-init)
    LESS_OPTS = 'FRSX'
    # Subclasses set this True to do their own inventory handling in parse().
    SKIP_INVENTORY_DEFAULTS = False
    def __init__(self, args, callback=None):
        """
        Base init method for all command line programs

        :arg args: raw command line argument list (sys.argv style).
        :kwarg callback: optional callback object passed through by subclasses.
        """

        self.args = args
        self.options = None   # filled in by parse()
        self.parser = None    # filled in by subclass parse()
        self.action = None    # filled in by set_action()
        self.callback = callback
def set_action(self):
|
||||
"""
|
||||
Get the action the user wants to execute from the sys argv list.
|
||||
"""
|
||||
for i in range(0, len(self.args)):
|
||||
arg = self.args[i]
|
||||
if arg in self.VALID_ACTIONS:
|
||||
self.action = arg
|
||||
del self.args[i]
|
||||
break
|
||||
|
||||
if not self.action:
|
||||
# if we're asked for help or version, we don't need an action.
|
||||
# have to use a special purpose Option Parser to figure that out as
|
||||
# the standard OptionParser throws an error for unknown options and
|
||||
# without knowing action, we only know of a subset of the options
|
||||
# that could be legal for this command
|
||||
tmp_parser = InvalidOptsParser(self.parser)
|
||||
tmp_options, tmp_args = tmp_parser.parse_args(self.args)
|
||||
if not(hasattr(tmp_options, 'help') and tmp_options.help) or (hasattr(tmp_options, 'version') and tmp_options.version):
|
||||
raise AnsibleOptionsError("Missing required action")
|
||||
|
||||
def execute(self):
|
||||
"""
|
||||
Actually runs a child defined method using the execute_<action> pattern
|
||||
"""
|
||||
fn = getattr(self, "execute_%s" % self.action)
|
||||
fn()
|
||||
|
||||
    @abstractmethod
    def run(self):
        """Run the ansible command

        Subclasses must implement this method.  It does the actual work of
        running an Ansible command.  This base implementation only emits
        version/config diagnostics and deprecation warnings; subclasses
        call it via super() before doing their own work.
        """

        display.vv(to_text(self.parser.get_version()))

        if C.CONFIG_FILE:
            display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
        else:
            display.v(u"No config file found; using defaults")

        # warn about deprecated config options
        for deprecated in C.config.DEPRECATED:
            name = deprecated[0]
            why = deprecated[1]['why']
            if 'alternatives' in deprecated[1]:
                alt = ', use %s instead' % deprecated[1]['alternatives']
            else:
                alt = ''
            ver = deprecated[1]['version']
            display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)
@staticmethod
|
||||
def split_vault_id(vault_id):
|
||||
# return (before_@, after_@)
|
||||
# if no @, return whole string as after_
|
||||
if '@' not in vault_id:
|
||||
return (None, vault_id)
|
||||
|
||||
parts = vault_id.split('@', 1)
|
||||
ret = tuple(parts)
|
||||
return ret
|
||||
|
||||
@staticmethod
|
||||
def build_vault_ids(vault_ids, vault_password_files=None,
|
||||
ask_vault_pass=None, create_new_password=None,
|
||||
auto_prompt=True):
|
||||
vault_password_files = vault_password_files or []
|
||||
vault_ids = vault_ids or []
|
||||
|
||||
# convert vault_password_files into vault_ids slugs
|
||||
for password_file in vault_password_files:
|
||||
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
|
||||
|
||||
# note this makes --vault-id higher precendence than --vault-password-file
|
||||
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
|
||||
# used by --vault-id and --vault-password-file
|
||||
vault_ids.append(id_slug)
|
||||
|
||||
# if an action needs an encrypt password (create_new_password=True) and we dont
|
||||
# have other secrets setup, then automatically add a password prompt as well.
|
||||
# prompts cant/shouldnt work without a tty, so dont add prompt secrets
|
||||
if ask_vault_pass or (not vault_ids and auto_prompt):
|
||||
|
||||
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
|
||||
vault_ids.append(id_slug)
|
||||
|
||||
return vault_ids
|
||||
|
||||
# TODO: remove the now unused args
|
||||
@staticmethod
|
||||
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
|
||||
ask_vault_pass=None, create_new_password=False,
|
||||
auto_prompt=True):
|
||||
# list of tuples
|
||||
vault_secrets = []
|
||||
|
||||
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
|
||||
# we need to show different prompts. This is for compat with older Towers that expect a
|
||||
# certain vault password prompt format, so 'promp_ask_vault_pass' vault_id gets the old format.
|
||||
prompt_formats = {}
|
||||
|
||||
# If there are configured default vault identities, they are considered 'first'
|
||||
# so we prepend them to vault_ids (from cli) here
|
||||
|
||||
vault_password_files = vault_password_files or []
|
||||
if C.DEFAULT_VAULT_PASSWORD_FILE:
|
||||
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
|
||||
|
||||
if create_new_password:
|
||||
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
|
||||
'Confirm vew vault password (%(vault_id)s): ']
|
||||
# 2.3 format prompts for --ask-vault-pass
|
||||
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
|
||||
'Confirm New Vault password: ']
|
||||
else:
|
||||
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
|
||||
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
|
||||
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
|
||||
|
||||
vault_ids = CLI.build_vault_ids(vault_ids,
|
||||
vault_password_files,
|
||||
ask_vault_pass,
|
||||
create_new_password,
|
||||
auto_prompt=auto_prompt)
|
||||
|
||||
for vault_id_slug in vault_ids:
|
||||
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
|
||||
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
|
||||
|
||||
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
|
||||
# confusing since it will use the old format without the vault id in the prompt
|
||||
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
|
||||
|
||||
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
|
||||
# always gets the old format for Tower compatibility.
|
||||
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
|
||||
# format since Tower needs to match on that format.
|
||||
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
|
||||
vault_id=built_vault_id)
|
||||
|
||||
# a empty or invalid password from the prompt will warn and continue to the next
|
||||
# without erroring globablly
|
||||
try:
|
||||
prompted_vault_secret.load()
|
||||
except AnsibleError as exc:
|
||||
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
|
||||
raise
|
||||
|
||||
vault_secrets.append((built_vault_id, prompted_vault_secret))
|
||||
|
||||
# update loader with new secrets incrementally, so we can load a vault password
|
||||
# that is encrypted with a vault secret provided earlier
|
||||
loader.set_vault_secrets(vault_secrets)
|
||||
continue
|
||||
|
||||
# assuming anything else is a password file
|
||||
display.vvvvv('Reading vault password file: %s' % vault_id_value)
|
||||
# read vault_pass from a file
|
||||
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
|
||||
vault_id=vault_id_name,
|
||||
loader=loader)
|
||||
|
||||
# an invalid password file will error globally
|
||||
try:
|
||||
file_vault_secret.load()
|
||||
except AnsibleError as exc:
|
||||
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, exc))
|
||||
raise
|
||||
|
||||
if vault_id_name:
|
||||
vault_secrets.append((vault_id_name, file_vault_secret))
|
||||
else:
|
||||
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
|
||||
|
||||
# update loader with as-yet-known vault secrets
|
||||
loader.set_vault_secrets(vault_secrets)
|
||||
|
||||
return vault_secrets
|
||||
|
||||
    def ask_passwords(self):
        ''' prompt for connection and become passwords if needed

        Returns a ``(sshpass, becomepass)`` tuple of bytes (or None for
        passwords that were not requested or not supplied).
        '''

        op = self.options
        sshpass = None
        becomepass = None
        become_prompt = ''

        # Generic "BECOME" wording or the concrete method name (SUDO/SU/...)
        become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op.become_method.upper()

        try:
            if op.ask_pass:
                sshpass = getpass.getpass(prompt="SSH password: ")
                become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
                if sshpass:
                    sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
            else:
                become_prompt = "%s password: " % become_prompt_method

            if op.become_ask_pass:
                becomepass = getpass.getpass(prompt=become_prompt)
                # empty become password falls back to the SSH password when both were asked
                if op.ask_pass and becomepass == '':
                    becomepass = sshpass
                if becomepass:
                    becomepass = to_bytes(becomepass)
        except EOFError:
            # stdin closed (e.g. non-interactive run); leave passwords as-is
            pass

        return (sshpass, becomepass)
    def normalize_become_options(self):
        ''' this keeps backwards compatibility with sudo/su self.options

        Folds the deprecated --sudo/--su family of options into the
        consolidated become_* options, emitting deprecation warnings.
        '''
        self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
        self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER

        def _dep(which):
            # helper: warn once per deprecated option family
            display.deprecated('The %s command line option has been deprecated in favor of the "become" command line arguments' % which, '2.9')

        # explicit --become wins; otherwise map the legacy flags onto become
        if self.options.become:
            pass
        elif self.options.sudo:
            self.options.become = True
            self.options.become_method = 'sudo'
            _dep('sudo')
        elif self.options.su:
            self.options.become = True
            self.options.become_method = 'su'
            _dep('su')

        # other deprecations:
        if self.options.ask_sudo_pass or self.options.sudo_user:
            _dep('sudo')
        if self.options.ask_su_pass or self.options.su_user:
            _dep('su')
    def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False, vault_rekey_opts=False):
        ''' check for conflicting options

        Each keyword enables a family of mutual-exclusion checks; calls
        ``self.parser.error`` (which exits) on a conflict.
        '''

        op = self.options

        if vault_opts:
            # Check for vault related conflicts
            if (op.ask_vault_pass and op.vault_password_files):
                self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")

        if vault_rekey_opts:
            if (op.new_vault_id and op.new_vault_password_file):
                self.parser.error("--new-vault-password-file and --new-vault-id are mutually exclusive")

        if runas_opts:
            # Check for privilege escalation conflicts: su, sudo and become
            # families may not be mixed with each other.
            if ((op.su or op.su_user) and (op.sudo or op.sudo_user) or
                    (op.su or op.su_user) and (op.become or op.become_user) or
                    (op.sudo or op.sudo_user) and (op.become or op.become_user)):

                self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') and su arguments ('--su', '--su-user', and '--ask-su-pass') "
                                  "and become arguments ('--become', '--become-user', and '--ask-become-pass') are exclusive of each other")

        if fork_opts:
            if op.forks < 1:
                self.parser.error("The number of processes (--forks) must be >= 1")
    @staticmethod
    def unfrack_paths(option, opt, value, parser):
        """optparse callback: prepend normalized path(s) to the option's list value."""
        paths = getattr(parser.values, option.dest)
        if paths is None:
            paths = []

        if isinstance(value, string_types):
            # pathsep-separated string: normalize each non-empty component
            paths[:0] = [unfrackpath(x) for x in value.split(os.pathsep) if x]
        elif isinstance(value, list):
            paths[:0] = [unfrackpath(x) for x in value if x]
        else:
            pass  # FIXME: should we raise options error?

        setattr(parser.values, option.dest, paths)
@staticmethod
|
||||
def unfrack_path(option, opt, value, parser):
|
||||
if value != '-':
|
||||
setattr(parser.values, option.dest, unfrackpath(value))
|
||||
else:
|
||||
setattr(parser.values, option.dest, value)
|
||||
|
||||
@staticmethod
|
||||
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False,
|
||||
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False,
|
||||
runas_prompt_opts=False, desc=None, basedir_opts=False, vault_rekey_opts=False):
|
||||
''' create an options parser for most ansible scripts '''
|
||||
|
||||
# base opts
|
||||
parser = SortedOptParser(usage, version=CLI.version("%prog"), description=desc, epilog=epilog)
|
||||
parser.add_option('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count",
|
||||
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
|
||||
|
||||
if inventory_opts:
|
||||
parser.add_option('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
|
||||
help="specify inventory host path or comma separated host list. --inventory-file is deprecated")
|
||||
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
|
||||
help='outputs a list of matching hosts; does not execute anything else')
|
||||
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
|
||||
help='further limit selected hosts to an additional pattern')
|
||||
|
||||
if module_opts:
|
||||
parser.add_option('-M', '--module-path', dest='module_path', default=None,
|
||||
help="prepend colon-separated path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH,
|
||||
action="callback", callback=CLI.unfrack_paths, type='str')
|
||||
if runtask_opts:
|
||||
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
|
||||
help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
|
||||
|
||||
if fork_opts:
|
||||
parser.add_option('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
|
||||
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
|
||||
|
||||
if vault_opts:
|
||||
parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
|
||||
help='ask for vault password')
|
||||
parser.add_option('--vault-password-file', default=[], dest='vault_password_files',
|
||||
help="vault password file", action="callback", callback=CLI.unfrack_paths, type='string')
|
||||
parser.add_option('--vault-id', default=[], dest='vault_ids', action='append', type='string',
|
||||
help='the vault identity to use')
|
||||
|
||||
if vault_rekey_opts:
|
||||
parser.add_option('--new-vault-password-file', default=None, dest='new_vault_password_file',
|
||||
help="new vault password file for rekey", action="callback", callback=CLI.unfrack_path, type='string')
|
||||
parser.add_option('--new-vault-id', default=None, dest='new_vault_id', type='string',
|
||||
help='the new vault identity to use for rekey')
|
||||
|
||||
if subset_opts:
|
||||
parser.add_option('-t', '--tags', dest='tags', default=C.TAGS_RUN, action='append',
|
||||
help="only run plays and tasks tagged with these values")
|
||||
parser.add_option('--skip-tags', dest='skip_tags', default=C.TAGS_SKIP, action='append',
|
||||
help="only run plays and tasks whose tags do not match these values")
|
||||
|
||||
if output_opts:
|
||||
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
|
||||
help='condense output')
|
||||
parser.add_option('-t', '--tree', dest='tree', default=None,
|
||||
help='log output to this directory')
|
||||
|
||||
if connect_opts:
|
||||
connect_group = optparse.OptionGroup(parser, "Connection Options", "control as whom and how to connect to hosts")
|
||||
connect_group.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
|
||||
help='ask for connection password')
|
||||
connect_group.add_option('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
|
||||
help='use this file to authenticate the connection', action="callback", callback=CLI.unfrack_path, type='string')
|
||||
connect_group.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
|
||||
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
|
||||
connect_group.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
|
||||
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
|
||||
connect_group.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
|
||||
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
|
||||
connect_group.add_option('--ssh-common-args', default='', dest='ssh_common_args',
|
||||
help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
|
||||
connect_group.add_option('--sftp-extra-args', default='', dest='sftp_extra_args',
|
||||
help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
|
||||
connect_group.add_option('--scp-extra-args', default='', dest='scp_extra_args',
|
||||
help="specify extra arguments to pass to scp only (e.g. -l)")
|
||||
connect_group.add_option('--ssh-extra-args', default='', dest='ssh_extra_args',
|
||||
help="specify extra arguments to pass to ssh only (e.g. -R)")
|
||||
|
||||
parser.add_option_group(connect_group)
|
||||
|
||||
runas_group = None
|
||||
rg = optparse.OptionGroup(parser, "Privilege Escalation Options", "control how and which user you become as on target hosts")
|
||||
if runas_opts:
|
||||
runas_group = rg
|
||||
# priv user defaults to root later on to enable detecting when this option was given here
|
||||
runas_group.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
|
||||
help="run operations with sudo (nopasswd) (deprecated, use become)")
|
||||
runas_group.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
|
||||
help='desired sudo user (default=root) (deprecated, use become)')
|
||||
runas_group.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
|
||||
help='run operations with su (deprecated, use become)')
|
||||
runas_group.add_option('-R', '--su-user', default=None,
|
||||
help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)
|
||||
|
||||
# consolidated privilege escalation (become)
|
||||
runas_group.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
|
||||
help="run operations with become (does not imply password prompting)")
|
||||
runas_group.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='choice', choices=C.BECOME_METHODS,
|
||||
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" %
|
||||
(C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
|
||||
runas_group.add_option('--become-user', default=None, dest='become_user', type='string',
|
||||
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
|
||||
|
||||
if runas_opts or runas_prompt_opts:
|
||||
if not runas_group:
|
||||
runas_group = rg
|
||||
runas_group.add_option('--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
|
||||
help='ask for sudo password (deprecated, use become)')
|
||||
runas_group.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
|
||||
help='ask for su password (deprecated, use become)')
|
||||
runas_group.add_option('-K', '--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
|
||||
help='ask for privilege escalation password')
|
||||
|
||||
if runas_group:
|
||||
parser.add_option_group(runas_group)
|
||||
|
||||
if async_opts:
|
||||
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval',
|
||||
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
|
||||
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
|
||||
help='run asynchronously, failing after X seconds (default=N/A)')
|
||||
|
||||
if check_opts:
|
||||
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
|
||||
help="don't make any changes; instead, try to predict some of the changes that may occur")
|
||||
parser.add_option('--syntax-check', dest='syntax', action='store_true',
|
||||
help="perform a syntax check on the playbook, but do not execute it")
|
||||
parser.add_option("-D", "--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
|
||||
help="when changing (small) files and templates, show the differences in those files; works great with --check")
|
||||
|
||||
if meta_opts:
|
||||
parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
|
||||
help="run handlers even if a task fails")
|
||||
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
|
||||
help="clear the fact cache for every host in inventory")
|
||||
|
||||
if basedir_opts:
|
||||
parser.add_option('--playbook-dir', default=None, dest='basedir', action='store',
|
||||
help="Since this tool does not use playbooks, use this as a subsitute playbook directory."
|
||||
"This sets the relative path for many features including roles/ group_vars/ etc.")
|
||||
return parser
|
||||
|
||||
    @abstractmethod
    def parse(self):
        """Parse the command line args

        This method parses the command line arguments.  It uses the parser
        stored in the self.parser attribute and saves the args and options in
        self.args and self.options respectively.

        Subclasses need to implement this method.  They will usually create
        a base_parser, add their own options to the base_parser, and then call
        this method to do the actual parsing.  An implementation will look
        something like this::

            def parse(self):
                parser = super(MyCLI, self).base_parser(usage="My Ansible CLI", inventory_opts=True)
                parser.add_option('--my-option', dest='my_option', action='store')
                self.parser = parser
                super(MyCLI, self).parse()
                # If some additional transformations are needed for the
                # arguments and options, do it here.
        """

        self.options, self.args = self.parser.parse_args(self.args[1:])

        # process tags: split comma-separated values and de-duplicate
        if hasattr(self.options, 'tags') and not self.options.tags:
            # optparse defaults does not do what's expected
            self.options.tags = ['all']
        if hasattr(self.options, 'tags') and self.options.tags:
            tags = set()
            for tag_set in self.options.tags:
                for tag in tag_set.split(u','):
                    tags.add(tag.strip())
            self.options.tags = list(tags)

        # process skip_tags the same way
        if hasattr(self.options, 'skip_tags') and self.options.skip_tags:
            skip_tags = set()
            for tag_set in self.options.skip_tags:
                for tag in tag_set.split(u','):
                    skip_tags.add(tag.strip())
            self.options.skip_tags = list(skip_tags)

        # process inventory options except for CLIs that require their own processing
        if hasattr(self.options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:

            if self.options.inventory:

                # should always be list
                if isinstance(self.options.inventory, string_types):
                    self.options.inventory = [self.options.inventory]

                # Ensure full paths when needed (comma means an inline host list, not a path)
                self.options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in self.options.inventory]
            else:
                self.options.inventory = C.DEFAULT_HOST_LIST
    @staticmethod
    def version(prog):
        ''' return ansible version

        :arg prog: program name to prefix the version string with.
        :returns: multi-line string with version, git info, config file,
            module search path and python version details.
        '''
        result = "{0} {1}".format(prog, __version__)
        gitinfo = CLI._gitinfo()
        if gitinfo:
            result = result + " {0}".format(gitinfo)
        result += "\n config file = %s" % C.CONFIG_FILE
        if C.DEFAULT_MODULE_PATH is None:
            cpath = "Default w/o overrides"
        else:
            cpath = C.DEFAULT_MODULE_PATH
        result = result + "\n configured module search path = %s" % cpath
        result = result + "\n ansible python module location = %s" % ':'.join(ansible.__path__)
        result = result + "\n executable location = %s" % sys.argv[0]
        result = result + "\n python version = %s" % ''.join(sys.version.splitlines())
        return result
    @staticmethod
    def version_info(gitinfo=False):
        ''' return full ansible version info

        :kwarg gitinfo: when True include git info (expensive, shells out to version()).
        :returns: dict with 'string', 'full', 'major', 'minor', 'revision' keys.
        '''
        if gitinfo:
            # expensive call, use with care
            ansible_version_string = CLI.version('')
        else:
            ansible_version_string = __version__
        ansible_version = ansible_version_string.split()[0]
        ansible_versions = ansible_version.split('.')
        # coerce each component to int where possible; empty components become 0
        for counter in range(len(ansible_versions)):
            if ansible_versions[counter] == "":
                ansible_versions[counter] = 0
            try:
                ansible_versions[counter] = int(ansible_versions[counter])
            except Exception:
                # non-numeric component (e.g. 'rc1') stays a string
                pass
        # pad to at least (major, minor, revision)
        if len(ansible_versions) < 3:
            for counter in range(len(ansible_versions), 3):
                ansible_versions.append(0)
        return {'string': ansible_version_string.strip(),
                'full': ansible_version,
                'major': ansible_versions[0],
                'minor': ansible_versions[1],
                'revision': ansible_versions[2]}
@staticmethod
|
||||
def _git_repo_info(repo_path):
|
||||
''' returns a string containing git branch, commit id and commit date '''
|
||||
result = None
|
||||
if os.path.exists(repo_path):
|
||||
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
|
||||
if os.path.isfile(repo_path):
|
||||
try:
|
||||
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
|
||||
# There is a possibility the .git file to have an absolute path.
|
||||
if os.path.isabs(gitdir):
|
||||
repo_path = gitdir
|
||||
else:
|
||||
repo_path = os.path.join(repo_path[:-4], gitdir)
|
||||
except (IOError, AttributeError):
|
||||
return ''
|
||||
f = open(os.path.join(repo_path, "HEAD"))
|
||||
line = f.readline().rstrip("\n")
|
||||
if line.startswith("ref:"):
|
||||
branch_path = os.path.join(repo_path, line[5:])
|
||||
else:
|
||||
branch_path = None
|
||||
f.close()
|
||||
if branch_path and os.path.exists(branch_path):
|
||||
branch = '/'.join(line.split('/')[2:])
|
||||
f = open(branch_path)
|
||||
commit = f.readline()[:10]
|
||||
f.close()
|
||||
else:
|
||||
# detached HEAD
|
||||
commit = line[:10]
|
||||
branch = 'detached HEAD'
|
||||
branch_path = os.path.join(repo_path, "HEAD")
|
||||
|
||||
date = time.localtime(os.stat(branch_path).st_mtime)
|
||||
if time.daylight == 0:
|
||||
offset = time.timezone
|
||||
else:
|
||||
offset = time.altzone
|
||||
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
|
||||
else:
|
||||
result = ''
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def _gitinfo():
|
||||
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
|
||||
repo_path = os.path.join(basedir, '.git')
|
||||
result = CLI._git_repo_info(repo_path)
|
||||
submodules = os.path.join(basedir, '.gitmodules')
|
||||
if not os.path.exists(submodules):
|
||||
return result
|
||||
f = open(submodules)
|
||||
for line in f:
|
||||
tokens = line.strip().split(' ')
|
||||
if tokens[0] == 'path':
|
||||
submodule_path = tokens[2]
|
||||
submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git'))
|
||||
if not submodule_info:
|
||||
submodule_info = ' not found - use git submodule update --init ' + submodule_path
|
||||
result += "\n {0}: {1}".format(submodule_path, submodule_info)
|
||||
f.close()
|
||||
return result
|
||||
|
||||
def pager(self, text):
|
||||
''' find reasonable way to display text '''
|
||||
# this is a much simpler form of what is in pydoc.py
|
||||
if not sys.stdout.isatty():
|
||||
display.display(text, screen_only=True)
|
||||
elif 'PAGER' in os.environ:
|
||||
if sys.platform == 'win32':
|
||||
display.display(text, screen_only=True)
|
||||
else:
|
||||
self.pager_pipe(text, os.environ['PAGER'])
|
||||
else:
|
||||
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
p.communicate()
|
||||
if p.returncode == 0:
|
||||
self.pager_pipe(text, 'less')
|
||||
else:
|
||||
display.display(text, screen_only=True)
|
||||
|
||||
@staticmethod
|
||||
def pager_pipe(text, cmd):
|
||||
''' pipe text through a pager '''
|
||||
if 'LESS' not in os.environ:
|
||||
os.environ['LESS'] = CLI.LESS_OPTS
|
||||
try:
|
||||
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
|
||||
cmd.communicate(input=to_bytes(text))
|
||||
except IOError:
|
||||
pass
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def tty_ify(cls, text):
|
||||
|
||||
t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
|
||||
t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
|
||||
t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
|
||||
t = cls._URL.sub(r"\1", t) # U(word) => word
|
||||
t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
|
||||
|
||||
return t
|
||||
|
||||
    @staticmethod
    def _play_prereqs(options):
        """Create the common objects every play-running CLI needs.

        :arg options: parsed CLI options; must provide ``vault_ids``,
            ``vault_password_files``, ``ask_vault_pass`` and ``inventory``,
            and may provide ``basedir``.
        :returns: tuple of (DataLoader, InventoryManager, VariableManager).
        """
        # all needs loader
        loader = DataLoader()

        basedir = getattr(options, 'basedir', False)
        if basedir:
            loader.set_basedir(basedir)

        # CLI-supplied vault ids are appended after the configured defaults
        vault_ids = options.vault_ids
        default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
        vault_ids = default_vault_ids + vault_ids

        vault_secrets = CLI.setup_vault_secrets(loader,
                                                vault_ids=vault_ids,
                                                vault_password_files=options.vault_password_files,
                                                ask_vault_pass=options.ask_vault_pass,
                                                auto_prompt=False)
        loader.set_vault_secrets(vault_secrets)

        # create the inventory, and filter it based on the subset specified (if any)
        inventory = InventoryManager(loader=loader, sources=options.inventory)

        # create the variable manager, which will be shared throughout
        # the code, ensuring a consistent view of global variables
        variable_manager = VariableManager(loader=loader, inventory=inventory)

        # NOTE(review): both branches assign the same value, so the inner
        # if/else is redundant as written -- presumably the 'else' was meant
        # to pair with the outer hasattr() check. TODO confirm intent.
        if hasattr(options, 'basedir'):
            if options.basedir:
                variable_manager.safe_basedir = True
            else:
                variable_manager.safe_basedir = True

        # load vars from cli options
        variable_manager.extra_vars = load_extra_vars(loader=loader, options=options)
        variable_manager.options_vars = load_options_vars(options, CLI.version_info(gitinfo=False))

        return loader, inventory, variable_manager
|
||||
|
||||
@staticmethod
|
||||
def get_host_list(inventory, subset, pattern='all'):
|
||||
|
||||
no_hosts = False
|
||||
if len(inventory.list_hosts()) == 0:
|
||||
# Empty inventory
|
||||
if C.LOCALHOST_WARNING:
|
||||
display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
|
||||
no_hosts = True
|
||||
|
||||
inventory.subset(subset)
|
||||
|
||||
hosts = inventory.list_hosts(pattern)
|
||||
if len(hosts) == 0 and no_hosts is False:
|
||||
raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
|
||||
|
||||
return hosts
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/cli/__init__.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/cli/__init__.pyc
Normal file
Binary file not shown.
188
.ve/lib/python2.7/site-packages/ansible/cli/adhoc.py
Normal file
188
.ve/lib/python2.7/site-packages/ansible/cli/adhoc.py
Normal file
@@ -0,0 +1,188 @@
|
||||
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
########################################################
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.cli import CLI
|
||||
from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
from ansible.executor.task_queue_manager import TaskQueueManager
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.parsing.splitter import parse_kv
|
||||
from ansible.playbook import Playbook
|
||||
from ansible.playbook.play import Play
|
||||
from ansible.plugins.loader import get_all_plugin_loaders
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
########################################################
|
||||
|
||||
class AdHocCLI(CLI):
    ''' is an extra-simple tool/framework/API for doing 'remote things'.
        this command allows you to define and run a single task 'playbook' against a set of hosts
    '''

    def parse(self):
        ''' create an options parser for bin/ansible '''

        self.parser = CLI.base_parser(
            usage='%prog <host-pattern> [options]',
            runas_opts=True,
            inventory_opts=True,
            async_opts=True,
            output_opts=True,
            connect_opts=True,
            check_opts=True,
            runtask_opts=True,
            vault_opts=True,
            fork_opts=True,
            module_opts=True,
            basedir_opts=True,
            desc="Define and run a single task 'playbook' against a set of hosts",
            epilog="Some modules do not make sense in Ad-Hoc (include, meta, etc)",
        )

        # options unique to ansible ad-hoc
        self.parser.add_option('-a', '--args', dest='module_args',
                               help="module arguments", default=C.DEFAULT_MODULE_ARGS)
        self.parser.add_option('-m', '--module-name', dest='module_name',
                               help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
                               default=C.DEFAULT_MODULE_NAME)

        super(AdHocCLI, self).parse()

        # exactly one positional argument (the host pattern) is expected
        if len(self.args) < 1:
            raise AnsibleOptionsError("Missing target hosts")
        elif len(self.args) > 1:
            raise AnsibleOptionsError("Extraneous options or arguments")

        display.verbosity = self.options.verbosity
        self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)

    def _play_ds(self, pattern, async_val, poll):
        # Build the data structure for the implicit single-task play.
        # Raw-parsing is enabled for modules that take free-form arguments.
        check_raw = self.options.module_name in ('command', 'win_command', 'shell', 'win_shell', 'script', 'raw')
        return dict(
            name="Ansible Ad-Hoc",
            hosts=pattern,
            gather_facts='no',
            tasks=[dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args, check_raw=check_raw)), async_val=async_val,
                        poll=poll)]
        )

    def run(self):
        ''' create and execute the single task playbook '''

        super(AdHocCLI, self).run()

        # only thing left should be host pattern
        pattern = to_text(self.args[0], errors='surrogate_or_strict')

        sshpass = None
        becomepass = None

        # gather connection/become passwords interactively when requested
        self.normalize_become_options()
        (sshpass, becomepass) = self.ask_passwords()
        passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

        # dynamically load any plugins
        get_all_plugin_loaders()

        loader, inventory, variable_manager = self._play_prereqs(self.options)

        try:
            hosts = CLI.get_host_list(inventory, self.options.subset, pattern)
        except AnsibleError:
            # only fatal when the user explicitly asked for a subset
            if self.options.subset:
                raise
            else:
                hosts = []
                display.warning("No hosts matched, nothing to do")

        # --list-hosts short-circuits execution
        if self.options.listhosts:
            display.display(' hosts (%d):' % len(hosts))
            for host in hosts:
                display.display(' %s' % host)
            return 0

        if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args:
            err = "No argument passed to %s module" % self.options.module_name
            if pattern.endswith(".yml"):
                err = err + ' (did you mean to run ansible-playbook?)'
            raise AnsibleOptionsError(err)

        # Avoid modules that don't work with ad-hoc
        if self.options.module_name in ('import_playbook',):
            raise AnsibleOptionsError("'%s' is not a valid action for ad-hoc commands" % self.options.module_name)

        play_ds = self._play_ds(pattern, self.options.seconds, self.options.poll_interval)
        play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)

        # used in start callback
        playbook = Playbook(loader)
        playbook._entries.append(play)
        playbook._file_name = '__adhoc_playbook__'

        # pick the stdout callback: explicit callback > --one-line > configured
        if self.callback:
            cb = self.callback
        elif self.options.one_line:
            cb = 'oneline'
        # Respect custom 'stdout_callback' only with enabled 'bin_ansible_callbacks'
        elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
            cb = C.DEFAULT_STDOUT_CALLBACK
        else:
            cb = 'minimal'

        run_tree = False
        if self.options.tree:
            # --tree: also write per-host results under the given directory
            C.DEFAULT_CALLBACK_WHITELIST.append('tree')
            C.TREE_DIR = self.options.tree
            run_tree = True

        # now create a task queue manager to execute the play
        self._tqm = None
        try:
            self._tqm = TaskQueueManager(
                inventory=inventory,
                variable_manager=variable_manager,
                loader=loader,
                options=self.options,
                passwords=passwords,
                stdout_callback=cb,
                run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
                run_tree=run_tree,
            )

            self._tqm.send_callback('v2_playbook_on_start', playbook)

            result = self._tqm.run(play)

            self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
        finally:
            # always release workers and temp files, even on error
            if self._tqm:
                self._tqm.cleanup()
            if loader:
                loader.cleanup_all_tmp_files()

        return result
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/cli/adhoc.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/cli/adhoc.pyc
Normal file
Binary file not shown.
174
.ve/lib/python2.7/site-packages/ansible/cli/config.py
Normal file
174
.ve/lib/python2.7/site-packages/ansible/cli/config.py
Normal file
@@ -0,0 +1,174 @@
|
||||
# Copyright: (c) 2017, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import shlex
|
||||
import subprocess
|
||||
import sys
|
||||
import yaml
|
||||
|
||||
from ansible.cli import CLI
|
||||
from ansible.config.manager import ConfigManager, Setting, find_ini_config_file
|
||||
from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
from ansible.parsing.yaml.dumper import AnsibleDumper
|
||||
from ansible.utils.color import stringc
|
||||
from ansible.utils.path import unfrackpath
|
||||
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
class ConfigCLI(CLI):
    """Config command line class.

    Implements ``ansible-config``: view, dump and list the effective
    Ansible configuration.
    """

    VALID_ACTIONS = ("view", "dump", "list")  # TODO: edit, update, search

    def __init__(self, args, callback=None):
        # resolved path to the active ansible.cfg (may remain None)
        self.config_file = None
        # ConfigManager instance, built in run()
        self.config = None
        super(ConfigCLI, self).__init__(args, callback)

    def parse(self):
        """Build the option parser, select the action and parse arguments."""
        self.parser = CLI.base_parser(
            usage="usage: %%prog [%s] [--help] [options] [ansible.cfg]" % "|".join(self.VALID_ACTIONS),
            epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]),
            desc="View, edit, and manage ansible configuration.",
        )
        self.parser.add_option('-c', '--config', dest='config_file', help="path to configuration file, defaults to first file found in precedence.")

        self.set_action()

        # options specific to self.actions
        if self.action == "list":
            self.parser.set_usage("usage: %prog list [options] ")
        if self.action == "dump":
            self.parser.add_option('--only-changed', dest='only_changed', action='store_true',
                                   help="Only show configurations that have changed from the default")
        elif self.action == "update":
            self.parser.add_option('-s', '--setting', dest='setting', help="config setting, the section defaults to 'defaults'")
            self.parser.set_usage("usage: %prog update [options] [-c ansible.cfg] -s '[section.]setting=value'")
        elif self.action == "search":
            self.parser.set_usage("usage: %prog update [options] [-c ansible.cfg] <search term>")

        self.options, self.args = self.parser.parse_args()
        display.verbosity = self.options.verbosity

    def run(self):
        """Locate and validate the config file, then dispatch to execute_*()."""
        super(ConfigCLI, self).run()

        if self.options.config_file:
            self.config_file = unfrackpath(self.options.config_file, follow=False)
            self.config = ConfigManager(self.config_file)
        else:
            self.config = ConfigManager()
            self.config_file = find_ini_config_file()

        if self.config_file:
            try:
                if not os.path.exists(self.config_file):
                    raise AnsibleOptionsError("%s does not exist or is not accessible" % (self.config_file))
                elif not os.path.isfile(self.config_file):
                    raise AnsibleOptionsError("%s is not a valid file" % (self.config_file))

                os.environ['ANSIBLE_CONFIG'] = to_native(self.config_file)
            # narrowed from a bare 'except:', which also swallowed
            # SystemExit and KeyboardInterrupt
            except Exception:
                if self.action in ['view']:
                    raise
                elif self.action in ['edit', 'update']:
                    display.warning("File does not exist, used empty file: %s" % self.config_file)

        elif self.action == 'view':
            raise AnsibleError('Invalid or no config file was supplied')

        self.execute()

    def execute_update(self):
        '''
        Updates a single setting in the specified ansible.cfg
        '''
        raise AnsibleError("Option not implemented yet")

        # pylint: disable=unreachable
        if self.options.setting is None:
            # typo fixed: 'requries' -> 'requires'
            raise AnsibleOptionsError("update option requires a setting to update")

        (entry, value) = self.options.setting.split('=')
        if '.' in entry:
            (section, option) = entry.split('.')
        else:
            section = 'defaults'
            option = entry
        # delegate the actual edit to the ini_file module against localhost
        subprocess.call([
            'ansible',
            '-m', 'ini_file',
            'localhost',
            '-c', 'local',
            '-a', '"dest=%s section=%s option=%s value=%s backup=yes"' % (self.config_file, section, option, value)
        ])

    def execute_view(self):
        '''
        Displays the current config file
        '''
        try:
            with open(self.config_file, 'rb') as f:
                self.pager(to_text(f.read(), errors='surrogate_or_strict'))
        except Exception as e:
            raise AnsibleError("Failed to open config file: %s" % to_native(e))

    def execute_edit(self):
        '''
        Opens ansible.cfg in the default EDITOR
        '''
        raise AnsibleError("Option not implemented yet")

        # pylint: disable=unreachable
        try:
            editor = shlex.split(os.environ.get('EDITOR', 'vi'))
            editor.append(self.config_file)
            subprocess.call(editor)
        except Exception as e:
            raise AnsibleError("Failed to open editor: %s" % to_native(e))

    def execute_list(self):
        '''
        list all current configs reading lib/constants.py and shows env and config file setting names
        '''
        self.pager(to_text(yaml.dump(self.config.get_configuration_definitions(), Dumper=AnsibleDumper), errors='surrogate_or_strict'))

    def execute_dump(self):
        '''
        Shows the current settings, merges ansible.cfg if specified
        '''
        # FIXME: deal with plugins, not just base config
        text = []
        # start from the definitions and overlay any explicitly-set values
        defaults = self.config.get_configuration_definitions().copy()
        for setting in self.config.data.get_settings():
            if setting.name in defaults:
                defaults[setting.name] = setting

        for setting in sorted(defaults):
            if isinstance(defaults[setting], Setting):
                # explicitly-set values show yellow, defaults show green
                if defaults[setting].origin == 'default':
                    color = 'green'
                else:
                    color = 'yellow'
                msg = "%s(%s) = %s" % (setting, defaults[setting].origin, defaults[setting].value)
            else:
                color = 'green'
                msg = "%s(%s) = %s" % (setting, 'default', defaults[setting].get('default'))
            if not self.options.only_changed or color == 'yellow':
                text.append(stringc(msg, color))

        self.pager(to_text('\n'.join(text), errors='surrogate_or_strict'))
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/cli/config.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/cli/config.pyc
Normal file
Binary file not shown.
442
.ve/lib/python2.7/site-packages/ansible/cli/console.py
Normal file
442
.ve/lib/python2.7/site-packages/ansible/cli/console.py
Normal file
@@ -0,0 +1,442 @@
|
||||
# (c) 2014, Nandor Sivok <dominis@haxor.hu>
|
||||
# (c) 2016, Redhat Inc
|
||||
#
|
||||
# ansible-console is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# ansible-console is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
########################################################
|
||||
# ansible-console is an interactive REPL shell for ansible
|
||||
# with built-in tab completion for all the documented modules
|
||||
#
|
||||
# Available commands:
|
||||
# cd - change host/group (you can use host patterns eg.: app*.dc*:!app01*)
|
||||
# list - list available hosts in the current path
|
||||
# forks - change fork
|
||||
# become - become
|
||||
# ! - forces shell module instead of the ansible module (!yum update -y)
|
||||
|
||||
import atexit
|
||||
import cmd
|
||||
import getpass
|
||||
import readline
|
||||
import os
|
||||
import sys
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.cli import CLI
|
||||
from ansible.executor.task_queue_manager import TaskQueueManager
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
from ansible.module_utils.parsing.convert_bool import boolean
|
||||
from ansible.parsing.splitter import parse_kv
|
||||
from ansible.playbook.play import Play
|
||||
from ansible.plugins.loader import module_loader, fragment_loader
|
||||
from ansible.utils import plugin_docs
|
||||
from ansible.utils.color import stringc
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
class ConsoleCLI(CLI, cmd.Cmd):
|
||||
''' a REPL that allows for running ad-hoc tasks against a chosen inventory (based on dominis' ansible-shell).'''
|
||||
|
||||
modules = []
|
||||
ARGUMENTS = {'host-pattern': 'A name of a group in the inventory, a shell-like glob '
|
||||
'selecting hosts in inventory or any combination of the two separated by commas.'}
|
||||
|
||||
# use specific to console, but fallback to highlight for backwards compatibility
|
||||
NORMAL_PROMPT = C.COLOR_CONSOLE_PROMPT or C.COLOR_HIGHLIGHT
|
||||
|
||||
def __init__(self, args):
|
||||
|
||||
super(ConsoleCLI, self).__init__(args)
|
||||
|
||||
self.intro = 'Welcome to the ansible console.\nType help or ? to list commands.\n'
|
||||
|
||||
self.groups = []
|
||||
self.hosts = []
|
||||
self.pattern = None
|
||||
self.variable_manager = None
|
||||
self.loader = None
|
||||
self.passwords = dict()
|
||||
|
||||
self.modules = None
|
||||
cmd.Cmd.__init__(self)
|
||||
|
||||
    def parse(self):
        """Build the option parser for ansible-console and validate options."""
        self.parser = CLI.base_parser(
            usage='%prog [<host-pattern>] [options]',
            runas_opts=True,
            inventory_opts=True,
            connect_opts=True,
            check_opts=True,
            vault_opts=True,
            fork_opts=True,
            module_opts=True,
            basedir_opts=True,
            desc="REPL console for executing Ansible tasks.",
            epilog="This is not a live session/connection, each task executes in the background and returns it's results."
        )

        # options unique to shell
        self.parser.add_option('--step', dest='step', action='store_true',
                               help="one-step-at-a-time: confirm each task before running")

        # default target is every host
        self.parser.set_defaults(cwd='*')

        super(ConsoleCLI, self).parse()

        display.verbosity = self.options.verbosity
        self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)
|
||||
|
||||
    def get_names(self):
        """Override cmd.Cmd so tab completion sees dynamically added do_* attributes."""
        return dir(self)
|
||||
|
||||
    def cmdloop(self):
        """Run the REPL, treating Ctrl-C as a clean exit instead of a traceback."""
        try:
            cmd.Cmd.cmdloop(self)
        except KeyboardInterrupt:
            self.do_exit(self)
|
||||
|
||||
def set_prompt(self):
|
||||
login_user = self.options.remote_user or getpass.getuser()
|
||||
self.selected = self.inventory.list_hosts(self.options.cwd)
|
||||
prompt = "%s@%s (%d)[f:%s]" % (login_user, self.options.cwd, len(self.selected), self.options.forks)
|
||||
if self.options.become and self.options.become_user in [None, 'root']:
|
||||
prompt += "# "
|
||||
color = C.COLOR_ERROR
|
||||
else:
|
||||
prompt += "$ "
|
||||
color = self.NORMAL_PROMPT
|
||||
self.prompt = stringc(prompt, color)
|
||||
|
||||
def list_modules(self):
|
||||
modules = set()
|
||||
if self.options.module_path:
|
||||
for path in self.options.module_path:
|
||||
if path:
|
||||
module_loader.add_directory(path)
|
||||
|
||||
module_paths = module_loader._get_paths()
|
||||
for path in module_paths:
|
||||
if path is not None:
|
||||
modules.update(self._find_modules_in_path(path))
|
||||
return modules
|
||||
|
||||
def _find_modules_in_path(self, path):
|
||||
|
||||
if os.path.isdir(path):
|
||||
for module in os.listdir(path):
|
||||
if module.startswith('.'):
|
||||
continue
|
||||
elif os.path.isdir(module):
|
||||
self._find_modules_in_path(module)
|
||||
elif module.startswith('__'):
|
||||
continue
|
||||
elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
|
||||
continue
|
||||
elif module in C.IGNORE_FILES:
|
||||
continue
|
||||
elif module.startswith('_'):
|
||||
fullpath = '/'.join([path, module])
|
||||
if os.path.islink(fullpath): # avoids aliases
|
||||
continue
|
||||
module = module.replace('_', '', 1)
|
||||
|
||||
module = os.path.splitext(module)[0] # removes the extension
|
||||
yield module
|
||||
|
||||
    def default(self, arg, forceshell=False):
        """Run the typed line as a module invocation against the current target.

        :arg arg: raw input line; first word is a module name if known,
            otherwise the whole line is passed to the shell module.
        :kwarg forceshell: when True always use the shell module (the ``!``
            prefix path).
        :returns: False on any failure; otherwise the TaskQueueManager result.
        """
        # '#' lines are comments
        if arg.startswith("#"):
            return False

        if not self.options.cwd:
            display.error("No host found")
            return False

        # known module name => module + args, otherwise treat as shell command
        if arg.split()[0] in self.modules:
            module = arg.split()[0]
            module_args = ' '.join(arg.split()[1:])
        else:
            module = 'shell'
            module_args = arg

        if forceshell is True:
            module = 'shell'
            module_args = arg

        self.options.module_name = module

        result = None
        try:
            # free-form modules need raw parsing of their arguments
            check_raw = self.options.module_name in ('command', 'shell', 'script', 'raw')
            play_ds = dict(
                name="Ansible Shell",
                hosts=self.options.cwd,
                gather_facts='no',
                tasks=[dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)))]
            )
            play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader)
        except Exception as e:
            display.error(u"Unable to build command: %s" % to_text(e))
            return False

        try:
            cb = 'minimal'  # FIXME: make callbacks configurable
            # now create a task queue manager to execute the play
            self._tqm = None
            try:
                self._tqm = TaskQueueManager(
                    inventory=self.inventory,
                    variable_manager=self.variable_manager,
                    loader=self.loader,
                    options=self.options,
                    passwords=self.passwords,
                    stdout_callback=cb,
                    run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
                    run_tree=False,
                )

                result = self._tqm.run(play)
            finally:
                # always release workers and temp files, even on error
                if self._tqm:
                    self._tqm.cleanup()
                if self.loader:
                    self.loader.cleanup_all_tmp_files()

            if result is None:
                display.error("No hosts found")
                return False
        except KeyboardInterrupt:
            display.error('User interrupted execution')
            return False
        except Exception as e:
            display.error(to_text(e))
            # FIXME: add traceback in very very verbose mode
            return False
|
||||
|
||||
    def emptyline(self):
        """Override cmd.Cmd: do nothing instead of repeating the last command."""
        return
|
||||
|
||||
def do_shell(self, arg):
|
||||
"""
|
||||
You can run shell commands through the shell module.
|
||||
|
||||
eg.:
|
||||
shell ps uax | grep java | wc -l
|
||||
shell killall python
|
||||
shell halt -n
|
||||
|
||||
You can use the ! to force the shell module. eg.:
|
||||
!ps aux | grep java | wc -l
|
||||
"""
|
||||
self.default(arg, True)
|
||||
|
||||
def do_forks(self, arg):
|
||||
"""Set the number of forks"""
|
||||
if not arg:
|
||||
display.display('Usage: forks <number>')
|
||||
return
|
||||
self.options.forks = int(arg)
|
||||
self.set_prompt()
|
||||
|
||||
do_serial = do_forks
|
||||
|
||||
def do_verbosity(self, arg):
|
||||
"""Set verbosity level"""
|
||||
if not arg:
|
||||
display.display('Usage: verbosity <number>')
|
||||
else:
|
||||
display.verbosity = int(arg)
|
||||
display.v('verbosity level set to %s' % arg)
|
||||
|
||||
def do_cd(self, arg):
|
||||
"""
|
||||
Change active host/group. You can use hosts patterns as well eg.:
|
||||
cd webservers
|
||||
cd webservers:dbservers
|
||||
cd webservers:!phoenix
|
||||
cd webservers:&staging
|
||||
cd webservers:dbservers:&staging:!phoenix
|
||||
"""
|
||||
if not arg:
|
||||
self.options.cwd = '*'
|
||||
elif arg in '/*':
|
||||
self.options.cwd = 'all'
|
||||
elif self.inventory.get_hosts(arg):
|
||||
self.options.cwd = arg
|
||||
else:
|
||||
display.display("no host matched")
|
||||
|
||||
self.set_prompt()
|
||||
|
||||
def do_list(self, arg):
|
||||
"""List the hosts in the current group"""
|
||||
if arg == 'groups':
|
||||
for group in self.groups:
|
||||
display.display(group)
|
||||
else:
|
||||
for host in self.selected:
|
||||
display.display(host.name)
|
||||
|
||||
def do_become(self, arg):
|
||||
"""Toggle whether plays run with become"""
|
||||
if arg:
|
||||
self.options.become = boolean(arg, strict=False)
|
||||
display.v("become changed to %s" % self.options.become)
|
||||
self.set_prompt()
|
||||
else:
|
||||
display.display("Please specify become value, e.g. `become yes`")
|
||||
|
||||
def do_remote_user(self, arg):
|
||||
"""Given a username, set the remote user plays are run by"""
|
||||
if arg:
|
||||
self.options.remote_user = arg
|
||||
self.set_prompt()
|
||||
else:
|
||||
display.display("Please specify a remote user, e.g. `remote_user root`")
|
||||
|
||||
def do_become_user(self, arg):
|
||||
"""Given a username, set the user that plays are run by when using become"""
|
||||
if arg:
|
||||
self.options.become_user = arg
|
||||
else:
|
||||
display.display("Please specify a user, e.g. `become_user jenkins`")
|
||||
display.v("Current user is %s" % self.options.become_user)
|
||||
self.set_prompt()
|
||||
|
||||
def do_become_method(self, arg):
|
||||
"""Given a become_method, set the privilege escalation method when using become"""
|
||||
if arg:
|
||||
self.options.become_method = arg
|
||||
display.v("become_method changed to %s" % self.options.become_method)
|
||||
else:
|
||||
display.display("Please specify a become_method, e.g. `become_method su`")
|
||||
|
||||
def do_check(self, arg):
|
||||
"""Toggle whether plays run with check mode"""
|
||||
if arg:
|
||||
self.options.check = boolean(arg, strict=False)
|
||||
display.v("check mode changed to %s" % self.options.check)
|
||||
else:
|
||||
display.display("Please specify check mode value, e.g. `check yes`")
|
||||
|
||||
def do_diff(self, arg):
    """Toggle whether plays run with diff"""
    if not arg:
        display.display("Please specify a diff value , e.g. `diff yes`")
        return
    self.options.diff = boolean(arg, strict=False)
    display.v("diff mode changed to %s" % self.options.diff)
|
||||
|
||||
def do_exit(self, args):
    """Exits from the console"""
    # emit a newline so the shell prompt starts on a fresh line, then
    # return -1 to tell cmd.Cmd's loop to stop
    sys.stdout.write('\n')
    return -1

do_EOF = do_exit  # Ctrl-D behaves exactly like `exit`
|
||||
|
||||
def helpdefault(self, module_name):
    """Print the short description and parameter names for a known module command."""
    if module_name not in self.modules:
        display.error('%s is not a valid command, use ? to list all valid commands.' % module_name)
        return

    in_path = module_loader.find_plugin(module_name)
    if in_path:
        oc, a, _, _ = plugin_docs.get_docstring(in_path, fragment_loader)
        if oc:
            display.display(oc['short_description'])
            display.display('Parameters:')
            for opt, spec in oc['options'].items():
                display.display(' ' + stringc(opt, self.NORMAL_PROMPT) + ' ' + spec['description'][0])
        else:
            display.error('No documentation found for %s.' % module_name)
|
||||
|
||||
def complete_cd(self, text, line, begidx, endidx):
    """Tab-complete host/group names for the `cd` command."""
    partial = line.partition(' ')[2]
    offset = len(partial) - len(text)

    # when "everywhere" is selected, offer both hosts and groups;
    # otherwise only hosts within the current working pattern
    if self.options.cwd in ('all', '*', '\\'):
        candidates = self.hosts + self.groups
    else:
        candidates = [h.name for h in self.inventory.list_hosts(self.options.cwd)]

    prefix = to_native(partial)
    return [to_native(c)[offset:] for c in candidates if to_native(c).startswith(prefix)]
|
||||
|
||||
def completedefault(self, text, line, begidx, endidx):
    """Tab-complete module argument names (offered as `name=`) for module commands."""
    module_name = line.split()[0]
    if module_name not in self.modules:
        return None  # mirrors the original implicit fall-through

    last_token = line.split(' ')[-1]
    offset = len(last_token) - len(text)
    return [arg[offset:] + '=' for arg in self.module_args(module_name) if arg.startswith(last_token)]
|
||||
|
||||
def module_args(self, module_name):
    """Return the documented option names for *module_name*."""
    plugin_path = module_loader.find_plugin(module_name)
    docs, a, _, _ = plugin_docs.get_docstring(plugin_path, fragment_loader)
    return list(docs['options'].keys())
|
||||
|
||||
def run(self):
    """Entry point for ansible-console: resolve hosts, wire module commands
    into the cmd.Cmd interface, set up readline history/completion and start
    the interactive loop."""

    super(ConsoleCLI, self).run()

    sshpass = None
    becomepass = None

    # hosts: a single positional arg selects the pattern, default is 'all'
    if len(self.args) != 1:
        self.pattern = 'all'
    else:
        self.pattern = self.args[0]
    self.options.cwd = self.pattern

    # dynamically add modules as commands (module name binds via default arg)
    self.modules = self.list_modules()
    for module in self.modules:
        setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg))
        setattr(self, 'help_' + module, lambda module=module: self.helpdefault(module))

    self.normalize_become_options()
    (sshpass, becomepass) = self.ask_passwords()
    self.passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

    self.loader, self.inventory, self.variable_manager = self._play_prereqs(self.options)

    hosts = CLI.get_host_list(self.inventory, self.options.subset, self.pattern)

    self.groups = self.inventory.list_groups()
    self.hosts = [x.name for x in hosts]

    # This hack is to work around readline issues on a mac:
    # http://stackoverflow.com/a/7116997/541202
    # BUG FIX: readline.__doc__ can be None on some builds, which made the
    # original `'libedit' in readline.__doc__` raise TypeError; guard it.
    if readline.__doc__ and 'libedit' in readline.__doc__:
        readline.parse_and_bind("bind ^I rl_complete")
    else:
        readline.parse_and_bind("tab: complete")

    histfile = os.path.join(os.path.expanduser("~"), ".ansible-console_history")
    try:
        readline.read_history_file(histfile)
    except IOError:
        # missing/unreadable history is not an error on first run
        pass

    atexit.register(readline.write_history_file, histfile)
    self.set_prompt()
    self.cmdloop()
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/cli/console.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/cli/console.pyc
Normal file
Binary file not shown.
626
.ve/lib/python2.7/site-packages/ansible/cli/doc.py
Normal file
626
.ve/lib/python2.7/site-packages/ansible/cli/doc.py
Normal file
@@ -0,0 +1,626 @@
|
||||
# (c) 2014, James Tanner <tanner.jc@gmail.com>
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
import textwrap
|
||||
import traceback
|
||||
import yaml
|
||||
|
||||
from collections import Sequence
|
||||
import ansible.plugins.loader as plugin_loader
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.cli import CLI
|
||||
from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.parsing.metadata import extract_metadata
|
||||
from ansible.parsing.plugin_docs import read_docstub
|
||||
from ansible.parsing.yaml.dumper import AnsibleDumper
|
||||
from ansible.plugins.loader import action_loader, fragment_loader
|
||||
from ansible.utils.plugin_docs import BLACKLIST, get_docstring
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
class DocCLI(CLI):
|
||||
''' displays information on modules installed in Ansible libraries.
|
||||
It displays a terse listing of plugins and their short descriptions,
|
||||
provides a printout of their DOCUMENTATION strings,
|
||||
and it can create a short "snippet" which can be pasted into a playbook. '''
|
||||
|
||||
# default ignore list for detailed views
|
||||
IGNORE = ('module', 'docuri', 'version_added', 'short_description', 'now_date', 'plainexamples', 'returndocs')
|
||||
|
||||
def __init__(self, args):
    """Initialize the doc CLI with an (initially empty) set of discovered plugins."""
    super(DocCLI, self).__init__(args)
    self.plugin_list = set()
|
||||
|
||||
def parse(self):
    """Build the ansible-doc option parser and validate mutually exclusive flags."""

    parser = CLI.base_parser(
        usage='usage: %prog [-l|-F|-s] [options] [-t <plugin type> ] [plugin]',
        module_opts=True,
        desc="plugin documentation tool",
        epilog="See man pages for Ansible CLI options or website for tutorials https://docs.ansible.com"
    )
    self.parser = parser

    parser.add_option("-F", "--list_files", action="store_true", default=False, dest="list_files",
                      help='Show plugin names and their source files without summaries (implies --list)')
    parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
                      help='List available plugins')
    parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
                      help='Show playbook snippet for specified plugin(s)')
    parser.add_option("-a", "--all", action="store_true", default=False, dest='all_plugins',
                      help='**For internal testing only** Show documentation for all plugins.')
    parser.add_option("-j", "--json", action="store_true", default=False, dest='json_dump',
                      help='**For internal testing only** Dump json metadata for all plugins.')
    parser.add_option("-t", "--type", action="store", default='module', dest='type', type='choice',
                      help='Choose which plugin type (defaults to "module")',
                      choices=C.DOCUMENTABLE_PLUGINS)
    super(DocCLI, self).parse()

    # -l, -F, -s, -j and -a select mutually exclusive modes of operation
    exclusive = [self.options.all_plugins, self.options.json_dump, self.options.list_dir,
                 self.options.list_files, self.options.show_snippet]
    if exclusive.count(True) > 1:
        raise AnsibleOptionsError("Only one of -l, -F, -s, -j or -a can be used at the same time.")

    display.verbosity = self.options.verbosity
|
||||
|
||||
def run(self):
    """Dispatch the requested ansible-doc action (list, list-files, json dump,
    or per-plugin documentation) and return the process exit code."""

    super(DocCLI, self).run()

    plugin_type = self.options.type

    if plugin_type in C.DOCUMENTABLE_PLUGINS:
        loader = getattr(plugin_loader, '%s_loader' % plugin_type)
    else:
        raise AnsibleOptionsError("Unknown or undocumentable plugin type: %s" % plugin_type)

    # add to plugin path from command line
    if self.options.module_path:
        for path in self.options.module_path:
            if path:
                loader.add_directory(path)

    # save only top level paths for errors
    search_paths = DocCLI.print_paths(loader)
    loader._paths = None  # reset so we can use subdirs below

    # list plugins names and filepath for type
    if self.options.list_files:
        paths = loader._get_paths()
        for path in paths:
            self.plugin_list.update(self.find_plugins(path, plugin_type))

        list_text = self.get_plugin_list_filenames(loader)
        self.pager(list_text)
        return 0

    # list plugins for type
    if self.options.list_dir:
        paths = loader._get_paths()
        for path in paths:
            self.plugin_list.update(self.find_plugins(path, plugin_type))

        self.pager(self.get_plugin_list_text(loader))
        return 0

    # process all plugins of type
    if self.options.all_plugins:
        # BUG FIX: get_all_plugins_of_type() accepts only the plugin type; the
        # original also passed `loader`, raising TypeError whenever -a was used
        self.args = self.get_all_plugins_of_type(plugin_type)

    # dump plugin metadata as JSON
    if self.options.json_dump:
        plugin_data = {}
        for plugin_type in C.DOCUMENTABLE_PLUGINS:
            plugin_data[plugin_type] = dict()
            plugin_names = self.get_all_plugins_of_type(plugin_type)
            for plugin_name in plugin_names:
                plugin_info = self.get_plugin_metadata(plugin_type, plugin_name)
                if plugin_info is not None:
                    plugin_data[plugin_type][plugin_name] = plugin_info

        self.pager(json.dumps(plugin_data, sort_keys=True, indent=4))

        return 0

    if len(self.args) == 0:
        raise AnsibleOptionsError("Incorrect options passed")

    # process command line list
    text = ''
    for plugin in self.args:
        textret = self.format_plugin_doc(plugin, loader, plugin_type, search_paths)

        if textret:
            text += textret

    if text:
        self.pager(text)

    return 0
|
||||
|
||||
def get_all_plugins_of_type(self, plugin_type):
    """Return a sorted list of every plugin name found for *plugin_type*."""
    loader = getattr(plugin_loader, '%s_loader' % plugin_type)
    found = set()
    for search_path in loader._get_paths():
        found.update(self.find_plugins(search_path, plugin_type))
    return sorted(found)
|
||||
|
||||
def get_plugin_metadata(self, plugin_type, plugin_name):
    """Return name/namespace/description/version_added for one plugin,
    or None for plugins whose metadata status marks them as removed."""
    # if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
    loader = getattr(plugin_loader, '%s_loader' % plugin_type)
    filename = loader.find_plugin(plugin_name, mod_type='.py', ignore_deprecated=True, check_aliases=True)
    if filename is None:
        raise AnsibleError("unable to load {0} plugin named {1} ".format(plugin_type, plugin_name))

    doc_error_msg = ("%s %s at %s has a documentation error formatting or is missing documentation." %
                     (plugin_type, plugin_name, filename))
    try:
        doc, __, __, metadata = get_docstring(filename, fragment_loader, verbose=(self.options.verbosity > 0))
    except Exception:
        display.vvv(traceback.format_exc())
        raise AnsibleError(doc_error_msg)

    if doc is None:
        if 'removed' not in metadata.get('status', []):
            raise AnsibleError(doc_error_msg)
        # Removed plugins don't have any documentation
        return None

    return dict(
        name=plugin_name,
        namespace=self.namespace_from_plugin_filepath(filename, plugin_name, loader.package_path),
        description=doc.get('short_description', "UNKNOWN"),
        version_added=doc.get('version_added', "UNKNOWN")
    )
|
||||
|
||||
def namespace_from_plugin_filepath(self, filepath, plugin_name, basedir):
    """Derive a dotted namespace (e.g. 'cloud.amazon') from a plugin's path
    relative to *basedir*; returns None when the plugin sits at the top level."""
    prefix = basedir if basedir.endswith('/') else basedir + '/'
    rel_path = filepath.replace(prefix, '')
    extension_free = os.path.splitext(rel_path)[0]
    namespace_only = extension_free.rsplit(plugin_name, 1)[0].strip('/_')
    clean_ns = namespace_only.replace('/', '.')
    return clean_ns or None
|
||||
|
||||
def format_plugin_doc(self, plugin, loader, plugin_type, search_paths):
    """Render documentation (or a playbook snippet) for one plugin.

    Returns the text, or None when the plugin is skipped (not found,
    blacklisted extension, removed, or has broken documentation)."""
    text = ''

    try:
        # if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
        filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)
        if filename is None:
            display.warning("%s %s not found in:\n%s\n" % (plugin_type, plugin, search_paths))
            return

        if any(filename.endswith(ext) for ext in C.BLACKLIST_EXTS):
            return

        try:
            doc, plainexamples, returndocs, metadata = get_docstring(filename, fragment_loader,
                                                                     verbose=(self.options.verbosity > 0))
        except Exception:
            display.vvv(traceback.format_exc())
            display.error(
                "%s %s has a documentation error formatting or is missing documentation." % (plugin_type, plugin),
                wrap_text=False)
            return

        if doc is None:
            if 'removed' in metadata.get('status', []):
                display.warning("%s %s has been removed\n" % (plugin_type, plugin))
                return

            # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
            # probably a quoting issue.  (caught by the outer handler and re-raised wrapped)
            raise AnsibleError("Parsing produced an empty object.")

        # assign from other sections
        doc['plainexamples'] = plainexamples
        doc['returndocs'] = returndocs
        doc['metadata'] = metadata

        # generate extra data
        if plugin_type == 'module':
            # is there corresponding action plugin?
            doc['action'] = plugin in action_loader
        doc['filename'] = filename
        doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
        if 'docuri' in doc:
            doc['docuri'] = doc[plugin_type].replace('_', '-')

        if self.options.show_snippet and plugin_type == 'module':
            text += self.get_snippet_text(doc)
        else:
            text += self.get_man_text(doc)

        return text
    except Exception as e:
        display.vvv(traceback.format_exc())
        raise AnsibleError(
            "%s %s missing documentation (or could not parse documentation): %s\n" % (plugin_type, plugin, str(e)))
|
||||
|
||||
def find_plugins(self, path, ptype):
    """Scan directory *path* and return the set of plugin names of type *ptype*."""

    display.vvvv("Searching %s for plugins" % path)

    plugin_list = set()

    if not os.path.exists(path):
        display.vvvv("%s does not exist" % path)
        return plugin_list

    bkey = ptype.upper()
    blacklisted = BLACKLIST.get(bkey, ())
    for plugin in os.listdir(path):
        display.vvvv("Found %s" % plugin)
        full_path = '/'.join([path, plugin])

        # skip hidden files, dunder files, directories, blacklisted
        # extensions and explicitly ignored files
        if plugin.startswith('.') or plugin.startswith('__'):
            continue
        if os.path.isdir(full_path):
            continue
        if any(plugin.endswith(ext) for ext in C.BLACKLIST_EXTS):
            continue
        if plugin in C.IGNORE_FILES:
            continue
        if plugin.startswith('_') and os.path.islink(full_path):  # avoids aliases
            continue

        plugin = os.path.splitext(plugin)[0]  # removes the extension
        plugin = plugin.lstrip('_')  # remove underscore from deprecated plugins

        if plugin not in blacklisted:
            plugin_list.add(plugin)
            display.vvvv("Added %s" % plugin)

    return plugin_list
|
||||
|
||||
def get_plugin_list_text(self, loader):
    """Build the `--list` output: one 'name  short-description' row per plugin,
    with deprecated plugins grouped in a trailing DEPRECATED section."""
    columns = display.columns
    displace = max(len(x) for x in self.plugin_list)
    linelimit = columns - displace - 5
    text = []
    deprecated = []
    for plugin in sorted(self.plugin_list):

        try:
            # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
            filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)

            if filename is None or filename.endswith(".ps1") or os.path.isdir(filename):
                continue

            doc = None
            try:
                doc = read_docstub(filename)
            except Exception:
                display.warning("%s has a documentation formatting error" % plugin)
                continue

            if not doc or not isinstance(doc, dict):
                # fall back to metadata: warn unless the plugin was removed
                with open(filename) as f:
                    metadata = extract_metadata(module_data=f.read())
                if metadata[0]:
                    if 'removed' not in metadata[0].get('status', []):
                        display.warning("%s parsing did not produce documentation." % plugin)
                else:
                    continue
                desc = 'UNDOCUMENTED'
            else:
                desc = self.tty_ify(doc.get('short_description', 'INVALID SHORT DESCRIPTION').strip())

            if len(desc) > linelimit:
                desc = desc[:linelimit] + '...'

            if plugin.startswith('_'):  # Handle deprecated
                deprecated.append("%-*s %-*.*s" % (displace, plugin[1:], linelimit, len(desc), desc))
            else:
                text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
        except Exception as e:
            raise AnsibleError("Failed reading docs at %s: %s" % (plugin, to_native(e)), orig_exc=e)

    if deprecated:
        text.append("\nDEPRECATED:")
        text.extend(deprecated)
    return "\n".join(text)
|
||||
|
||||
def get_plugin_list_filenames(self, loader):
    """Build the `--list_files` output: one 'name  source-path' row per plugin."""
    columns = display.columns
    displace = max(len(x) for x in self.plugin_list)
    linelimit = columns - displace - 5
    text = []

    for plugin in sorted(self.plugin_list):

        try:
            # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
            filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)

            if filename is None or filename.endswith(".ps1") or os.path.isdir(filename):
                continue

            text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(filename), filename))

        except Exception as e:
            raise AnsibleError("Failed reading docs at %s: %s" % (plugin, to_native(e)), orig_exc=e)

    return "\n".join(text)
|
||||
|
||||
@staticmethod
|
||||
def print_paths(finder):
|
||||
''' Returns a string suitable for printing of the search path '''
|
||||
|
||||
# Uses a list to get the order right
|
||||
ret = []
|
||||
for i in finder._get_paths(subdirs=False):
|
||||
if i not in ret:
|
||||
ret.append(i)
|
||||
return os.pathsep.join(ret)
|
||||
|
||||
def get_snippet_text(self, doc):
    """Return a pastable task snippet for a module, built from its *doc* dict."""

    text = []
    desc = CLI.tty_ify(doc['short_description'])
    text.append("- name: %s" % (desc))
    text.append(" %s:" % (doc['module']))
    pad = 31
    subdent = " " * pad
    limit = display.columns - pad

    for o in sorted(doc['options'].keys()):
        opt = doc['options'][o]
        if isinstance(opt['description'], string_types):
            desc = CLI.tty_ify(opt['description'])
        else:
            desc = CLI.tty_ify(" ".join(opt['description']))

        required = opt.get('required', False)
        if not isinstance(required, bool):
            # BUG FIX: the original did `raise("...")`, which raises a bare str
            # (itself a TypeError on Python 3); raise a real error like add_fields
            raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
        if required:
            desc = "(required) %s" % desc
        o = '%s:' % o
        text.append(" %-20s # %s" % (o, textwrap.fill(desc, limit, subsequent_indent=subdent)))
    text.append('')

    return "\n".join(text)
|
||||
|
||||
def _dump_yaml(self, struct, indent):
    """YAML-dump *struct* with every line prefixed by *indent*, then tty-ify."""
    dumped = yaml.dump(struct, default_flow_style=False, Dumper=AnsibleDumper)
    return CLI.tty_ify('\n'.join(indent + line for line in dumped.split('\n')))
|
||||
|
||||
def add_fields(self, text, fields, limit, opt_indent):
    """Append formatted option documentation for *fields* to *text*.

    Mutates both *text* (appends lines) and the option dicts in *fields*
    (keys are popped/deleted as they are consumed)."""

    for field_name in sorted(fields):
        opt = fields[field_name]

        # '=' marks a required option, '-' an optional one
        required = opt.pop('required', False)
        if not isinstance(required, bool):
            raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
        opt_leadin = "=" if required else "-"

        text.append("%s %s" % (opt_leadin, field_name))

        if isinstance(opt['description'], list):
            for entry in opt['description']:
                text.append(textwrap.fill(CLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
        else:
            text.append(textwrap.fill(CLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
        del opt['description']

        aliases = ''
        if 'aliases' in opt:
            if len(opt['aliases']) > 0:
                aliases = "(Aliases: " + ", ".join(str(i) for i in opt['aliases']) + ")"
            del opt['aliases']
        choices = ''
        if 'choices' in opt:
            if len(opt['choices']) > 0:
                choices = "(Choices: " + ", ".join(str(i) for i in opt['choices']) + ")"
            del opt['choices']
        default = ''
        if 'default' in opt or not required:
            default = "[Default: %s" % str(opt.pop('default', '(null)')) + "]"

        text.append(textwrap.fill(CLI.tty_ify(aliases + choices + default), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))

        # nested suboptions / specs recurse with doubled indent
        if 'options' in opt:
            text.append("%soptions:\n" % opt_indent)
            self.add_fields(text, opt.pop('options'), limit, opt_indent + opt_indent)

        if 'spec' in opt:
            text.append("%sspec:\n" % opt_indent)
            self.add_fields(text, opt.pop('spec'), limit, opt_indent + opt_indent)

        # collect configuration sources (env/ini/...) into a 'set_via' block,
        # stripping ignored keys from each entry
        conf = {}
        for config in ('env', 'ini', 'yaml', 'vars', 'keywords'):
            if config in opt and opt[config]:
                conf[config] = opt.pop(config)
                for ignore in self.IGNORE:
                    for item in conf[config]:
                        if ignore in item:
                            del item[ignore]

        if conf:
            text.append(self._dump_yaml({'set_via': conf}, opt_indent))

        # generic rendering for whatever keys remain
        for k in sorted(opt):
            if k.startswith('_'):
                continue
            if isinstance(opt[k], string_types):
                text.append('%s%s: %s' % (opt_indent, k, textwrap.fill(CLI.tty_ify(opt[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
            elif isinstance(opt[k], (Sequence)) and all(isinstance(x, string_types) for x in opt[k]):
                text.append(CLI.tty_ify('%s%s: %s' % (opt_indent, k, ', '.join(opt[k]))))
            else:
                text.append(self._dump_yaml({k: opt[k]}, opt_indent))
        text.append('')
|
||||
|
||||
@staticmethod
|
||||
def get_support_block(doc):
|
||||
# Note: 'curated' is deprecated and not used in any of the modules we ship
|
||||
support_level_msg = {'core': 'The Ansible Core Team',
|
||||
'network': 'The Ansible Network Team',
|
||||
'certified': 'an Ansible Partner',
|
||||
'community': 'The Ansible Community',
|
||||
'curated': 'A Third Party',
|
||||
}
|
||||
if doc['metadata'].get('metadata_version') in ('1.0', '1.1'):
|
||||
return [" * This module is maintained by %s" % support_level_msg[doc['metadata']['supported_by']]]
|
||||
|
||||
return []
|
||||
|
||||
@staticmethod
|
||||
def get_metadata_block(doc):
|
||||
text = []
|
||||
if doc['metadata'].get('metadata_version') in ('1.0', '1.1'):
|
||||
text.append("METADATA:")
|
||||
text.append('\tSUPPORT LEVEL: %s' % doc['metadata']['supported_by'])
|
||||
|
||||
for k in (m for m in doc['metadata'] if m not in ('version', 'metadata_version', 'supported_by')):
|
||||
if isinstance(k, list):
|
||||
text.append("\t%s: %s" % (k.capitalize(), ", ".join(doc['metadata'][k])))
|
||||
else:
|
||||
text.append("\t%s: %s" % (k.capitalize(), doc['metadata'][k]))
|
||||
return text
|
||||
|
||||
return []
|
||||
|
||||
def get_man_text(self, doc):
    """Render the full man-page style documentation text for one plugin.

    NOTE: consumes *doc* destructively — keys are popped/deleted as each
    section is rendered, and self.IGNORE grows by the current plugin type.
    """

    self.IGNORE = self.IGNORE + (self.options.type,)
    opt_indent = " "
    text = []
    # reserve ~20% of the terminal width as padding, but never wrap below 70 cols
    pad = display.columns * 0.20
    limit = max(display.columns - int(pad), 70)

    text.append("> %s (%s)\n" % (doc.get(self.options.type, doc.get('plugin_type')).upper(), doc.pop('filename')))

    if isinstance(doc['description'], list):
        desc = " ".join(doc.pop('description'))
    else:
        desc = doc.pop('description')

    text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))

    if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0:
        text.append("DEPRECATED: \n")
        if isinstance(doc['deprecated'], dict):
            # older docs used 'version' where newer ones use 'removed_in'
            if 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']:
                doc['deprecated']['removed_in'] = doc['deprecated']['version']
            text.append("\tReason: %(why)s\n\tWill be removed in: Ansible %(removed_in)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated'))
        else:
            text.append("%s" % doc.pop('deprecated'))
        text.append("\n")

    try:
        support_block = self.get_support_block(doc)
        if support_block:
            text.extend(support_block)
    except Exception:
        pass  # FIXME: not suported by plugins

    if doc.pop('action', False):
        text.append(" * note: %s\n" % "This module has a corresponding action plugin.")

    if 'options' in doc and doc['options']:
        text.append("OPTIONS (= is mandatory):\n")
        self.add_fields(text, doc.pop('options'), limit, opt_indent)
        text.append('')

    if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
        text.append("NOTES:")
        for note in doc['notes']:
            text.append(textwrap.fill(CLI.tty_ify(note), limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
        text.append('')
        del doc['notes']

    if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
        req = ", ".join(doc.pop('requirements'))
        text.append("REQUIREMENTS:%s\n" % textwrap.fill(CLI.tty_ify(req), limit - 16, initial_indent=" ", subsequent_indent=opt_indent))

    # Generic handler for any remaining non-ignored doc keys
    # (sorted(doc) materializes the key list, so deleting inside the loop is safe)
    for k in sorted(doc):
        if k in self.IGNORE or not doc[k]:
            continue
        if isinstance(doc[k], string_types):
            text.append('%s: %s' % (k.upper(), textwrap.fill(CLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
        elif isinstance(doc[k], (list, tuple)):
            text.append('%s: %s' % (k.upper(), ', '.join(doc[k])))
        else:
            text.append(self._dump_yaml({k.upper(): doc[k]}, opt_indent))
        del doc[k]
    text.append('')

    if 'plainexamples' in doc and doc['plainexamples'] is not None:
        text.append("EXAMPLES:")
        if isinstance(doc['plainexamples'], string_types):
            text.append(doc.pop('plainexamples').strip())
        else:
            text.append(yaml.dump(doc.pop('plainexamples'), indent=2, default_flow_style=False))
        text.append('')

    if 'returndocs' in doc and doc['returndocs'] is not None:
        text.append("RETURN VALUES:\n")
        if isinstance(doc['returndocs'], string_types):
            text.append(doc.pop('returndocs'))
        else:
            text.append(yaml.dump(doc.pop('returndocs'), indent=2, default_flow_style=False))
        text.append('')

    try:
        metadata_block = self.get_metadata_block(doc)
        if metadata_block:
            text.extend(metadata_block)
            text.append('')
    except Exception:
        pass  # metadata is optional

    return "\n".join(text)
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/cli/doc.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/cli/doc.pyc
Normal file
Binary file not shown.
691
.ve/lib/python2.7/site-packages/ansible/cli/galaxy.py
Normal file
691
.ve/lib/python2.7/site-packages/ansible/cli/galaxy.py
Normal file
@@ -0,0 +1,691 @@
|
||||
########################################################################
|
||||
#
|
||||
# (C) 2013, James Cammarata <jcammarata@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
########################################################################
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os.path
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
import yaml
|
||||
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
import ansible.constants as C
|
||||
from ansible.cli import CLI
|
||||
from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
from ansible.galaxy import Galaxy
|
||||
from ansible.galaxy.api import GalaxyAPI
|
||||
from ansible.galaxy.login import GalaxyLogin
|
||||
from ansible.galaxy.role import GalaxyRole
|
||||
from ansible.galaxy.token import GalaxyToken
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.playbook.role.requirement import RoleRequirement
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
class GalaxyCLI(CLI):
|
||||
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
|
||||
|
||||
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
|
||||
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
|
||||
|
||||
def __init__(self, args):
    """Set up galaxy CLI state; the API handle and Galaxy context are created later."""
    self.api = None
    self.galaxy = None
    super(GalaxyCLI, self).__init__(args)
|
||||
|
||||
    def set_action(self):
        """Configure the option parser for the selected sub-command.

        The base class resolves self.action from argv; this method then adds
        the usage line, description and options specific to that action, plus
        options shared between several actions.
        """

        super(GalaxyCLI, self).set_action()

        # specific to actions
        if self.action == "delete":
            self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
            self.parser.set_description("Removes the role from Galaxy. It does not remove or alter the actual GitHub repository.")
        elif self.action == "import":
            self.parser.set_usage("usage: %prog import [options] github_user github_repo")
            self.parser.set_description("Import a role.")
            self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
            self.parser.add_option('--branch', dest='reference',
                                   help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
            self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
            self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
                                   help='Check the status of the most recent import request for given github_user/github_repo.')
        elif self.action == "info":
            self.parser.set_usage("usage: %prog info [options] role_name[,version]")
            self.parser.set_description("View more details about a specific role.")
        elif self.action == "init":
            self.parser.set_usage("usage: %prog init [options] role_name")
            self.parser.set_description("Initialize new role with the base structure of a role.")
            self.parser.add_option('--init-path', dest='init_path', default="./",
                                   help='The path in which the skeleton role will be created. The default is the current working directory.')
            self.parser.add_option('--type', dest='role_type', action='store', default='default',
                                   help="Initialize using an alternate role type. Valid types include: 'container', 'apb' and 'network'.")
            self.parser.add_option('--role-skeleton', dest='role_skeleton', default=C.GALAXY_ROLE_SKELETON,
                                   help='The path to a role skeleton that the new role should be based upon.')
        elif self.action == "install":
            self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
            self.parser.set_description("Install Roles from file(s), URL(s) or tar file(s)")
            self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                                   help='Ignore errors and continue with the next specified role.')
            self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
            self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
            # NOTE(review): the help text appears inverted relative to the
            # option name (--keep-scm-meta should favor the scm archive) --
            # confirm against the galaxy role packaging docs.
            self.parser.add_option('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
                                   default=False, help='Use tar instead of the scm archive option when packaging the role')
        elif self.action == "remove":
            self.parser.set_usage("usage: %prog remove role1 role2 ...")
            self.parser.set_description("Delete a role from roles_path.")
        elif self.action == "list":
            self.parser.set_usage("usage: %prog list [role_name]")
            self.parser.set_description("Show the name and version of each role installed in the roles_path.")
        elif self.action == "login":
            self.parser.set_usage("usage: %prog login [options]")
            self.parser.set_description("Login to api.github.com server in order to use ansible-galaxy sub command such as 'import', 'delete' and 'setup'.")
            self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
        elif self.action == "search":
            self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] "
                                  "[--author username]")
            self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
            self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
            self.parser.add_option('--author', dest='author', help='GitHub username')
            self.parser.set_description("Search the Galaxy database by tags, platforms, author and multiple keywords.")
        elif self.action == "setup":
            self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
            self.parser.add_option('--remove', dest='remove_id', default=None,
                                   help='Remove the integration matching the provided ID value. Use --list to see ID values.')
            self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
            self.parser.set_description("Manage the integration between Galaxy and the given source.")

        # options that apply to more than one action
        if self.action in ['init', 'info']:
            self.parser.add_option('--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")

        if self.action not in ("delete", "import", "init", "login", "setup"):
            # NOTE: while the option type=str, the default is a list, and the
            # callback will set the value to a list.
            self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.unfrack_paths, default=C.DEFAULT_ROLES_PATH,
                                   help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg'
                                        ' file (/etc/ansible/roles if not configured)', type='str')
        if self.action in ("init", "install"):
            self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
|
||||
|
||||
    def parse(self):
        ''' create an options parser for bin/ansible-galaxy and parse argv '''

        self.parser = CLI.base_parser(
            usage="usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
            epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]),
            desc="Perform various Role related operations.",
        )

        # common options shared by every sub-command
        self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
        self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
                               help='Ignore SSL certificate validation errors.')
        # add per-action options before argv is actually parsed
        self.set_action()

        super(GalaxyCLI, self).parse()

        display.verbosity = self.options.verbosity
        # Galaxy context object holding roles paths, skeleton paths, etc.
        self.galaxy = Galaxy(self.options)
|
||||
|
||||
    def run(self):
        """Create the API client and dispatch to execute_<action>()."""

        super(GalaxyCLI, self).run()

        self.api = GalaxyAPI(self.galaxy)
        # Base-class helper that calls the execute_<self.action> method.
        self.execute()
|
||||
|
||||
def exit_without_ignore(self, rc=1):
|
||||
"""
|
||||
Exits with the specified return code unless the
|
||||
option --ignore-errors was specified
|
||||
"""
|
||||
if not self.options.ignore_errors:
|
||||
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
|
||||
|
||||
def _display_role_info(self, role_info):
|
||||
|
||||
text = [u"", u"Role: %s" % to_text(role_info['name'])]
|
||||
text.append(u"\tdescription: %s" % role_info.get('description', ''))
|
||||
|
||||
for k in sorted(role_info.keys()):
|
||||
|
||||
if k in self.SKIP_INFO_KEYS:
|
||||
continue
|
||||
|
||||
if isinstance(role_info[k], dict):
|
||||
text.append(u"\t%s:" % (k))
|
||||
for key in sorted(role_info[k].keys()):
|
||||
if key in self.SKIP_INFO_KEYS:
|
||||
continue
|
||||
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
|
||||
else:
|
||||
text.append(u"\t%s: %s" % (k, role_info[k]))
|
||||
|
||||
return u'\n'.join(text)
|
||||
|
||||
############################
|
||||
# execute actions
|
||||
############################
|
||||
|
||||
    def execute_init(self):
        """
        creates the skeleton framework of a role that complies with the galaxy metadata format.
        """

        init_path = self.options.init_path
        force = self.options.force
        role_skeleton = self.options.role_skeleton

        # role name is the first positional argument
        role_name = self.args.pop(0).strip() if self.args else None
        if not role_name:
            raise AnsibleOptionsError("- no role name specified for init")
        role_path = os.path.join(init_path, role_name)
        if os.path.exists(role_path):
            if os.path.isfile(role_path):
                raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
            elif not force:
                # NOTE(review): the concatenated message is missing a
                # separator after "exists." -- it renders as
                # "...already exists.you can use --force..."
                raise AnsibleError("- the directory %s already exists."
                                   "you can use --force to re-initialize this directory,\n"
                                   "however it will reset any main.yml files that may have\n"
                                   "been modified there already." % role_path)

        # placeholder values rendered into the skeleton's .j2 templates
        inject_data = dict(
            role_name=role_name,
            author='your name',
            description='your description',
            company='your company (optional)',
            license='license (GPLv2, CC-BY, etc)',
            issue_tracker_url='http://example.com/issue/tracker',
            min_ansible_version='2.4',
            role_type=self.options.role_type
        )

        # create role directory
        if not os.path.exists(role_path):
            os.makedirs(role_path)

        if role_skeleton is not None:
            skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
        else:
            # no custom skeleton requested: use the one bundled with ansible
            role_skeleton = self.galaxy.default_role_skeleton_path
            skeleton_ignore_expressions = ['^.*/.git_keep$']

        role_skeleton = os.path.expanduser(role_skeleton)
        skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]

        template_env = Environment(loader=FileSystemLoader(role_skeleton))

        # mirror the skeleton tree into role_path, rendering .j2 files
        # (except those under templates/, which must stay as templates)
        for root, dirs, files in os.walk(role_skeleton, topdown=True):
            rel_root = os.path.relpath(root, role_skeleton)
            in_templates_dir = rel_root.split(os.sep, 1)[0] == 'templates'
            # prune ignored directories in place so os.walk skips them
            dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]

            for f in files:
                filename, ext = os.path.splitext(f)
                if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
                    continue
                elif ext == ".j2" and not in_templates_dir:
                    # render the template; the .j2 suffix is dropped
                    src_template = os.path.join(rel_root, f)
                    dest_file = os.path.join(role_path, rel_root, filename)
                    template_env.get_template(src_template).stream(inject_data).dump(dest_file)
                else:
                    f_rel_path = os.path.relpath(os.path.join(root, f), role_skeleton)
                    shutil.copyfile(os.path.join(root, f), os.path.join(role_path, f_rel_path))

            for d in dirs:
                dir_path = os.path.join(role_path, rel_root, d)
                if not os.path.exists(dir_path):
                    os.makedirs(dir_path)

        display.display("- %s was created successfully" % role_name)
|
||||
|
||||
def execute_info(self):
|
||||
"""
|
||||
prints out detailed information about an installed role as well as info available from the galaxy API.
|
||||
"""
|
||||
|
||||
if len(self.args) == 0:
|
||||
# the user needs to specify a role
|
||||
raise AnsibleOptionsError("- you must specify a user/role name")
|
||||
|
||||
roles_path = self.options.roles_path
|
||||
|
||||
data = ''
|
||||
for role in self.args:
|
||||
|
||||
role_info = {'path': roles_path}
|
||||
gr = GalaxyRole(self.galaxy, role)
|
||||
|
||||
install_info = gr.install_info
|
||||
if install_info:
|
||||
if 'version' in install_info:
|
||||
install_info['intalled_version'] = install_info['version']
|
||||
del install_info['version']
|
||||
role_info.update(install_info)
|
||||
|
||||
remote_data = False
|
||||
if not self.options.offline:
|
||||
remote_data = self.api.lookup_role_by_name(role, False)
|
||||
|
||||
if remote_data:
|
||||
role_info.update(remote_data)
|
||||
|
||||
if gr.metadata:
|
||||
role_info.update(gr.metadata)
|
||||
|
||||
req = RoleRequirement()
|
||||
role_spec = req.role_yaml_parse({'role': role})
|
||||
if role_spec:
|
||||
role_info.update(role_spec)
|
||||
|
||||
data = self._display_role_info(role_info)
|
||||
# FIXME: This is broken in both 1.9 and 2.0 as
|
||||
# _display_role_info() always returns something
|
||||
if not data:
|
||||
data = u"\n- the role %s was not found" % role
|
||||
|
||||
self.pager(data)
|
||||
|
||||
    def execute_install(self):
        """
        uses the args list of roles to be installed, unless -f was specified. The list of roles
        can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.
        """
        role_file = self.options.role_file

        if len(self.args) == 0 and role_file is None:
            # the user needs to specify one of either --role-file or specify a single user/role name
            raise AnsibleOptionsError("- you must specify a user/role name or a roles file")

        no_deps = self.options.no_deps
        force = self.options.force

        # work queue of GalaxyRole objects; dependencies get appended as
        # they are discovered below
        roles_left = []
        if role_file:
            try:
                # NOTE(review): f.close() is not in a finally block, so the
                # handle leaks if an AnsibleError is raised while parsing.
                f = open(role_file, 'r')
                if role_file.endswith('.yaml') or role_file.endswith('.yml'):
                    try:
                        required_roles = yaml.safe_load(f.read())
                    except Exception as e:
                        raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)

                    if required_roles is None:
                        raise AnsibleError("No roles found in file: %s" % role_file)

                    for role in required_roles:
                        if "include" not in role:
                            role = RoleRequirement.role_yaml_parse(role)
                            display.vvv("found role %s in yaml file" % str(role))
                            if "name" not in role and "scm" not in role:
                                raise AnsibleError("Must specify name or src for role")
                            roles_left.append(GalaxyRole(self.galaxy, **role))
                        else:
                            # 'include' entries point at a nested requirements file
                            with open(role["include"]) as f_include:
                                try:
                                    roles_left += [
                                        GalaxyRole(self.galaxy, **r) for r in
                                        (RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))
                                    ]
                                except Exception as e:
                                    msg = "Unable to load data from the include requirements file: %s %s"
                                    raise AnsibleError(msg % (role_file, e))
                else:
                    raise AnsibleError("Invalid role requirements file")
                f.close()
            except (IOError, OSError) as e:
                raise AnsibleError('Unable to open %s: %s' % (role_file, str(e)))
        else:
            # roles were specified directly, so we'll just go out grab them
            # (and their dependencies, unless the user doesn't want us to).
            for rname in self.args:
                role = RoleRequirement.role_yaml_parse(rname.strip())
                roles_left.append(GalaxyRole(self.galaxy, **role))

        for role in roles_left:
            # only process roles in roles files when names matches if given
            if role_file and self.args and role.name not in self.args:
                display.vvv('Skipping role %s' % role.name)
                continue

            display.vvv('Processing role %s ' % role.name)

            # query the galaxy API for the role data

            if role.install_info is not None:
                # role is already installed locally
                if role.install_info['version'] != role.version or force:
                    if force:
                        display.display('- changing role %s from %s to %s' %
                                        (role.name, role.install_info['version'], role.version or "unspecified"))
                        role.remove()
                    else:
                        display.warning('- %s (%s) is already installed - use --force to change version to %s' %
                                        (role.name, role.install_info['version'], role.version or "unspecified"))
                        continue
                else:
                    if not force:
                        display.display('- %s is already installed, skipping.' % str(role))
                        continue

            try:
                installed = role.install()
            except AnsibleError as e:
                display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e)))
                # raises unless --ignore-errors was given
                self.exit_without_ignore()
                continue

            # install dependencies, if we want them
            if not no_deps and installed:
                if not role.metadata:
                    display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
                else:
                    role_dependencies = role.metadata.get('dependencies') or []
                    for dep in role_dependencies:
                        display.debug('Installing dep %s' % dep)
                        dep_req = RoleRequirement()
                        dep_info = dep_req.role_yaml_parse(dep)
                        dep_role = GalaxyRole(self.galaxy, **dep_info)
                        if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
                            # we know we can skip this, as it's not going to
                            # be found on galaxy.ansible.com
                            continue
                        if dep_role.install_info is None:
                            if dep_role not in roles_left:
                                display.display('- adding dependency: %s' % str(dep_role))
                                # appended to the list being iterated: deps are
                                # processed in this same loop on later passes
                                roles_left.append(dep_role)
                            else:
                                display.display('- dependency %s already pending installation.' % dep_role.name)
                        else:
                            if dep_role.install_info['version'] != dep_role.version:
                                display.warning('- dependency %s from role %s differs from already installed version (%s), skipping' %
                                                (str(dep_role), role.name, dep_role.install_info['version']))
                            else:
                                display.display('- dependency %s is already installed, skipping.' % dep_role.name)

            if not installed:
                display.warning("- %s was NOT installed successfully." % role.name)
                self.exit_without_ignore()

        return 0
|
||||
|
||||
def execute_remove(self):
|
||||
"""
|
||||
removes the list of roles passed as arguments from the local system.
|
||||
"""
|
||||
|
||||
if len(self.args) == 0:
|
||||
raise AnsibleOptionsError('- you must specify at least one role to remove.')
|
||||
|
||||
for role_name in self.args:
|
||||
role = GalaxyRole(self.galaxy, role_name)
|
||||
try:
|
||||
if role.remove():
|
||||
display.display('- successfully removed %s' % role_name)
|
||||
else:
|
||||
display.display('- %s is not installed, skipping.' % role_name)
|
||||
except Exception as e:
|
||||
raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
|
||||
|
||||
return 0
|
||||
|
||||
def execute_list(self):
|
||||
"""
|
||||
lists the roles installed on the local system or matches a single role passed as an argument.
|
||||
"""
|
||||
|
||||
if len(self.args) > 1:
|
||||
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
|
||||
|
||||
if len(self.args) == 1:
|
||||
# show only the request role, if it exists
|
||||
name = self.args.pop()
|
||||
gr = GalaxyRole(self.galaxy, name)
|
||||
if gr.metadata:
|
||||
install_info = gr.install_info
|
||||
version = None
|
||||
if install_info:
|
||||
version = install_info.get("version", None)
|
||||
if not version:
|
||||
version = "(unknown version)"
|
||||
# show some more info about single roles here
|
||||
display.display("- %s, %s" % (name, version))
|
||||
else:
|
||||
display.display("- the role %s was not found" % name)
|
||||
else:
|
||||
# show all valid roles in the roles_path directory
|
||||
roles_path = self.options.roles_path
|
||||
path_found = False
|
||||
for path in roles_path:
|
||||
role_path = os.path.expanduser(path)
|
||||
if not os.path.exists(role_path):
|
||||
display.warning("- the configured path %s does not exist." % role_path)
|
||||
continue
|
||||
elif not os.path.isdir(role_path):
|
||||
display.warning("- the configured path %s, exists, but it is not a directory." % role_path)
|
||||
continue
|
||||
path_files = os.listdir(role_path)
|
||||
path_found = True
|
||||
for path_file in path_files:
|
||||
gr = GalaxyRole(self.galaxy, path_file, path=path)
|
||||
if gr.metadata:
|
||||
install_info = gr.install_info
|
||||
version = None
|
||||
if install_info:
|
||||
version = install_info.get("version", None)
|
||||
if not version:
|
||||
version = "(unknown version)"
|
||||
display.display("- %s, %s" % (path_file, version))
|
||||
if not path_found:
|
||||
raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path")
|
||||
return 0
|
||||
|
||||
def execute_search(self):
|
||||
''' searches for roles on the Ansible Galaxy server'''
|
||||
page_size = 1000
|
||||
search = None
|
||||
|
||||
if len(self.args):
|
||||
terms = []
|
||||
for i in range(len(self.args)):
|
||||
terms.append(self.args.pop())
|
||||
search = '+'.join(terms[::-1])
|
||||
|
||||
if not search and not self.options.platforms and not self.options.galaxy_tags and not self.options.author:
|
||||
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
|
||||
|
||||
response = self.api.search_roles(search, platforms=self.options.platforms,
|
||||
tags=self.options.galaxy_tags, author=self.options.author, page_size=page_size)
|
||||
|
||||
if response['count'] == 0:
|
||||
display.display("No roles match your search.", color=C.COLOR_ERROR)
|
||||
return True
|
||||
|
||||
data = [u'']
|
||||
|
||||
if response['count'] > page_size:
|
||||
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
|
||||
else:
|
||||
data.append(u"Found %d roles matching your search:" % response['count'])
|
||||
|
||||
max_len = []
|
||||
for role in response['results']:
|
||||
max_len.append(len(role['username'] + '.' + role['name']))
|
||||
name_len = max(max_len)
|
||||
format_str = u" %%-%ds %%s" % name_len
|
||||
data.append(u'')
|
||||
data.append(format_str % (u"Name", u"Description"))
|
||||
data.append(format_str % (u"----", u"-----------"))
|
||||
for role in response['results']:
|
||||
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
|
||||
|
||||
data = u'\n'.join(data)
|
||||
self.pager(data)
|
||||
|
||||
return True
|
||||
|
||||
    def execute_login(self):
        """
        verify user's identify via Github and retrieve an auth token from Ansible Galaxy.
        """
        # Authenticate with github and retrieve a token
        if self.options.token is None:
            if C.GALAXY_TOKEN:
                github_token = C.GALAXY_TOKEN
            else:
                # interactive login: creates a temporary github token
                login = GalaxyLogin(self.galaxy)
                github_token = login.create_github_token()
        else:
            github_token = self.options.token

        galaxy_response = self.api.authenticate(github_token)

        if self.options.token is None and C.GALAXY_TOKEN is None:
            # Remove the token we created
            # NOTE(review): 'login' is only bound when C.GALAXY_TOKEN was
            # falsy above; this guard relies on a falsy GALAXY_TOKEN also
            # being None -- confirm against constants handling.
            login.remove_github_token()

        # Store the Galaxy token
        token = GalaxyToken()
        token.set(galaxy_response['token'])

        display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
        return 0
|
||||
|
||||
    def execute_import(self):
        """ used to import a role into Ansible Galaxy """

        # map server-side message levels to display colors
        colors = {
            'INFO': 'normal',
            'WARNING': C.COLOR_WARN,
            'ERROR': C.COLOR_ERROR,
            'SUCCESS': C.COLOR_OK,
            'FAILED': C.COLOR_ERROR,
        }

        if len(self.args) < 2:
            raise AnsibleError("Expected a github_username and github_repository. Use --help.")

        # args are consumed right-to-left: repo was given last
        github_repo = to_text(self.args.pop(), errors='surrogate_or_strict')
        github_user = to_text(self.args.pop(), errors='surrogate_or_strict')

        if self.options.check_status:
            task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
        else:
            # Submit an import request
            task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference, role_name=self.options.role_name)

            if len(task) > 1:
                # found multiple roles associated with github_user/github_repo
                display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
                                color='yellow')
                display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
                for t in task:
                    display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
                display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
                                color=C.COLOR_CHANGED)
                return 0
            # found a single role as expected
            display.display("Successfully submitted import request %d" % task[0]['id'])
            if not self.options.wait:
                display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
                display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))

        if self.options.check_status or self.options.wait:
            # Get the status of the import: poll the server until the task
            # reaches a terminal state, echoing new messages as they appear
            msg_list = []
            finished = False
            while not finished:
                task = self.api.get_import_task(task_id=task[0]['id'])
                for msg in task[0]['summary_fields']['task_messages']:
                    if msg['id'] not in msg_list:
                        display.display(msg['message_text'], color=colors[msg['message_type']])
                        msg_list.append(msg['id'])
                if task[0]['state'] in ['SUCCESS', 'FAILED']:
                    finished = True
                else:
                    time.sleep(10)

        return 0
|
||||
|
||||
def execute_setup(self):
|
||||
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
|
||||
|
||||
if self.options.setup_list:
|
||||
# List existing integration secrets
|
||||
secrets = self.api.list_secrets()
|
||||
if len(secrets) == 0:
|
||||
# None found
|
||||
display.display("No integrations found.")
|
||||
return 0
|
||||
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
|
||||
display.display("---------- ---------- ----------", color=C.COLOR_OK)
|
||||
for secret in secrets:
|
||||
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
|
||||
secret['github_repo']), color=C.COLOR_OK)
|
||||
return 0
|
||||
|
||||
if self.options.remove_id:
|
||||
# Remove a secret
|
||||
self.api.remove_secret(self.options.remove_id)
|
||||
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
|
||||
return 0
|
||||
|
||||
if len(self.args) < 4:
|
||||
raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
|
||||
|
||||
secret = self.args.pop()
|
||||
github_repo = self.args.pop()
|
||||
github_user = self.args.pop()
|
||||
source = self.args.pop()
|
||||
|
||||
resp = self.api.add_secret(source, github_user, github_repo, secret)
|
||||
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
|
||||
|
||||
return 0
|
||||
|
||||
def execute_delete(self):
|
||||
""" Delete a role from Ansible Galaxy. """
|
||||
|
||||
if len(self.args) < 2:
|
||||
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
|
||||
|
||||
github_repo = self.args.pop()
|
||||
github_user = self.args.pop()
|
||||
resp = self.api.delete_role(github_user, github_repo)
|
||||
|
||||
if len(resp['deleted_roles']) > 1:
|
||||
display.display("Deleted the following roles:")
|
||||
display.display("ID User Name")
|
||||
display.display("------ --------------- ----------")
|
||||
for role in resp['deleted_roles']:
|
||||
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
|
||||
|
||||
display.display(resp['status'])
|
||||
|
||||
return True
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/cli/galaxy.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/cli/galaxy.pyc
Normal file
Binary file not shown.
385
.ve/lib/python2.7/site-packages/ansible/cli/inventory.py
Normal file
385
.ve/lib/python2.7/site-packages/ansible/cli/inventory.py
Normal file
@@ -0,0 +1,385 @@
|
||||
# (c) 2017, Brian Coca <bcoca@ansible.com>
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import optparse
|
||||
from operator import attrgetter
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.cli import CLI
|
||||
from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
from ansible.inventory.host import Host
|
||||
from ansible.plugins.loader import vars_loader
|
||||
from ansible.parsing.dataloader import DataLoader
|
||||
from ansible.utils.vars import combine_vars
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
# Magic/runtime variable names injected by Ansible itself; presumably
# filtered out of host/group vars by _remove_internal() before display so
# the output shows only user-defined data -- confirm against that helper.
INTERNAL_VARS = frozenset(['ansible_diff_mode',
                           'ansible_facts',
                           'ansible_forks',
                           'ansible_inventory_sources',
                           'ansible_limit',
                           'ansible_playbook_python',
                           'ansible_run_tags',
                           'ansible_skip_tags',
                           'ansible_verbosity',
                           'ansible_version',
                           'inventory_dir',
                           'inventory_file',
                           'inventory_hostname',
                           'inventory_hostname_short',
                           'groups',
                           'group_names',
                           'omit',
                           'playbook_dir', ])
|
||||
|
||||
|
||||
class InventoryCLI(CLI):
    ''' used to display or dump the configured inventory as Ansible sees it '''

    # Help text for the optional positional argument of each action.
    ARGUMENTS = {'host': 'The name of a host to match in the inventory, relevant when using --list',
                 'group': 'The name of a group in the inventory, relevant when using --graph', }
|
||||
|
||||
    def __init__(self, args):
        """Initialize CLI state; inventory objects are created in run()."""

        super(InventoryCLI, self).__init__(args)
        # Filled in by run(): variable manager, data loader and inventory.
        self.vm = None
        self.loader = None
        self.inventory = None

        # True when the 2.4+ _play_prereqs API is available; run() flips
        # this to False when falling back to pre-2.4 initialization.
        self._new_api = True
|
||||
|
||||
    def parse(self):
        """Build the option parser, parse argv and validate action options."""

        self.parser = CLI.base_parser(
            usage='usage: %prog [options] [host|group]',
            epilog='Show Ansible inventory information, by default it uses the inventory script JSON format',
            inventory_opts=True,
            vault_opts=True,
            basedir_opts=True,
        )

        # remove unused default options
        self.parser.remove_option('--limit')
        self.parser.remove_option('--list-hosts')

        # Actions
        action_group = optparse.OptionGroup(self.parser, "Actions", "One of following must be used on invocation, ONLY ONE!")
        action_group.add_option("--list", action="store_true", default=False, dest='list', help='Output all hosts info, works as inventory script')
        action_group.add_option("--host", action="store", default=None, dest='host', help='Output specific host info, works as inventory script')
        action_group.add_option("--graph", action="store_true", default=False, dest='graph',
                                help='create inventory graph, if supplying pattern it must be a valid group name')
        self.parser.add_option_group(action_group)

        # graph
        self.parser.add_option("-y", "--yaml", action="store_true", default=False, dest='yaml',
                               help='Use YAML format instead of default JSON, ignored for --graph')
        self.parser.add_option("--vars", action="store_true", default=False, dest='show_vars',
                               help='Add vars to graph display, ignored unless used with --graph')

        # list
        self.parser.add_option("--export", action="store_true", default=C.INVENTORY_EXPORT, dest='export',
                               help="When doing an --list, represent in a way that is optimized for export,"
                                    "not as an accurate representation of how Ansible has processed it")
        # self.parser.add_option("--ignore-vars-plugins", action="store_true", default=False, dest='ignore_vars_plugins',
        #                        help="When doing an --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")

        super(InventoryCLI, self).parse()

        display.verbosity = self.options.verbosity

        self.validate_conflicts(vault_opts=True)

        # there can be only one! and, at least, one!
        used = 0
        for opt in (self.options.list, self.options.host, self.options.graph):
            if opt:
                used += 1
        if used == 0:
            raise AnsibleOptionsError("No action selected, at least one of --host, --graph or --list needs to be specified.")
        elif used > 1:
            raise AnsibleOptionsError("Conflicting options used, only one of --host, --graph or --list can be used at the same time.")

        # set host pattern to default if not supplied
        if len(self.args) > 0:
            self.options.pattern = self.args[0]
        else:
            self.options.pattern = 'all'
|
||||
|
||||
    def run(self):
        """Entry point for ansible-inventory: build inventory objects, then
        dispatch on exactly one of --host / --graph / --list (parse() enforced
        that exactly one is set).

        Exits the process: status 0 when output was produced, 1 otherwise.
        """

        results = None

        super(InventoryCLI, self).run()

        # Initialize needed objects
        if getattr(self, '_play_prereqs', False):
            # Ansible >= 2.4: shared helper builds loader, inventory and variable manager
            self.loader, self.inventory, self.vm = self._play_prereqs(self.options)
        else:
            # fallback to pre 2.4 way of initialzing
            from ansible.vars import VariableManager
            from ansible.inventory import Inventory

            self._new_api = False
            self.loader = DataLoader()
            self.vm = VariableManager()

            # use vault if needed
            if self.options.vault_password_file:
                vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=self.loader)
            elif self.options.ask_vault_pass:
                vault_pass = self.ask_vault_passwords()
            else:
                vault_pass = None

            if vault_pass:
                self.loader.set_vault_password(vault_pass)

            # actually get inventory and vars
            self.inventory = Inventory(loader=self.loader, variable_manager=self.vm, host_list=self.options.inventory)
            self.vm.set_inventory(self.inventory)

        if self.options.host:
            # --host: print the merged variables of a single host
            hosts = self.inventory.get_hosts(self.options.host)
            if len(hosts) != 1:
                raise AnsibleOptionsError("You must pass a single valid host to --host parameter")

            myvars = self._get_host_variables(host=hosts[0])
            self._remove_internal(myvars)

            # FIXME: should we template first?
            results = self.dump(myvars)

        elif self.options.graph:
            results = self.inventory_graph()
        elif self.options.list:
            # --list: serialize the whole tree rooted at 'all'
            top = self._get_group('all')
            if self.options.yaml:
                results = self.yaml_inventory(top)
            else:
                results = self.json_inventory(top)
            results = self.dump(results)

        if results:
            # FIXME: pager?
            display.display(results)
            exit(0)

        exit(1)
def dump(self, stuff):
|
||||
|
||||
if self.options.yaml:
|
||||
import yaml
|
||||
from ansible.parsing.yaml.dumper import AnsibleDumper
|
||||
results = yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False)
|
||||
else:
|
||||
import json
|
||||
from ansible.parsing.ajson import AnsibleJSONEncoder
|
||||
results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=True, indent=4)
|
||||
|
||||
return results
|
||||
|
||||
# FIXME: refactor to use same for VM
|
||||
def get_plugin_vars(self, path, entity):
|
||||
|
||||
data = {}
|
||||
|
||||
def _get_plugin_vars(plugin, path, entities):
|
||||
data = {}
|
||||
try:
|
||||
data = plugin.get_vars(self.loader, path, entity)
|
||||
except AttributeError:
|
||||
try:
|
||||
if isinstance(entity, Host):
|
||||
data = combine_vars(data, plugin.get_host_vars(entity.name))
|
||||
else:
|
||||
data = combine_vars(data, plugin.get_group_vars(entity.name))
|
||||
except AttributeError:
|
||||
if hasattr(plugin, 'run'):
|
||||
raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
|
||||
else:
|
||||
raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
|
||||
return data
|
||||
|
||||
for plugin in vars_loader.all():
|
||||
data = combine_vars(data, _get_plugin_vars(plugin, path, entity))
|
||||
|
||||
return data
|
||||
|
||||
def _get_group_variables(self, group):
|
||||
|
||||
# get info from inventory source
|
||||
res = group.get_vars()
|
||||
|
||||
# FIXME: add switch to skip vars plugins, add vars plugin info
|
||||
for inventory_dir in self.inventory._sources:
|
||||
res = combine_vars(res, self.get_plugin_vars(inventory_dir, group))
|
||||
|
||||
if group.priority != 1:
|
||||
res['ansible_group_priority'] = group.priority
|
||||
|
||||
return res
|
||||
|
||||
def _get_host_variables(self, host):
|
||||
|
||||
if self.options.export:
|
||||
hostvars = host.get_vars()
|
||||
|
||||
# FIXME: add switch to skip vars plugins
|
||||
# add vars plugin info
|
||||
for inventory_dir in self.inventory._sources:
|
||||
hostvars = combine_vars(hostvars, self.get_plugin_vars(inventory_dir, host))
|
||||
else:
|
||||
if self._new_api:
|
||||
hostvars = self.vm.get_vars(host=host, include_hostvars=False)
|
||||
else:
|
||||
hostvars = self.vm.get_vars(self.loader, host=host, include_hostvars=False)
|
||||
|
||||
return hostvars
|
||||
|
||||
def _get_group(self, gname):
|
||||
if self._new_api:
|
||||
group = self.inventory.groups.get(gname)
|
||||
else:
|
||||
group = self.inventory.get_group(gname)
|
||||
return group
|
||||
|
||||
def _remove_internal(self, dump):
|
||||
|
||||
for internal in INTERNAL_VARS:
|
||||
if internal in dump:
|
||||
del dump[internal]
|
||||
|
||||
def _remove_empty(self, dump):
|
||||
# remove empty keys
|
||||
for x in ('hosts', 'vars', 'children'):
|
||||
if x in dump and not dump[x]:
|
||||
del dump[x]
|
||||
|
||||
def _show_vars(self, dump, depth):
|
||||
result = []
|
||||
self._remove_internal(dump)
|
||||
if self.options.show_vars:
|
||||
for (name, val) in sorted(dump.items()):
|
||||
result.append(self._graph_name('{%s = %s}' % (name, val), depth))
|
||||
return result
|
||||
|
||||
def _graph_name(self, name, depth=0):
|
||||
if depth:
|
||||
name = " |" * (depth) + "--%s" % name
|
||||
return name
|
||||
|
||||
def _graph_group(self, group, depth=0):
|
||||
|
||||
result = [self._graph_name('@%s:' % group.name, depth)]
|
||||
depth = depth + 1
|
||||
for kid in sorted(group.child_groups, key=attrgetter('name')):
|
||||
result.extend(self._graph_group(kid, depth))
|
||||
|
||||
if group.name != 'all':
|
||||
for host in sorted(group.hosts, key=attrgetter('name')):
|
||||
result.append(self._graph_name(host.name, depth))
|
||||
result.extend(self._show_vars(host.get_vars(), depth + 1))
|
||||
|
||||
result.extend(self._show_vars(self._get_group_variables(group), depth))
|
||||
|
||||
return result
|
||||
|
||||
def inventory_graph(self):
|
||||
|
||||
start_at = self._get_group(self.options.pattern)
|
||||
if start_at:
|
||||
return '\n'.join(self._graph_group(start_at))
|
||||
else:
|
||||
raise AnsibleOptionsError("Pattern must be valid group name when using --graph")
|
||||
|
||||
    def json_inventory(self, top):
        """Return the inventory rooted at *top* as a flat dict in the dynamic
        inventory script format: one key per group (with 'hosts'/'children'
        lists) plus a '_meta.hostvars' map of per-host variables.
        """

        def format_group(group):
            # Flat accumulation: each recursive call returns {name: entry, ...}
            # for the whole subtree, merged upward via results.update().
            results = {}
            results[group.name] = {}
            if group.name != 'all':
                results[group.name]['hosts'] = [h.name for h in sorted(group.hosts, key=attrgetter('name'))]
            results[group.name]['children'] = []
            for subgroup in sorted(group.child_groups, key=attrgetter('name')):
                results[group.name]['children'].append(subgroup.name)
                results.update(format_group(subgroup))
            if self.options.export:
                # --export also inlines each group's own vars
                results[group.name]['vars'] = self._get_group_variables(group)

            self._remove_empty(results[group.name])

            return results

        results = format_group(top)

        # populate meta
        results['_meta'] = {'hostvars': {}}
        hosts = self.inventory.get_hosts()
        for host in hosts:
            hvars = self._get_host_variables(host)
            if hvars:
                self._remove_internal(hvars)
                results['_meta']['hostvars'][host.name] = hvars

        return results
    def yaml_inventory(self, top):
        """Return the inventory rooted at *top* as a nested dict suitable for
        YAML output: groups nest recursively under 'children', and each
        host's vars are attached only to the first group that lists it.
        """

        seen = []  # host names whose vars were already emitted once

        def format_group(group):
            results = {}

            # initialize group + vars
            results[group.name] = {}

            # subgroups
            results[group.name]['children'] = {}
            for subgroup in sorted(group.child_groups, key=attrgetter('name')):
                if subgroup.name != 'all':
                    results[group.name]['children'].update(format_group(subgroup))

            # hosts for group
            results[group.name]['hosts'] = {}
            if group.name != 'all':
                for h in sorted(group.hosts, key=attrgetter('name')):
                    myvars = {}
                    if h.name not in seen:  # avoid defining host vars more than once
                        seen.append(h.name)
                        myvars = self._get_host_variables(host=h)
                        self._remove_internal(myvars)
                    results[group.name]['hosts'][h.name] = myvars

            if self.options.export:
                # --export also inlines each group's own vars, when non-empty
                gvars = self._get_group_variables(group)
                if gvars:
                    results[group.name]['vars'] = gvars

            self._remove_empty(results[group.name])

            return results

        return format_group(top)
BIN
.ve/lib/python2.7/site-packages/ansible/cli/inventory.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/cli/inventory.pyc
Normal file
Binary file not shown.
196
.ve/lib/python2.7/site-packages/ansible/cli/playbook.py
Normal file
196
.ve/lib/python2.7/site-packages/ansible/cli/playbook.py
Normal file
@@ -0,0 +1,196 @@
|
||||
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
########################################################
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import stat
|
||||
|
||||
from ansible.cli import CLI
|
||||
from ansible.errors import AnsibleError, AnsibleOptionsError
|
||||
from ansible.executor.playbook_executor import PlaybookExecutor
|
||||
from ansible.playbook.block import Block
|
||||
from ansible.playbook.play_context import PlayContext
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
class PlaybookCLI(CLI):
    ''' the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system.
        See the project home page (https://docs.ansible.com) for more information. '''

    def parse(self):
        """Build the ansible-playbook option parser, parse argv, and validate
        that at least one playbook path was given and options don't conflict.
        """

        # create parser for CLI options
        parser = CLI.base_parser(
            usage="%prog [options] playbook.yml [playbook2 ...]",
            connect_opts=True,
            meta_opts=True,
            runas_opts=True,
            subset_opts=True,
            check_opts=True,
            inventory_opts=True,
            runtask_opts=True,
            vault_opts=True,
            fork_opts=True,
            module_opts=True,
            desc="Runs Ansible playbooks, executing the defined tasks on the targeted hosts.",
        )

        # ansible playbook specific opts
        parser.add_option('--list-tasks', dest='listtasks', action='store_true',
                          help="list all tasks that would be executed")
        parser.add_option('--list-tags', dest='listtags', action='store_true',
                          help="list all available tags")
        parser.add_option('--step', dest='step', action='store_true',
                          help="one-step-at-a-time: confirm each task before running")
        parser.add_option('--start-at-task', dest='start_at_task',
                          help="start the playbook at the task matching this name")

        self.parser = parser
        super(PlaybookCLI, self).parse()

        if len(self.args) == 0:
            raise AnsibleOptionsError("You must specify a playbook file to run")

        display.verbosity = self.options.verbosity
        self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)

    def run(self):
        """Validate the playbook paths, gather connection/become passwords if
        needed, then execute via PlaybookExecutor.

        :returns: 0 for the --list-* display modes, otherwise the executor's
            result (an exit code when plays actually ran).
        """

        super(PlaybookCLI, self).run()

        # Manage passwords
        sshpass = None
        becomepass = None
        passwords = {}

        # initial error check, to make sure all specified playbooks are accessible
        # before we start running anything through the playbook executor
        for playbook in self.args:
            if not os.path.exists(playbook):
                raise AnsibleError("the playbook: %s could not be found" % playbook)
            if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
                raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)

        # don't deal with privilege escalation or passwords when we don't need to
        if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax:
            self.normalize_become_options()
            (sshpass, becomepass) = self.ask_passwords()
            passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

        loader, inventory, variable_manager = self._play_prereqs(self.options)

        # Note: slightly wrong, this is written so that implicit localhost
        # (which is not returned in list_hosts()) is taken into account for
        # warning if inventory is empty.  But it can't be taken into account for
        # checking if limit doesn't match any hosts.  Instead we don't worry about
        # limit if only implicit localhost was in inventory to start with.
        #
        # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
        # NOTE(review): 'hosts' is unused below; presumably get_host_list is
        # called for its empty-inventory/limit checks — confirm against CLI.
        hosts = CLI.get_host_list(inventory, self.options.subset)

        # flush fact cache if requested
        if self.options.flush_cache:
            self._flush_cache(inventory, variable_manager)

        # create the playbook executor, which manages running the plays via a task queue manager
        pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options,
                                passwords=passwords)

        results = pbex.run()

        # a list result means one of the --list-* modes: render it instead of
        # treating it as an exit code
        if isinstance(results, list):
            for p in results:

                display.display('\nplaybook: %s' % p['playbook'])
                for idx, play in enumerate(p['plays']):
                    if play._included_path is not None:
                        loader.set_basedir(play._included_path)
                    else:
                        pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
                        loader.set_basedir(pb_dir)

                    msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
                    mytags = set(play.tags)
                    msg += '\tTAGS: [%s]' % (','.join(mytags))

                    if self.options.listhosts:
                        playhosts = set(inventory.get_hosts(play.hosts))
                        msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
                        for host in playhosts:
                            msg += "\n %s" % host

                    display.display(msg)

                    all_tags = set()
                    if self.options.listtags or self.options.listtasks:
                        taskmsg = ''
                        if self.options.listtasks:
                            taskmsg = ' tasks:\n'

                        def _process_block(b):
                            # Render one block's tasks, recursing into nested
                            # blocks; 'meta' tasks are skipped. Also feeds
                            # all_tags (closure) for the --list-tags summary.
                            taskmsg = ''
                            for task in b.block:
                                if isinstance(task, Block):
                                    taskmsg += _process_block(task)
                                else:
                                    if task.action == 'meta':
                                        continue

                                    all_tags.update(task.tags)
                                    if self.options.listtasks:
                                        cur_tags = list(mytags.union(set(task.tags)))
                                        cur_tags.sort()
                                        if task.name:
                                            taskmsg += " %s" % task.get_name()
                                        else:
                                            taskmsg += " %s" % task.action
                                        taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)

                            return taskmsg

                        all_vars = variable_manager.get_vars(play=play)
                        play_context = PlayContext(play=play, options=self.options)
                        for block in play.compile():
                            block = block.filter_tagged_tasks(play_context, all_vars)
                            if not block.has_tasks():
                                continue
                            taskmsg += _process_block(block)

                        if self.options.listtags:
                            cur_tags = list(mytags.union(all_tags))
                            cur_tags.sort()
                            taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)

                        display.display(taskmsg)

            return 0
        else:
            return results

    def _flush_cache(self, inventory, variable_manager):
        """Clear cached facts for every inventory host (--flush-cache)."""
        for host in inventory.list_hosts():
            hostname = host.get_name()
            variable_manager.clear_facts(hostname)
BIN
.ve/lib/python2.7/site-packages/ansible/cli/playbook.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/cli/playbook.pyc
Normal file
Binary file not shown.
340
.ve/lib/python2.7/site-packages/ansible/cli/pull.py
Normal file
340
.ve/lib/python2.7/site-packages/ansible/cli/pull.py
Normal file
@@ -0,0 +1,340 @@
|
||||
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
########################################################
|
||||
import datetime
|
||||
import os
|
||||
import platform
|
||||
import random
|
||||
import shutil
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
|
||||
from ansible.cli import CLI
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleOptionsError
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
from ansible.plugins.loader import module_loader
|
||||
from ansible.utils.cmd_functions import run_cmd
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
########################################################
|
||||
|
||||
class PullCLI(CLI):
    ''' is used to up a remote copy of ansible on each managed node,
        each set to run via cron and update playbook source via a source repository.
        This inverts the default *push* architecture of ansible into a *pull* architecture,
        which has near-limitless scaling potential.

        The setup playbook can be tuned to change the cron frequency, logging locations, and parameters to ansible-pull.
        This is useful both for extreme scale-out as well as periodic remediation.
        Usage of the 'fetch' module to retrieve logs from ansible-pull runs would be an
        excellent way to gather and analyze remote logs from ansible-pull.
    '''

    # repo module used when -m/--module-name is not given
    DEFAULT_REPO_TYPE = 'git'
    # playbook tried last when no playbook argument is given
    DEFAULT_PLAYBOOK = 'local.yml'
    REPO_CHOICES = ('git', 'subversion', 'hg', 'bzr')
    # return codes of try_playbook() mapped to human-readable reasons
    PLAYBOOK_ERRORS = {
        1: 'File does not exist',
        2: 'File is not readable',
    }
    SUPPORTED_REPO_MODULES = ['git']
    ARGUMENTS = {'playbook.yml': 'The name of one the YAML format files to run as an Ansible playbook.'
                 'This can be a relative path within the checkout. By default, Ansible will'
                 "look for a playbook based on the host's fully-qualified domain name,"
                 'on the host hostname and finally a playbook named *local.yml*.', }

    SKIP_INVENTORY_DEFAULTS = True

    def _get_inv_cli(self):
        """Rebuild '-i ...' command-line fragments from the parsed inventory
        options, skipping plain names that are neither lists, comma-separated
        host strings, nor existing paths.
        """

        inv_opts = ''
        if getattr(self.options, 'inventory'):
            for inv in self.options.inventory:
                if isinstance(inv, list):
                    inv_opts += " -i '%s' " % ','.join(inv)
                elif ',' in inv or os.path.exists(inv):
                    inv_opts += ' -i %s ' % inv

        return inv_opts

    def parse(self):
        ''' create an options parser for bin/ansible '''

        self.parser = CLI.base_parser(
            usage='%prog -U <repository> [options] [<playbook.yml>]',
            connect_opts=True,
            vault_opts=True,
            runtask_opts=True,
            subset_opts=True,
            check_opts=False,  # prevents conflict of --checkout/-C and --check/-C
            inventory_opts=True,
            module_opts=True,
            runas_prompt_opts=True,
            desc="pulls playbooks from a VCS repo and executes them for the local host",
        )

        # options unique to pull
        self.parser.add_option('--purge', default=False, action='store_true', help='purge checkout after playbook run')
        self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
                               help='only run the playbook if the repository has been updated')
        self.parser.add_option('-s', '--sleep', dest='sleep', default=None,
                               help='sleep for random interval (between 0 and n number of seconds) before starting. '
                                    'This is a useful way to disperse git requests')
        self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
                               help='run the playbook even if the repository could not be updated')
        self.parser.add_option('-d', '--directory', dest='dest', default=None, help='directory to checkout repository to')
        self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository')
        self.parser.add_option('--full', dest='fullclone', action='store_true', help='Do a full clone, instead of a shallow one.')
        self.parser.add_option('-C', '--checkout', dest='checkout',
                               help='branch/tag/commit to checkout. Defaults to behavior of repository module.')
        self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
                               help='adds the hostkey for the repo url if not already added')
        self.parser.add_option('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
                               help='Repository module name, which ansible will use to check out the repo. Choices are %s. Default is %s.'
                                    % (self.REPO_CHOICES, self.DEFAULT_REPO_TYPE))
        self.parser.add_option('--verify-commit', dest='verify', default=False, action='store_true',
                               help='verify GPG signature of checked out commit, if it fails abort running the playbook. '
                                    'This needs the corresponding VCS module to support such an operation')
        self.parser.add_option('--clean', dest='clean', default=False, action='store_true',
                               help='modified files in the working repository will be discarded')
        self.parser.add_option('--track-subs', dest='tracksubs', default=False, action='store_true',
                               help='submodules will track the latest changes. This is equivalent to specifying the --remote flag to git submodule update')
        # add a subset of the check_opts flag group manually, as the full set's
        # shortcodes conflict with above --checkout/-C
        self.parser.add_option("--check", default=False, dest='check', action='store_true',
                               help="don't make any changes; instead, try to predict some of the changes that may occur")
        self.parser.add_option("--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
                               help="when changing (small) files and templates, show the differences in those files; works great with --check")

        super(PullCLI, self).parse()

        if not self.options.dest:
            hostname = socket.getfqdn()
            # use a hostname dependent directory, in case of $HOME on nfs
            self.options.dest = os.path.join('~/.ansible/pull', hostname)
        self.options.dest = os.path.expandvars(os.path.expanduser(self.options.dest))

        if os.path.exists(self.options.dest) and not os.path.isdir(self.options.dest):
            raise AnsibleOptionsError("%s is not a valid or accessible directory." % self.options.dest)

        if self.options.sleep:
            # replace the user-supplied maximum with a concrete random delay
            try:
                secs = random.randint(0, int(self.options.sleep))
                self.options.sleep = secs
            except ValueError:
                raise AnsibleOptionsError("%s is not a number." % self.options.sleep)

        if not self.options.url:
            raise AnsibleOptionsError("URL for repository not specified, use -h for help")

        if self.options.module_name not in self.SUPPORTED_REPO_MODULES:
            raise AnsibleOptionsError("Unsupported repo module %s, choices are %s" % (self.options.module_name, ','.join(self.SUPPORTED_REPO_MODULES)))

        display.verbosity = self.options.verbosity
        self.validate_conflicts(vault_opts=True)

    def run(self):
        ''' use Runner lib to do SSH things '''

        super(PullCLI, self).run()

        # log command line
        now = datetime.datetime.now()
        display.display(now.strftime("Starting Ansible Pull at %F %T"))
        display.display(' '.join(sys.argv))

        # Build Checkout command
        # Now construct the ansible command
        node = platform.node()
        host = socket.getfqdn()
        # limit to the local machine under all names it may appear as
        limit_opts = 'localhost,%s,127.0.0.1' % ','.join(set([host, node, host.split('.')[0], node.split('.')[0]]))
        base_opts = '-c local '
        if self.options.verbosity > 0:
            base_opts += ' -%s' % ''.join(["v" for x in range(0, self.options.verbosity)])

        # Attempt to use the inventory passed in as an argument
        # It might not yet have been downloaded so use localhost as default
        inv_opts = self._get_inv_cli()
        if not inv_opts:
            inv_opts = " -i localhost, "

        # SCM specific options
        if self.options.module_name == 'git':
            repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest)
            if self.options.checkout:
                repo_opts += ' version=%s' % self.options.checkout

            if self.options.accept_host_key:
                repo_opts += ' accept_hostkey=yes'

            if self.options.private_key_file:
                repo_opts += ' key_file=%s' % self.options.private_key_file

            if self.options.verify:
                repo_opts += ' verify_commit=yes'

            if self.options.tracksubs:
                repo_opts += ' track_submodules=yes'

            if not self.options.fullclone:
                repo_opts += ' depth=1'
        elif self.options.module_name == 'subversion':
            repo_opts = "repo=%s dest=%s" % (self.options.url, self.options.dest)
            if self.options.checkout:
                repo_opts += ' revision=%s' % self.options.checkout
            if not self.options.fullclone:
                repo_opts += ' export=yes'
        elif self.options.module_name == 'hg':
            repo_opts = "repo=%s dest=%s" % (self.options.url, self.options.dest)
            if self.options.checkout:
                repo_opts += ' revision=%s' % self.options.checkout
        elif self.options.module_name == 'bzr':
            repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest)
            if self.options.checkout:
                repo_opts += ' version=%s' % self.options.checkout
        else:
            raise AnsibleOptionsError('Unsupported (%s) SCM module for pull, choices are: %s' % (self.options.module_name, ','.join(self.REPO_CHOICES)))

        # options common to all supported SCMS
        if self.options.clean:
            repo_opts += ' force=yes'

        path = module_loader.find_plugin(self.options.module_name)
        if path is None:
            raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name))

        bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
        # hardcode local and inventory/host as this is just meant to fetch the repo
        cmd = '%s/ansible %s %s -m %s -a "%s" all -l "%s"' % (bin_path, inv_opts, base_opts, self.options.module_name, repo_opts, limit_opts)

        for ev in self.options.extra_vars:
            cmd += ' -e "%s"' % ev

        # Nap?
        if self.options.sleep:
            display.display("Sleeping for %d seconds..." % self.options.sleep)
            time.sleep(self.options.sleep)

        # RUN the Checkout command
        display.debug("running ansible with VCS module to checkout repo")
        display.vvvv('EXEC: %s' % cmd)
        rc, b_out, b_err = run_cmd(cmd, live=True)

        if rc != 0:
            if self.options.force:
                display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
            else:
                return rc
        elif self.options.ifchanged and b'"changed": true' not in b_out:
            # NOTE(review): change detection scans the ad-hoc run's JSON-ish
            # output for a literal marker — presumably fragile; confirm.
            display.display("Repository has not changed, quitting.")
            return 0

        playbook = self.select_playbook(self.options.dest)
        if playbook is None:
            raise AnsibleOptionsError("Could not find a playbook to run.")

        # Build playbook command
        cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
        if self.options.vault_password_files:
            for vault_password_file in self.options.vault_password_files:
                cmd += " --vault-password-file=%s" % vault_password_file
        if self.options.vault_ids:
            for vault_id in self.options.vault_ids:
                cmd += " --vault-id=%s" % vault_id

        for ev in self.options.extra_vars:
            cmd += ' -e "%s"' % ev
        if self.options.ask_sudo_pass or self.options.ask_su_pass or self.options.become_ask_pass:
            cmd += ' --ask-become-pass'
        if self.options.skip_tags:
            cmd += ' --skip-tags "%s"' % to_native(u','.join(self.options.skip_tags))
        if self.options.tags:
            cmd += ' -t "%s"' % to_native(u','.join(self.options.tags))
        if self.options.subset:
            cmd += ' -l "%s"' % self.options.subset
        else:
            cmd += ' -l "%s"' % limit_opts
        if self.options.check:
            cmd += ' -C'
        if self.options.diff:
            cmd += ' -D'

        os.chdir(self.options.dest)

        # redo inventory options as new files might exist now
        inv_opts = self._get_inv_cli()
        if inv_opts:
            cmd += inv_opts

        # RUN THE PLAYBOOK COMMAND
        display.debug("running ansible-playbook to do actual work")
        display.debug('EXEC: %s' % cmd)
        rc, b_out, b_err = run_cmd(cmd, live=True)

        if self.options.purge:
            os.chdir('/')
            try:
                shutil.rmtree(self.options.dest)
            except Exception as e:
                # best effort: report but do not mask the playbook's exit code
                display.error(u"Failed to remove %s: %s" % (self.options.dest, to_text(e)))

        return rc

    def try_playbook(self, path):
        """Return 0 if *path* is a readable playbook file, else a key into
        PLAYBOOK_ERRORS (1 = missing, 2 = unreadable).
        """
        if not os.path.exists(path):
            return 1
        if not os.access(path, os.R_OK):
            return 2
        return 0

    def select_playbook(self, path):
        """Pick the playbook to run from the checkout at *path*.

        With a positional argument, that file is used (or None with a
        warning). Otherwise try '<fqdn>.yml', '<shortname>.yml', then
        DEFAULT_PLAYBOOK; returns None if none is usable.
        """
        playbook = None
        if len(self.args) > 0 and self.args[0] is not None:
            playbook = os.path.join(path, self.args[0])
            rc = self.try_playbook(playbook)
            if rc != 0:
                display.warning("%s: %s" % (playbook, self.PLAYBOOK_ERRORS[rc]))
                return None
            return playbook
        else:
            fqdn = socket.getfqdn()
            hostpb = os.path.join(path, fqdn + '.yml')
            shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml')
            localpb = os.path.join(path, self.DEFAULT_PLAYBOOK)
            errors = []
            for pb in [hostpb, shorthostpb, localpb]:
                rc = self.try_playbook(pb)
                if rc == 0:
                    playbook = pb
                    break
                else:
                    errors.append("%s: %s" % (pb, self.PLAYBOOK_ERRORS[rc]))
            if playbook is None:
                display.warning("\n".join(errors))
            return playbook
BIN
.ve/lib/python2.7/site-packages/ansible/cli/pull.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/cli/pull.pyc
Normal file
Binary file not shown.
467
.ve/lib/python2.7/site-packages/ansible/cli/vault.py
Normal file
467
.ve/lib/python2.7/site-packages/ansible/cli/vault.py
Normal file
@@ -0,0 +1,467 @@
|
||||
# (c) 2014, James Tanner <tanner.jc@gmail.com>
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
# ansible-vault is a script that encrypts/decrypts YAML files. See
|
||||
# https://docs.ansible.com/playbooks_vault.html for more details.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from ansible.cli import CLI
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleOptionsError
|
||||
from ansible.module_utils._text import to_text, to_bytes
|
||||
from ansible.parsing.dataloader import DataLoader
|
||||
from ansible.parsing.vault import VaultEditor, VaultLib, match_encrypt_secret
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
class VaultCLI(CLI):
    ''' can encrypt any structured data file used by Ansible.
    This can include *group_vars/* or *host_vars/* inventory variables,
    variables loaded by *include_vars* or *vars_files*, or variable files
    passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*.
    Role variables and defaults are also included!

    Because Ansible tasks, handlers, and other objects are data, these can also be encrypted with vault.
    If you'd like to not expose what variables you are using, you can keep an individual task file entirely encrypted.

    The password used with vault currently must be the same for all files you wish to use together at the same time.
    '''

    # Sub-commands accepted as the first positional argument.
    VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "encrypt_string", "rekey", "view")

    # Human-readable labels for where a plaintext string came from; used by
    # encrypt_string in the commented delimiters written to stderr.
    FROM_STDIN = "stdin"
    FROM_ARGS = "the command line args"
    FROM_PROMPT = "the interactive prompt"
|
||||
|
||||
def __init__(self, args):
    """Initialize vault CLI state, then hand off to the CLI base class."""
    # Vault passwords as bytes; populated later from prompts or files.
    self.b_vault_pass = self.b_new_vault_pass = None

    # True when encrypt_string should consume its plaintext from stdin.
    self.encrypt_string_read_stdin = False

    # Secret / vault-id pairs resolved in run(); the 'new_' pair is the
    # rekey target.
    self.encrypt_secret = self.encrypt_vault_id = None
    self.new_encrypt_secret = self.new_encrypt_vault_id = None

    # Actions whose result may be redirected with --output.
    self.can_output = ['encrypt', 'decrypt', 'encrypt_string']

    super(VaultCLI, self).__init__(args)
|
||||
|
||||
def set_action(self):
    """Resolve the requested sub-command and register its action-specific
    optparse options and usage string on self.parser."""

    super(VaultCLI, self).set_action()

    # add the --output option only for actions that can redirect their result
    if self.action in self.can_output:
        self.parser.add_option('--output', default=None, dest='output_file',
                               help='output file name for encrypt or decrypt; use - for stdout',
                               action="callback", callback=CLI.unfrack_path, type='string')

    # options and usage specific to each action
    if self.action == "create":
        self.parser.set_usage("usage: %prog create [options] file_name")
    elif self.action == "decrypt":
        self.parser.set_usage("usage: %prog decrypt [options] file_name")
    elif self.action == "edit":
        self.parser.set_usage("usage: %prog edit [options] file_name")
    elif self.action == "view":
        self.parser.set_usage("usage: %prog view [options] file_name")
    elif self.action == "encrypt":
        self.parser.set_usage("usage: %prog encrypt [options] file_name")
    # NOTE: the action name uses an underscore while long options use dashes;
    # no preference for either -- both are accepted where relevant.
    elif self.action == "encrypt_string":
        self.parser.add_option('-p', '--prompt', dest='encrypt_string_prompt',
                               action='store_true',
                               help="Prompt for the string to encrypt")
        self.parser.add_option('-n', '--name', dest='encrypt_string_names',
                               action='append',
                               help="Specify the variable name")
        self.parser.add_option('--stdin-name', dest='encrypt_string_stdin_name',
                               default=None,
                               help="Specify the variable name for stdin")
        self.parser.set_usage("usage: %prog encrypt_string [--prompt] [options] string_to_encrypt")
    elif self.action == "rekey":
        self.parser.set_usage("usage: %prog rekey [options] file_name")

    # For encrypting actions, we can also specify which of multiple vault ids should be used for encrypting
    if self.action in ['create', 'encrypt', 'encrypt_string', 'rekey', 'edit']:
        self.parser.add_option('--encrypt-vault-id', default=[], dest='encrypt_vault_id',
                               action='store', type='string',
                               help='the vault id used to encrypt (required if more than vault-id is provided)')
|
||||
|
||||
def parse(self):
    """Build the option parser, parse argv, and validate the combination of
    action, options, and positional arguments.

    Raises AnsibleOptionsError for malformed vault ids, missing filename
    arguments, or conflicting --output / --prompt / stdin usage.
    """

    self.parser = CLI.base_parser(
        vault_opts=True,
        vault_rekey_opts=True,
        usage="usage: %%prog [%s] [options] [vaultfile.yml]" % "|".join(self.VALID_ACTIONS),
        desc="encryption/decryption utility for Ansible data files",
        epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
    )

    self.set_action()

    super(VaultCLI, self).parse()
    self.validate_conflicts(vault_opts=True, vault_rekey_opts=True)

    display.verbosity = self.options.verbosity

    if self.options.vault_ids:
        for vault_id in self.options.vault_ids:
            # ';' separates the label from the source in a vault-id spec,
            # so it can never appear inside a label itself
            if u';' in vault_id:
                raise AnsibleOptionsError("'%s' is not a valid vault id. The character ';' is not allowed in vault ids" % vault_id)

    if self.action not in self.can_output:
        if len(self.args) == 0:
            raise AnsibleOptionsError("Vault requires at least one filename as a parameter")
    else:
        # This restriction should remain in place until it's possible to
        # load multiple YAML records from a single file, or it's too easy
        # to create an encrypted file that can't be read back in. But in
        # the meanwhile, "cat a b c|ansible-vault encrypt --output x" is
        # a workaround.
        if self.options.output_file and len(self.args) > 1:
            raise AnsibleOptionsError("At most one input file may be used with the --output option")

    if self.action == 'encrypt_string':
        # a literal '-' arg, no args at all, or --stdin-name all mean:
        # read the plaintext from stdin
        if '-' in self.args or len(self.args) == 0 or self.options.encrypt_string_stdin_name:
            self.encrypt_string_read_stdin = True

        # TODO: prompting from stdin and reading from stdin seem mutually exclusive, but verify that.
        if self.options.encrypt_string_prompt and self.encrypt_string_read_stdin:
            raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin')
|
||||
|
||||
def run(self):
    """Resolve vault secrets, build the VaultEditor used by the execute_*
    methods, dispatch the selected action, and restore the process umask.

    Raises AnsibleOptionsError when no usable vault password can be
    obtained for the requested action.
    """
    super(VaultCLI, self).run()
    loader = DataLoader()

    # set default restrictive umask so any files vault writes are private
    old_umask = os.umask(0o077)

    vault_ids = self.options.vault_ids

    # there are 3 types of actions, those that just 'read' (decrypt, view) and only
    # need to ask for a password once, and those that 'write' (create, encrypt) that
    # ask for a new password and confirm it, and 'read/write' (rekey) that asks for the
    # old password, then asks for a new one and confirms it.

    default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
    vault_ids = default_vault_ids + vault_ids

    # TODO: instead of prompting for these before, we could let VaultEditor
    # call a callback when it needs it.
    if self.action in ['decrypt', 'view', 'rekey', 'edit']:
        vault_secrets = self.setup_vault_secrets(loader,
                                                 vault_ids=vault_ids,
                                                 vault_password_files=self.options.vault_password_files,
                                                 ask_vault_pass=self.options.ask_vault_pass)
        if not vault_secrets:
            raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")

    if self.action in ['encrypt', 'encrypt_string', 'create']:
        # Only these actions register --encrypt-vault-id (see set_action), so
        # the option is always consultable here; fall back to the configured
        # default identity.
        encrypt_vault_id = self.options.encrypt_vault_id or C.DEFAULT_VAULT_ENCRYPT_IDENTITY

        vault_secrets = self.setup_vault_secrets(loader,
                                                 vault_ids=vault_ids,
                                                 vault_password_files=self.options.vault_password_files,
                                                 ask_vault_pass=self.options.ask_vault_pass,
                                                 create_new_password=True)

        if len(vault_secrets) > 1 and not encrypt_vault_id:
            raise AnsibleOptionsError("The vault-ids %s are available to encrypt. Specify the vault-id to encrypt with --encrypt-vault-id" %
                                      ','.join([x[0] for x in vault_secrets]))

        if not vault_secrets:
            raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")

        encrypt_secret = match_encrypt_secret(vault_secrets,
                                              encrypt_vault_id=encrypt_vault_id)

        # only one secret for encrypt for now, use the first vault_id and use its first secret
        # TODO: exception if more than one?
        self.encrypt_vault_id = encrypt_secret[0]
        self.encrypt_secret = encrypt_secret[1]

    if self.action in ['rekey']:
        encrypt_vault_id = self.options.encrypt_vault_id or C.DEFAULT_VAULT_ENCRYPT_IDENTITY

        # new_vault_ids should only ever be one item; start from the default
        # vault ids when --encrypt-vault-id is in play
        new_vault_ids = []
        if encrypt_vault_id:
            # copy so the append below cannot mutate the shared
            # C.DEFAULT_VAULT_IDENTITY_LIST configuration value
            new_vault_ids = list(default_vault_ids)
        if self.options.new_vault_id:
            new_vault_ids.append(self.options.new_vault_id)

        new_vault_password_files = []
        if self.options.new_vault_password_file:
            new_vault_password_files.append(self.options.new_vault_password_file)

        new_vault_secrets = self.setup_vault_secrets(loader,
                                                     vault_ids=new_vault_ids,
                                                     vault_password_files=new_vault_password_files,
                                                     ask_vault_pass=self.options.ask_vault_pass,
                                                     create_new_password=True)

        if not new_vault_secrets:
            raise AnsibleOptionsError("A new vault password is required to use Ansible's Vault rekey")

        # There is only one new_vault_id currently and one new_vault_secret, or we
        # use the id specified in --encrypt-vault-id
        new_encrypt_secret = match_encrypt_secret(new_vault_secrets,
                                                  encrypt_vault_id=encrypt_vault_id)

        self.new_encrypt_vault_id = new_encrypt_secret[0]
        self.new_encrypt_secret = new_encrypt_secret[1]

    loader.set_vault_secrets(vault_secrets)

    # FIXME: do we need to create VaultEditor here? its not reused
    vault = VaultLib(vault_secrets)
    self.editor = VaultEditor(vault)

    self.execute()

    # and restore umask
    os.umask(old_umask)
|
||||
|
||||
def execute_encrypt(self):
    ''' encrypt the supplied file using the provided vault secret '''
    if not self.args and sys.stdin.isatty():
        display.display("Reading plaintext input from stdin", stderr=True)

    # '-' means read from / write to the standard streams
    targets = self.args if self.args else ['-']
    for target in targets:
        self.editor.encrypt_file(target, self.encrypt_secret,
                                 vault_id=self.encrypt_vault_id,
                                 output_file=self.options.output_file)

    if sys.stdout.isatty():
        display.display("Encryption successful", stderr=True)
|
||||
|
||||
def format_ciphertext_yaml(self, b_ciphertext, indent=None, name=None):
    """Render vault ciphertext as a YAML '!vault |' literal block.

    :arg b_ciphertext: the ciphertext as bytes
    :kwarg indent: number of spaces to indent the block body (default 10;
        0 is treated as the default because of the ``or``)
    :kwarg name: optional variable name to prefix the block with
    :returns: the formatted YAML text
    """
    pad = ' ' * (indent or 10)
    var_prefix = "%s: " % name if name else ""
    header = "%s!vault |" % var_prefix
    body = [pad + line for line in to_text(b_ciphertext).splitlines()]
    return '\n'.join([header] + body)
|
||||
|
||||
def execute_encrypt_string(self):
    ''' encrypt the supplied string using the provided vault secret '''
    b_plaintext = None

    # Holds tuples (the_text, the_source_of_the_string, the variable name if its provided).
    b_plaintext_list = []

    # remove the non-option '-' arg (used to indicate 'read from stdin') from the candidate args so
    # we don't add it to the plaintext list
    args = [x for x in self.args if x != '-']

    # We can prompt and read input, or read from stdin, but not both.
    if self.options.encrypt_string_prompt:
        msg = "String to encrypt: "

        name = None
        name_prompt_response = display.prompt('Variable name (enter for no name): ')

        # TODO: enforce var naming rules?
        if name_prompt_response != "":
            name = name_prompt_response

        # TODO: could prompt for which vault_id to use for each plaintext string
        # currently, it will just be the default
        # could use private=True for shadowed input if useful
        prompt_response = display.prompt(msg)

        if prompt_response == '':
            raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting')

        b_plaintext = to_bytes(prompt_response)
        b_plaintext_list.append((b_plaintext, self.FROM_PROMPT, name))

    # read from stdin (set during parse() when '-', no args, or --stdin-name was given)
    if self.encrypt_string_read_stdin:
        if sys.stdout.isatty():
            display.display("Reading plaintext input from stdin. (ctrl-d to end input)", stderr=True)

        stdin_text = sys.stdin.read()
        if stdin_text == '':
            raise AnsibleOptionsError('stdin was empty, not encrypting')

        b_plaintext = to_bytes(stdin_text)

        # defaults to None
        name = self.options.encrypt_string_stdin_name
        b_plaintext_list.append((b_plaintext, self.FROM_STDIN, name))

    # use any leftover args as strings to encrypt
    # Try to match args up to --name options
    if hasattr(self.options, 'encrypt_string_names') and self.options.encrypt_string_names:
        # zip truncates to the shorter list, so extra args lose their names
        name_and_text_list = list(zip(self.options.encrypt_string_names, args))

        # Some but not enough --name's to name each var
        if len(args) > len(name_and_text_list):
            # Trying to avoid ever showing the plaintext in the output, so this warning is vague to avoid that.
            display.display('The number of --name options do not match the number of args.',
                            stderr=True)
            display.display('The last named variable will be "%s". The rest will not have names.' % self.options.encrypt_string_names[-1],
                            stderr=True)

        # Add the rest of the args without specifying a name
        for extra_arg in args[len(name_and_text_list):]:
            name_and_text_list.append((None, extra_arg))

    # if no --names are provided, just use the args without a name.
    else:
        name_and_text_list = [(None, x) for x in args]

    # Convert the plaintext text objects to bytestrings and collect
    for name_and_text in name_and_text_list:
        name, plaintext = name_and_text

        if plaintext == '':
            raise AnsibleOptionsError('The plaintext provided from the command line args was empty, not encrypting')

        b_plaintext = to_bytes(plaintext)
        b_plaintext_list.append((b_plaintext, self.FROM_ARGS, name))

    # TODO: specify vault_id per string?
    # Format the encrypted strings and any corresponding stderr output
    outputs = self._format_output_vault_strings(b_plaintext_list, vault_id=self.encrypt_vault_id)

    # print the YAML blocks to stdout; delimiters (if any) go to stderr so
    # stdout stays paste-able into a playbook
    for output in outputs:
        err = output.get('err', None)
        out = output.get('out', '')
        if err:
            sys.stderr.write(err)
        print(out)

    if sys.stdout.isatty():
        display.display("Encryption successful", stderr=True)

    # TODO: offer block or string ala eyaml
|
||||
|
||||
def _format_output_vault_strings(self, b_plaintext_list, vault_id=None):
    """Encrypt each plaintext and format it for display.

    :arg b_plaintext_list: list of (bytes_plaintext, source_label, var_name)
        tuples as built by execute_encrypt_string
    :kwarg vault_id: vault id label to encrypt with
    :returns: list of dicts with 'out' (the YAML '!vault' block) and 'err'
        (an optional commented delimiter destined for stderr, or None)
    """
    # If we are only showing one item in the output, we don't need to include
    # commented delimiters in the text
    show_delimiter = len(b_plaintext_list) > 1

    # list of dicts {'out': '', 'err': ''}
    output = []

    # Encrypt the plaintext, and format it into a yaml block that can be pasted into a playbook.
    # For more than one input, show some differentiating info in the stderr output so we can tell them
    # apart. If we have a var name, we include that in the yaml
    for index, b_plaintext_info in enumerate(b_plaintext_list):
        # (the text itself, which input it came from, its name)
        b_plaintext, src, name = b_plaintext_info

        b_ciphertext = self.editor.encrypt_bytes(b_plaintext, self.encrypt_secret,
                                                 vault_id=vault_id)

        # block formatting
        yaml_text = self.format_ciphertext_yaml(b_ciphertext, name=name)

        err_msg = None
        if show_delimiter:
            human_index = index + 1
            if name:
                err_msg = '# The encrypted version of variable ("%s", the string #%d from %s).\n' % (name, human_index, src)
            else:
                # (fixed: removed the stray ')' that previously trailed this message)
                err_msg = '# The encrypted version of the string #%d from %s.\n' % (human_index, src)
        output.append({'out': yaml_text, 'err': err_msg})

    return output
|
||||
|
||||
def execute_decrypt(self):
    ''' decrypt the supplied file using the provided vault secret '''
    if not self.args and sys.stdin.isatty():
        display.display("Reading ciphertext input from stdin", stderr=True)

    # '-' means read from / write to the standard streams
    for target in (self.args or ['-']):
        self.editor.decrypt_file(target, output_file=self.options.output_file)

    if sys.stdout.isatty():
        display.display("Decryption successful", stderr=True)
|
||||
|
||||
def execute_create(self):
    ''' create and open a file in an editor that will be encrypted with the provided vault secret when closed'''
    if len(self.args) > 1:
        raise AnsibleOptionsError("ansible-vault create can take only one filename argument")

    target = self.args[0]
    self.editor.create_file(target, self.encrypt_secret,
                            vault_id=self.encrypt_vault_id)
|
||||
|
||||
def execute_edit(self):
    ''' open and decrypt an existing vaulted file in an editor, that will be encrypted again when closed'''
    for target in self.args:
        self.editor.edit_file(target)
|
||||
|
||||
def execute_view(self):
    ''' open, decrypt and view an existing vaulted file using a pager using the supplied vault secret '''
    for target in self.args:
        # Vault returns bytes because it can hold binary data; converting to
        # text is a display-only decision here -- byte-exact output is the
        # job of 'decrypt'.
        plaintext = self.editor.plaintext(target)
        self.pager(to_text(plaintext))
|
||||
|
||||
def execute_rekey(self):
    ''' re-encrypt a vaulted file with a new secret, the previous secret is required '''
    for target in self.args:
        # FIXME: plumb in vault_id, use the default new_vault_secret for now
        self.editor.rekey_file(target,
                               self.new_encrypt_secret,
                               self.new_encrypt_vault_id)

    display.display("Rekey successful", stderr=True)
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/cli/vault.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/cli/vault.pyc
Normal file
Binary file not shown.
26
.ve/lib/python2.7/site-packages/ansible/compat/__init__.py
Normal file
26
.ve/lib/python2.7/site-packages/ansible/compat/__init__.py
Normal file
@@ -0,0 +1,26 @@
|
||||
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
'''
|
||||
Compat library for ansible. This contains compatibility definitions for older python
|
||||
When we need to import a module differently depending on python version, do it
|
||||
here. Then in the code we can simply import from compat in order to get what we want.
|
||||
'''
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/compat/__init__.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/compat/__init__.pyc
Normal file
Binary file not shown.
@@ -0,0 +1,55 @@
|
||||
# (c) 2014, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
'''
|
||||
Compat selectors library. Python-3.5 has this builtin. The selectors2
|
||||
package exists on pypi to backport the functionality as far as python-2.6.
|
||||
'''
|
||||
# The following makes it easier for us to script updates of the bundled code
|
||||
_BUNDLED_METADATA = {"pypi_name": "selectors2", "version": "1.1.0"}
|
||||
# Added these bugfix commits from 2.1.0:
|
||||
# * https://github.com/SethMichaelLarson/selectors2/commit/3bd74f2033363b606e1e849528ccaa76f5067590
|
||||
# Wrap kqueue.control so that timeout is a keyword arg
|
||||
# * https://github.com/SethMichaelLarson/selectors2/commit/6f6a26f42086d8aab273b30be492beecb373646b
|
||||
# Fix formatting of the kqueue.control patch for pylint
|
||||
# * https://github.com/SethMichaelLarson/selectors2/commit/f0c2c6c66cfa7662bc52beaf4e2d65adfa25e189
|
||||
# Fix use of OSError exception for py3 and use the wrapper of kqueue.control so retries of
|
||||
# interrupted syscalls work with kqueue
|
||||
|
||||
import os.path
|
||||
import sys
|
||||
|
||||
try:
|
||||
# Python 3.4+
|
||||
import selectors as _system_selectors
|
||||
except ImportError:
|
||||
try:
|
||||
# backport package installed in the system
|
||||
import selectors2 as _system_selectors
|
||||
except ImportError:
|
||||
_system_selectors = None
|
||||
|
||||
if _system_selectors:
|
||||
selectors = _system_selectors
|
||||
else:
|
||||
# Our bundled copy
|
||||
from . import _selectors2 as selectors
|
||||
sys.modules['ansible.compat.selectors'] = selectors
|
||||
Binary file not shown.
@@ -0,0 +1,654 @@
|
||||
# This file is from the selectors2.py package. It backports the PSF Licensed
# selectors module from the Python-3.5 stdlib to older versions of Python.
# The author, Seth Michael Larson, dual licenses his modifications under the
# PSF License and MIT License:
# https://github.com/SethMichaelLarson/selectors2#license
#
# Copyright (c) 2016 Seth Michael Larson
#
# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
# MIT License (see licenses/MIT-license.txt or https://opensource.org/licenses/MIT)
#


# Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
# does not follow this behavior and instead returns that no file descriptor
# events have occurred rather than retry the syscall. The decision to drop
# support for select.devpoll is made to maintain 100% test coverage.

import errno
import math
import select
import socket
import sys
import time
from collections import namedtuple

# 'Mapping' moved to collections.abc in Python 3.3 and the old alias was
# removed in Python 3.10; prefer the new location with a py2 fallback.
try:
    from collections.abc import Mapping
except ImportError:
    from collections import Mapping

try:
    monotonic = time.monotonic
except (AttributeError, ImportError):  # Python 3.3<
    monotonic = time.time

__author__ = 'Seth Michael Larson'
__email__ = 'sethmichaellarson@protonmail.com'
__version__ = '1.1.0'
__license__ = 'MIT'

__all__ = [
    'EVENT_READ',
    'EVENT_WRITE',
    'SelectorError',
    'SelectorKey',
    'DefaultSelector'
]

# Event masks for register()/select(): readable and writable readiness.
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)

HAS_SELECT = True  # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object()  # Sentinel in case a system call returns None.
|
||||
|
||||
|
||||
class SelectorError(Exception):
    """Raised when an underlying select-family syscall fails; wraps the errno."""

    def __init__(self, errcode):
        super(SelectorError, self).__init__()
        self.errno = errcode

    def __repr__(self):
        return "<SelectorError errno={0}>".format(self.errno)

    def __str__(self):
        return repr(self)
|
||||
|
||||
|
||||
def _fileobj_to_fd(fileobj):
|
||||
""" Return a file descriptor from a file object. If
|
||||
given an integer will simply return that integer back. """
|
||||
if isinstance(fileobj, int):
|
||||
fd = fileobj
|
||||
else:
|
||||
try:
|
||||
fd = int(fileobj.fileno())
|
||||
except (AttributeError, TypeError, ValueError):
|
||||
raise ValueError("Invalid file object: {0!r}".format(fileobj))
|
||||
if fd < 0:
|
||||
raise ValueError("Invalid file descriptor: {0}".format(fd))
|
||||
return fd
|
||||
|
||||
|
||||
# Python 3.5 uses a more direct route to wrap system calls to increase speed.
if sys.version_info >= (3, 5):
    def _syscall_wrapper(func, _, *args, **kwargs):
        """ This is the short-circuit version of the below logic
        because in Python 3.5+ all selectors restart system calls. """
        try:
            return func(*args, **kwargs)
        except (OSError, IOError, select.error) as e:
            # normalize the error code: it lives in .errno or args[0]
            # depending on exception type / Python version
            errcode = None
            if hasattr(e, "errno"):
                errcode = e.errno
            elif hasattr(e, "args"):
                errcode = e.args[0]
            raise SelectorError(errcode)
else:
    def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
        """ Wrapper function for syscalls that could fail due to EINTR.
        All functions should be retried if there is time left in the timeout
        in accordance with PEP 475. """
        timeout = kwargs.get("timeout", None)
        if timeout is None:
            expires = None
            recalc_timeout = False
        else:
            timeout = float(timeout)
            if timeout < 0.0:  # Timeout less than 0 treated as no timeout.
                expires = None
            else:
                expires = monotonic() + timeout

        args = list(args)
        if recalc_timeout and "timeout" not in kwargs:
            raise ValueError(
                "Timeout must be in args or kwargs to be recalculated")

        # retry loop: keep reissuing the syscall until it yields a real result
        result = _SYSCALL_SENTINEL
        while result is _SYSCALL_SENTINEL:
            try:
                result = func(*args, **kwargs)
            # OSError is thrown by select.select
            # IOError is thrown by select.epoll.poll
            # select.error is thrown by select.poll.poll
            # Aren't we thankful for Python 3.x rework for exceptions?
            except (OSError, IOError, select.error) as e:
                # select.error wasn't a subclass of OSError in the past.
                errcode = None
                if hasattr(e, "errno"):
                    errcode = e.errno
                elif hasattr(e, "args"):
                    errcode = e.args[0]

                # Also test for the Windows equivalent of EINTR.
                is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
                                                           errcode == errno.WSAEINTR))

                if is_interrupt:
                    if expires is not None:
                        current_time = monotonic()
                        if current_time > expires:
                            # out of time: surface a timeout instead of retrying
                            raise OSError(errno.ETIMEDOUT)
                        if recalc_timeout:
                            if "timeout" in kwargs:
                                kwargs["timeout"] = expires - current_time
                    continue
                if errcode:
                    raise SelectorError(errcode)
                else:
                    raise
        return result
|
||||
|
||||
|
||||
# Immutable record tying a registered file object to its descriptor, the
# event mask it is monitored for, and opaque user data attached at register().
SelectorKey = namedtuple('SelectorKey', 'fileobj fd events data')
|
||||
|
||||
|
||||
class _SelectorMapping(Mapping):
|
||||
""" Mapping of file objects to selector keys """
|
||||
|
||||
def __init__(self, selector):
|
||||
self._selector = selector
|
||||
|
||||
def __len__(self):
|
||||
return len(self._selector._fd_to_key)
|
||||
|
||||
def __getitem__(self, fileobj):
|
||||
try:
|
||||
fd = self._selector._fileobj_lookup(fileobj)
|
||||
return self._selector._fd_to_key[fd]
|
||||
except KeyError:
|
||||
raise KeyError("{0!r} is not registered.".format(fileobj))
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._selector._fd_to_key)
|
||||
|
||||
|
||||
class BaseSelector(object):
|
||||
""" Abstract Selector class
|
||||
|
||||
A selector supports registering file objects to be monitored
|
||||
for specific I/O events.
|
||||
|
||||
A file object is a file descriptor or any object with a
|
||||
`fileno()` method. An arbitrary object can be attached to the
|
||||
file object which can be used for example to store context info,
|
||||
a callback, etc.
|
||||
|
||||
A selector can use various implementations (select(), poll(), epoll(),
|
||||
and kqueue()) depending on the platform. The 'DefaultSelector' class uses
|
||||
the most efficient implementation for the current platform.
|
||||
"""
|
||||
def __init__(self):
|
||||
# Maps file descriptors to keys.
|
||||
self._fd_to_key = {}
|
||||
|
||||
# Read-only mapping returned by get_map()
|
||||
self._map = _SelectorMapping(self)
|
||||
|
||||
def _fileobj_lookup(self, fileobj):
|
||||
""" Return a file descriptor from a file object.
|
||||
This wraps _fileobj_to_fd() to do an exhaustive
|
||||
search in case the object is invalid but we still
|
||||
have it in our map. Used by unregister() so we can
|
||||
unregister an object that was previously registered
|
||||
even if it is closed. It is also used by _SelectorMapping
|
||||
"""
|
||||
try:
|
||||
return _fileobj_to_fd(fileobj)
|
||||
except ValueError:
|
||||
|
||||
# Search through all our mapped keys.
|
||||
for key in self._fd_to_key.values():
|
||||
if key.fileobj is fileobj:
|
||||
return key.fd
|
||||
|
||||
# Raise ValueError after all.
|
||||
raise
|
||||
|
||||
def register(self, fileobj, events, data=None):
|
||||
""" Register a file object for a set of events to monitor. """
|
||||
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
|
||||
raise ValueError("Invalid events: {0!r}".format(events))
|
||||
|
||||
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
|
||||
|
||||
if key.fd in self._fd_to_key:
|
||||
raise KeyError("{0!r} (FD {1}) is already registered"
|
||||
.format(fileobj, key.fd))
|
||||
|
||||
self._fd_to_key[key.fd] = key
|
||||
return key
|
||||
|
||||
def unregister(self, fileobj):
|
||||
""" Unregister a file object from being monitored. """
|
||||
try:
|
||||
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
|
||||
except KeyError:
|
||||
raise KeyError("{0!r} is not registered".format(fileobj))
|
||||
|
||||
# Getting the fileno of a closed socket on Windows errors with EBADF.
|
||||
except socket.error as err:
|
||||
if err.errno != errno.EBADF:
|
||||
raise
|
||||
else:
|
||||
for key in self._fd_to_key.values():
|
||||
if key.fileobj is fileobj:
|
||||
self._fd_to_key.pop(key.fd)
|
||||
break
|
||||
else:
|
||||
raise KeyError("{0!r} is not registered".format(fileobj))
|
||||
return key
|
||||
|
||||
def modify(self, fileobj, events, data=None):
|
||||
""" Change a registered file object monitored events and data. """
|
||||
# NOTE: Some subclasses optimize this operation even further.
|
||||
try:
|
||||
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
|
||||
except KeyError:
|
||||
raise KeyError("{0!r} is not registered".format(fileobj))
|
||||
|
||||
if events != key.events:
|
||||
self.unregister(fileobj)
|
||||
key = self.register(fileobj, events, data)
|
||||
|
||||
elif data != key.data:
|
||||
# Use a shortcut to update the data.
|
||||
key = key._replace(data=data)
|
||||
self._fd_to_key[key.fd] = key
|
||||
|
||||
return key
|
||||
|
||||
def select(self, timeout=None):
|
||||
""" Perform the actual selection until some monitored file objects
|
||||
are ready or the timeout expires. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def close(self):
|
||||
""" Close the selector. This must be called to ensure that all
|
||||
underlying resources are freed. """
|
||||
self._fd_to_key.clear()
|
||||
self._map = None
|
||||
|
||||
def get_key(self, fileobj):
|
||||
""" Return the key associated with a registered file object. """
|
||||
mapping = self.get_map()
|
||||
if mapping is None:
|
||||
raise RuntimeError("Selector is closed")
|
||||
try:
|
||||
return mapping[fileobj]
|
||||
except KeyError:
|
||||
raise KeyError("{0!r} is not registered".format(fileobj))
|
||||
|
||||
def get_map(self):
|
||||
""" Return a mapping of file objects to selector keys """
|
||||
return self._map
|
||||
|
||||
def _key_from_fd(self, fd):
|
||||
""" Return the key associated to a given file descriptor
|
||||
Return None if it is not found. """
|
||||
try:
|
||||
return self._fd_to_key[fd]
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *args):
|
||||
self.close()
|
||||
|
||||
|
||||
# Almost all platforms have select.select()
|
||||
if hasattr(select, "select"):
|
||||
class SelectSelector(BaseSelector):
|
||||
""" Select-based selector. """
|
||||
def __init__(self):
|
||||
super(SelectSelector, self).__init__()
|
||||
self._readers = set()
|
||||
self._writers = set()
|
||||
|
||||
def register(self, fileobj, events, data=None):
|
||||
key = super(SelectSelector, self).register(fileobj, events, data)
|
||||
if events & EVENT_READ:
|
||||
self._readers.add(key.fd)
|
||||
if events & EVENT_WRITE:
|
||||
self._writers.add(key.fd)
|
||||
return key
|
||||
|
||||
def unregister(self, fileobj):
|
||||
key = super(SelectSelector, self).unregister(fileobj)
|
||||
self._readers.discard(key.fd)
|
||||
self._writers.discard(key.fd)
|
||||
return key
|
||||
|
||||
def _select(self, r, w, timeout=None):
|
||||
""" Wrapper for select.select because timeout is a positional arg """
|
||||
return select.select(r, w, [], timeout)
|
||||
|
||||
def select(self, timeout=None):
|
||||
# Selecting on empty lists on Windows errors out.
|
||||
if not len(self._readers) and not len(self._writers):
|
||||
return []
|
||||
|
||||
timeout = None if timeout is None else max(timeout, 0.0)
|
||||
ready = []
|
||||
r, w, _ = _syscall_wrapper(self._select, True, self._readers,
|
||||
self._writers, timeout=timeout)
|
||||
r = set(r)
|
||||
w = set(w)
|
||||
for fd in r | w:
|
||||
events = 0
|
||||
if fd in r:
|
||||
events |= EVENT_READ
|
||||
if fd in w:
|
||||
events |= EVENT_WRITE
|
||||
|
||||
key = self._key_from_fd(fd)
|
||||
if key:
|
||||
ready.append((key, events & key.events))
|
||||
return ready
|
||||
|
||||
__all__.append('SelectSelector')
|
||||
|
||||
|
||||
if hasattr(select, "poll"):
|
||||
class PollSelector(BaseSelector):
|
||||
""" Poll-based selector """
|
||||
def __init__(self):
|
||||
super(PollSelector, self).__init__()
|
||||
self._poll = select.poll()
|
||||
|
||||
def register(self, fileobj, events, data=None):
|
||||
key = super(PollSelector, self).register(fileobj, events, data)
|
||||
event_mask = 0
|
||||
if events & EVENT_READ:
|
||||
event_mask |= select.POLLIN
|
||||
if events & EVENT_WRITE:
|
||||
event_mask |= select.POLLOUT
|
||||
self._poll.register(key.fd, event_mask)
|
||||
return key
|
||||
|
||||
def unregister(self, fileobj):
|
||||
key = super(PollSelector, self).unregister(fileobj)
|
||||
self._poll.unregister(key.fd)
|
||||
return key
|
||||
|
||||
def _wrap_poll(self, timeout=None):
|
||||
""" Wrapper function for select.poll.poll() so that
|
||||
_syscall_wrapper can work with only seconds. """
|
||||
if timeout is not None:
|
||||
if timeout <= 0:
|
||||
timeout = 0
|
||||
else:
|
||||
# select.poll.poll() has a resolution of 1 millisecond,
|
||||
# round away from zero to wait *at least* timeout seconds.
|
||||
timeout = math.ceil(timeout * 1e3)
|
||||
|
||||
result = self._poll.poll(timeout)
|
||||
return result
|
||||
|
||||
def select(self, timeout=None):
|
||||
ready = []
|
||||
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
|
||||
for fd, event_mask in fd_events:
|
||||
events = 0
|
||||
if event_mask & ~select.POLLIN:
|
||||
events |= EVENT_WRITE
|
||||
if event_mask & ~select.POLLOUT:
|
||||
events |= EVENT_READ
|
||||
|
||||
key = self._key_from_fd(fd)
|
||||
if key:
|
||||
ready.append((key, events & key.events))
|
||||
|
||||
return ready
|
||||
|
||||
__all__.append('PollSelector')
|
||||
|
||||
if hasattr(select, "epoll"):
|
||||
class EpollSelector(BaseSelector):
|
||||
""" Epoll-based selector """
|
||||
def __init__(self):
|
||||
super(EpollSelector, self).__init__()
|
||||
self._epoll = select.epoll()
|
||||
|
||||
def fileno(self):
|
||||
return self._epoll.fileno()
|
||||
|
||||
def register(self, fileobj, events, data=None):
|
||||
key = super(EpollSelector, self).register(fileobj, events, data)
|
||||
events_mask = 0
|
||||
if events & EVENT_READ:
|
||||
events_mask |= select.EPOLLIN
|
||||
if events & EVENT_WRITE:
|
||||
events_mask |= select.EPOLLOUT
|
||||
_syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
|
||||
return key
|
||||
|
||||
def unregister(self, fileobj):
|
||||
key = super(EpollSelector, self).unregister(fileobj)
|
||||
try:
|
||||
_syscall_wrapper(self._epoll.unregister, False, key.fd)
|
||||
except SelectorError:
|
||||
# This can occur when the fd was closed since registry.
|
||||
pass
|
||||
return key
|
||||
|
||||
def select(self, timeout=None):
|
||||
if timeout is not None:
|
||||
if timeout <= 0:
|
||||
timeout = 0.0
|
||||
else:
|
||||
# select.epoll.poll() has a resolution of 1 millisecond
|
||||
# but luckily takes seconds so we don't need a wrapper
|
||||
# like PollSelector. Just for better rounding.
|
||||
timeout = math.ceil(timeout * 1e3) * 1e-3
|
||||
timeout = float(timeout)
|
||||
else:
|
||||
timeout = -1.0 # epoll.poll() must have a float.
|
||||
|
||||
# We always want at least 1 to ensure that select can be called
|
||||
# with no file descriptors registered. Otherwise will fail.
|
||||
max_events = max(len(self._fd_to_key), 1)
|
||||
|
||||
ready = []
|
||||
fd_events = _syscall_wrapper(self._epoll.poll, True,
|
||||
timeout=timeout,
|
||||
maxevents=max_events)
|
||||
for fd, event_mask in fd_events:
|
||||
events = 0
|
||||
if event_mask & ~select.EPOLLIN:
|
||||
events |= EVENT_WRITE
|
||||
if event_mask & ~select.EPOLLOUT:
|
||||
events |= EVENT_READ
|
||||
|
||||
key = self._key_from_fd(fd)
|
||||
if key:
|
||||
ready.append((key, events & key.events))
|
||||
return ready
|
||||
|
||||
def close(self):
|
||||
self._epoll.close()
|
||||
super(EpollSelector, self).close()
|
||||
|
||||
__all__.append('EpollSelector')
|
||||
|
||||
|
||||
if hasattr(select, "devpoll"):
|
||||
class DevpollSelector(BaseSelector):
|
||||
"""Solaris /dev/poll selector."""
|
||||
|
||||
def __init__(self):
|
||||
super(DevpollSelector, self).__init__()
|
||||
self._devpoll = select.devpoll()
|
||||
|
||||
def fileno(self):
|
||||
return self._devpoll.fileno()
|
||||
|
||||
def register(self, fileobj, events, data=None):
|
||||
key = super(DevpollSelector, self).register(fileobj, events, data)
|
||||
poll_events = 0
|
||||
if events & EVENT_READ:
|
||||
poll_events |= select.POLLIN
|
||||
if events & EVENT_WRITE:
|
||||
poll_events |= select.POLLOUT
|
||||
self._devpoll.register(key.fd, poll_events)
|
||||
return key
|
||||
|
||||
def unregister(self, fileobj):
|
||||
key = super(DevpollSelector, self).unregister(fileobj)
|
||||
self._devpoll.unregister(key.fd)
|
||||
return key
|
||||
|
||||
def _wrap_poll(self, timeout=None):
|
||||
""" Wrapper function for select.poll.poll() so that
|
||||
_syscall_wrapper can work with only seconds. """
|
||||
if timeout is not None:
|
||||
if timeout <= 0:
|
||||
timeout = 0
|
||||
else:
|
||||
# select.devpoll.poll() has a resolution of 1 millisecond,
|
||||
# round away from zero to wait *at least* timeout seconds.
|
||||
timeout = math.ceil(timeout * 1e3)
|
||||
|
||||
result = self._devpoll.poll(timeout)
|
||||
return result
|
||||
|
||||
def select(self, timeout=None):
|
||||
ready = []
|
||||
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
|
||||
for fd, event_mask in fd_events:
|
||||
events = 0
|
||||
if event_mask & ~select.POLLIN:
|
||||
events |= EVENT_WRITE
|
||||
if event_mask & ~select.POLLOUT:
|
||||
events |= EVENT_READ
|
||||
|
||||
key = self._key_from_fd(fd)
|
||||
if key:
|
||||
ready.append((key, events & key.events))
|
||||
|
||||
return ready
|
||||
|
||||
def close(self):
|
||||
self._devpoll.close()
|
||||
super(DevpollSelector, self).close()
|
||||
|
||||
__all__.append('DevpollSelector')
|
||||
|
||||
|
||||
if hasattr(select, "kqueue"):
|
||||
class KqueueSelector(BaseSelector):
|
||||
""" Kqueue / Kevent-based selector """
|
||||
def __init__(self):
|
||||
super(KqueueSelector, self).__init__()
|
||||
self._kqueue = select.kqueue()
|
||||
|
||||
def fileno(self):
|
||||
return self._kqueue.fileno()
|
||||
|
||||
def register(self, fileobj, events, data=None):
|
||||
key = super(KqueueSelector, self).register(fileobj, events, data)
|
||||
if events & EVENT_READ:
|
||||
kevent = select.kevent(key.fd,
|
||||
select.KQ_FILTER_READ,
|
||||
select.KQ_EV_ADD)
|
||||
|
||||
_syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
|
||||
|
||||
if events & EVENT_WRITE:
|
||||
kevent = select.kevent(key.fd,
|
||||
select.KQ_FILTER_WRITE,
|
||||
select.KQ_EV_ADD)
|
||||
|
||||
_syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
|
||||
|
||||
return key
|
||||
|
||||
def unregister(self, fileobj):
|
||||
key = super(KqueueSelector, self).unregister(fileobj)
|
||||
if key.events & EVENT_READ:
|
||||
kevent = select.kevent(key.fd,
|
||||
select.KQ_FILTER_READ,
|
||||
select.KQ_EV_DELETE)
|
||||
try:
|
||||
_syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
|
||||
except SelectorError:
|
||||
pass
|
||||
if key.events & EVENT_WRITE:
|
||||
kevent = select.kevent(key.fd,
|
||||
select.KQ_FILTER_WRITE,
|
||||
select.KQ_EV_DELETE)
|
||||
try:
|
||||
_syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
|
||||
except SelectorError:
|
||||
pass
|
||||
|
||||
return key
|
||||
|
||||
def select(self, timeout=None):
|
||||
if timeout is not None:
|
||||
timeout = max(timeout, 0)
|
||||
|
||||
max_events = len(self._fd_to_key) * 2
|
||||
ready_fds = {}
|
||||
|
||||
kevent_list = _syscall_wrapper(self._wrap_control, True,
|
||||
None, max_events, timeout=timeout)
|
||||
|
||||
for kevent in kevent_list:
|
||||
fd = kevent.ident
|
||||
event_mask = kevent.filter
|
||||
events = 0
|
||||
if event_mask == select.KQ_FILTER_READ:
|
||||
events |= EVENT_READ
|
||||
if event_mask == select.KQ_FILTER_WRITE:
|
||||
events |= EVENT_WRITE
|
||||
|
||||
key = self._key_from_fd(fd)
|
||||
if key:
|
||||
if key.fd not in ready_fds:
|
||||
ready_fds[key.fd] = (key, events & key.events)
|
||||
else:
|
||||
old_events = ready_fds[key.fd][1]
|
||||
ready_fds[key.fd] = (key, (events | old_events) & key.events)
|
||||
|
||||
return list(ready_fds.values())
|
||||
|
||||
def close(self):
|
||||
self._kqueue.close()
|
||||
super(KqueueSelector, self).close()
|
||||
|
||||
def _wrap_control(self, changelist, max_events, timeout):
|
||||
return self._kqueue.control(changelist, max_events, timeout)
|
||||
|
||||
__all__.append('KqueueSelector')
|
||||
|
||||
|
||||
# Choose the best implementation, roughly:
|
||||
# kqueue == epoll == devpoll > poll > select.
|
||||
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
|
||||
if 'KqueueSelector' in globals(): # Platform-specific: Mac OS and BSD
|
||||
DefaultSelector = KqueueSelector
|
||||
elif 'DevpollSelector' in globals():
|
||||
DefaultSelector = DevpollSelector
|
||||
elif 'EpollSelector' in globals(): # Platform-specific: Linux
|
||||
DefaultSelector = EpollSelector
|
||||
elif 'PollSelector' in globals(): # Platform-specific: Linux
|
||||
DefaultSelector = PollSelector
|
||||
elif 'SelectSelector' in globals(): # Platform-specific: Windows
|
||||
DefaultSelector = SelectSelector
|
||||
else: # Platform-specific: AppEngine
|
||||
def no_selector(_):
|
||||
raise ValueError("Platform does not have a selector")
|
||||
DefaultSelector = no_selector
|
||||
HAS_SELECT = False
|
||||
Binary file not shown.
@@ -0,0 +1,39 @@
|
||||
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
'''
|
||||
This module contains things that are only needed for compat in the testsuites,
|
||||
not in ansible itself. If you are not installing the test suite, you can
|
||||
safely remove this subdirectory.
|
||||
'''
|
||||
|
||||
#
|
||||
# Compat for python2.7
|
||||
#
|
||||
|
||||
# One unittest needs to import builtins via __import__() so we need to have
|
||||
# the string that represents it
|
||||
try:
|
||||
import __builtin__
|
||||
except ImportError:
|
||||
BUILTINS = 'builtins'
|
||||
else:
|
||||
BUILTINS = '__builtin__'
|
||||
Binary file not shown.
122
.ve/lib/python2.7/site-packages/ansible/compat/tests/mock.py
Normal file
122
.ve/lib/python2.7/site-packages/ansible/compat/tests/mock.py
Normal file
@@ -0,0 +1,122 @@
|
||||
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
'''
|
||||
Compat module for Python3.x's unittest.mock module
|
||||
'''
|
||||
import sys
|
||||
|
||||
# Python 2.7
|
||||
|
||||
# Note: Could use the pypi mock library on python3.x as well as python2.x. It
|
||||
# is the same as the python3 stdlib mock library
|
||||
|
||||
try:
|
||||
# Allow wildcard import because we really do want to import all of mock's
|
||||
# symbols into this compat shim
|
||||
# pylint: disable=wildcard-import,unused-wildcard-import
|
||||
from unittest.mock import *
|
||||
except ImportError:
|
||||
# Python 2
|
||||
# pylint: disable=wildcard-import,unused-wildcard-import
|
||||
try:
|
||||
from mock import *
|
||||
except ImportError:
|
||||
print('You need the mock library installed on python2.x to run tests')
|
||||
|
||||
|
||||
# Prior to 3.4.4, mock_open cannot handle binary read_data
|
||||
if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
|
||||
file_spec = None
|
||||
|
||||
def _iterate_read_data(read_data):
|
||||
# Helper for mock_open:
|
||||
# Retrieve lines from read_data via a generator so that separate calls to
|
||||
# readline, read, and readlines are properly interleaved
|
||||
sep = b'\n' if isinstance(read_data, bytes) else '\n'
|
||||
data_as_list = [l + sep for l in read_data.split(sep)]
|
||||
|
||||
if data_as_list[-1] == sep:
|
||||
# If the last line ended in a newline, the list comprehension will have an
|
||||
# extra entry that's just a newline. Remove this.
|
||||
data_as_list = data_as_list[:-1]
|
||||
else:
|
||||
# If there wasn't an extra newline by itself, then the file being
|
||||
# emulated doesn't have a newline to end the last line remove the
|
||||
# newline that our naive format() added
|
||||
data_as_list[-1] = data_as_list[-1][:-1]
|
||||
|
||||
for line in data_as_list:
|
||||
yield line
|
||||
|
||||
def mock_open(mock=None, read_data=''):
|
||||
"""
|
||||
A helper function to create a mock to replace the use of `open`. It works
|
||||
for `open` called directly or used as a context manager.
|
||||
|
||||
The `mock` argument is the mock object to configure. If `None` (the
|
||||
default) then a `MagicMock` will be created for you, with the API limited
|
||||
to methods or attributes available on standard file handles.
|
||||
|
||||
`read_data` is a string for the `read` methoddline`, and `readlines` of the
|
||||
file handle to return. This is an empty string by default.
|
||||
"""
|
||||
def _readlines_side_effect(*args, **kwargs):
|
||||
if handle.readlines.return_value is not None:
|
||||
return handle.readlines.return_value
|
||||
return list(_data)
|
||||
|
||||
def _read_side_effect(*args, **kwargs):
|
||||
if handle.read.return_value is not None:
|
||||
return handle.read.return_value
|
||||
return type(read_data)().join(_data)
|
||||
|
||||
def _readline_side_effect():
|
||||
if handle.readline.return_value is not None:
|
||||
while True:
|
||||
yield handle.readline.return_value
|
||||
for line in _data:
|
||||
yield line
|
||||
|
||||
global file_spec
|
||||
if file_spec is None:
|
||||
import _io
|
||||
file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
|
||||
|
||||
if mock is None:
|
||||
mock = MagicMock(name='open', spec=open)
|
||||
|
||||
handle = MagicMock(spec=file_spec)
|
||||
handle.__enter__.return_value = handle
|
||||
|
||||
_data = _iterate_read_data(read_data)
|
||||
|
||||
handle.write.return_value = None
|
||||
handle.read.return_value = None
|
||||
handle.readline.return_value = None
|
||||
handle.readlines.return_value = None
|
||||
|
||||
handle.read.side_effect = _read_side_effect
|
||||
handle.readline.side_effect = _readline_side_effect()
|
||||
handle.readlines.side_effect = _readlines_side_effect
|
||||
|
||||
mock.return_value = handle
|
||||
return mock
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/compat/tests/mock.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/compat/tests/mock.pyc
Normal file
Binary file not shown.
@@ -0,0 +1,38 @@
|
||||
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
'''
|
||||
Compat module for Python2.7's unittest module
|
||||
'''
|
||||
|
||||
import sys
|
||||
|
||||
# Allow wildcard import because we really do want to import all of
|
||||
# unittests's symbols into this compat shim
|
||||
# pylint: disable=wildcard-import,unused-wildcard-import
|
||||
if sys.version_info < (2, 7):
|
||||
try:
|
||||
# Need unittest2 on python2.6
|
||||
from unittest2 import *
|
||||
except ImportError:
|
||||
print('You need unittest2 installed on python2.6.x to run tests')
|
||||
else:
|
||||
from unittest import *
|
||||
Binary file not shown.
BIN
.ve/lib/python2.7/site-packages/ansible/config/__init__.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/config/__init__.pyc
Normal file
Binary file not shown.
1660
.ve/lib/python2.7/site-packages/ansible/config/base.yml
Normal file
1660
.ve/lib/python2.7/site-packages/ansible/config/base.yml
Normal file
File diff suppressed because it is too large
Load Diff
43
.ve/lib/python2.7/site-packages/ansible/config/data.py
Normal file
43
.ve/lib/python2.7/site-packages/ansible/config/data.py
Normal file
@@ -0,0 +1,43 @@
|
||||
# Copyright: (c) 2017, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class ConfigData(object):
|
||||
|
||||
def __init__(self):
|
||||
self._global_settings = {}
|
||||
self._plugins = {}
|
||||
|
||||
def get_setting(self, name, plugin=None):
|
||||
|
||||
setting = None
|
||||
if plugin is None:
|
||||
setting = self._global_settings.get(name)
|
||||
elif plugin.type in self._plugins and plugin.name in self._plugins[plugin.type]:
|
||||
setting = self._plugins[plugin.type][plugin.name].get(name)
|
||||
|
||||
return setting
|
||||
|
||||
def get_settings(self, plugin=None):
|
||||
|
||||
settings = []
|
||||
if plugin is None:
|
||||
settings = [self._global_settings[k] for k in self._global_settings]
|
||||
elif plugin.type in self._plugins and plugin.name in self._plugins[plugin.type]:
|
||||
settings = [self._plugins[plugin.type][plugin.name][k] for k in self._plugins[plugin.type][plugin.name]]
|
||||
|
||||
return settings
|
||||
|
||||
def update_setting(self, setting, plugin=None):
|
||||
|
||||
if plugin is None:
|
||||
self._global_settings[setting.name] = setting
|
||||
else:
|
||||
if plugin.type not in self._plugins:
|
||||
self._plugins[plugin.type] = {}
|
||||
if plugin.name not in self._plugins[plugin.type]:
|
||||
self._plugins[plugin.type][plugin.name] = {}
|
||||
self._plugins[plugin.type][plugin.name][setting.name] = setting
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/config/data.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/config/data.pyc
Normal file
Binary file not shown.
502
.ve/lib/python2.7/site-packages/ansible/config/manager.py
Normal file
502
.ve/lib/python2.7/site-packages/ansible/config/manager.py
Normal file
@@ -0,0 +1,502 @@
|
||||
# Copyright: (c) 2017, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import io
|
||||
import os
|
||||
import os.path
|
||||
import sys
|
||||
import stat
|
||||
import tempfile
|
||||
import traceback
|
||||
from collections import namedtuple
|
||||
|
||||
from yaml import load as yaml_load
|
||||
try:
|
||||
# use C version if possible for speedup
|
||||
from yaml import CSafeLoader as SafeLoader
|
||||
except ImportError:
|
||||
from yaml import SafeLoader
|
||||
|
||||
from ansible.config.data import ConfigData
|
||||
from ansible.errors import AnsibleOptionsError, AnsibleError
|
||||
from ansible.module_utils.six import PY3, string_types
|
||||
from ansible.module_utils.six.moves import configparser
|
||||
from ansible.module_utils._text import to_text, to_bytes, to_native
|
||||
from ansible.module_utils.parsing.convert_bool import boolean
|
||||
from ansible.parsing.quoting import unquote
|
||||
from ansible.utils import py3compat
|
||||
from ansible.utils.path import unfrackpath
|
||||
from ansible.utils.path import makedirs_safe
|
||||
|
||||
|
||||
Plugin = namedtuple('Plugin', 'name type')
|
||||
Setting = namedtuple('Setting', 'name value origin type')
|
||||
|
||||
INTERNAL_DEFS = {'lookup': ('_terms',)}
|
||||
|
||||
|
||||
# FIXME: see if we can unify in module_utils with similar function used by argspec
|
||||
def ensure_type(value, value_type, origin=None):
|
||||
''' return a configuration variable with casting
|
||||
:arg value: The value to ensure correct typing of
|
||||
:kwarg value_type: The type of the value. This can be any of the following strings:
|
||||
:boolean: sets the value to a True or False value
|
||||
:integer: Sets the value to an integer or raises a ValueType error
|
||||
:float: Sets the value to a float or raises a ValueType error
|
||||
:list: Treats the value as a comma separated list. Split the value
|
||||
and return it as a python list.
|
||||
:none: Sets the value to None
|
||||
:path: Expands any environment variables and tilde's in the value.
|
||||
:tmp_path: Create a unique temporary directory inside of the directory
|
||||
specified by value and return its path.
|
||||
:pathlist: Treat the value as a typical PATH string. (On POSIX, this
|
||||
means colon separated strings.) Split the value and then expand
|
||||
each part for environment variables and tildes.
|
||||
'''
|
||||
|
||||
basedir = None
|
||||
if origin and os.path.isabs(origin) and os.path.exists(origin):
|
||||
basedir = origin
|
||||
|
||||
if value_type:
|
||||
value_type = value_type.lower()
|
||||
|
||||
if value_type in ('boolean', 'bool'):
|
||||
value = boolean(value, strict=False)
|
||||
|
||||
elif value is not None:
|
||||
if value_type in ('integer', 'int'):
|
||||
value = int(value)
|
||||
|
||||
elif value_type == 'float':
|
||||
value = float(value)
|
||||
|
||||
elif value_type == 'list':
|
||||
if isinstance(value, string_types):
|
||||
value = [x.strip() for x in value.split(',')]
|
||||
|
||||
elif value_type == 'none':
|
||||
if value == "None":
|
||||
value = None
|
||||
|
||||
elif value_type == 'path':
|
||||
value = resolve_path(value, basedir=basedir)
|
||||
|
||||
elif value_type in ('tmp', 'temppath', 'tmppath'):
|
||||
value = resolve_path(value, basedir=basedir)
|
||||
if not os.path.exists(value):
|
||||
makedirs_safe(value, 0o700)
|
||||
prefix = 'ansible-local-%s' % os.getpid()
|
||||
value = tempfile.mkdtemp(prefix=prefix, dir=value)
|
||||
|
||||
elif value_type == 'pathspec':
|
||||
if isinstance(value, string_types):
|
||||
value = value.split(os.pathsep)
|
||||
value = [resolve_path(x, basedir=basedir) for x in value]
|
||||
|
||||
elif value_type == 'pathlist':
|
||||
if isinstance(value, string_types):
|
||||
value = value.split(',')
|
||||
value = [resolve_path(x, basedir=basedir) for x in value]
|
||||
|
||||
elif value_type in ('str', 'string'):
|
||||
value = unquote(to_text(value, errors='surrogate_or_strict'))
|
||||
|
||||
# defaults to string type
|
||||
elif isinstance(value, string_types):
|
||||
value = unquote(value)
|
||||
|
||||
return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
|
||||
|
||||
|
||||
# FIXME: see if this can live in utils/path
|
||||
def resolve_path(path, basedir=None):
|
||||
''' resolve relative or 'varaible' paths '''
|
||||
if '{{CWD}}' in path: # allow users to force CWD using 'magic' {{CWD}}
|
||||
path = path.replace('{{CWD}}', os.getcwd())
|
||||
|
||||
return unfrackpath(path, follow=False, basedir=basedir)
|
||||
|
||||
|
||||
# FIXME: generic file type?
|
||||
def get_config_type(cfile):
|
||||
|
||||
ftype = None
|
||||
if cfile is not None:
|
||||
ext = os.path.splitext(cfile)[-1]
|
||||
if ext in ('.ini', '.cfg'):
|
||||
ftype = 'ini'
|
||||
elif ext in ('.yaml', '.yml'):
|
||||
ftype = 'yaml'
|
||||
else:
|
||||
raise AnsibleOptionsError("Unsupported configuration file extension for %s: %s" % (cfile, to_native(ext)))
|
||||
|
||||
return ftype
|
||||
|
||||
|
||||
# FIXME: can move to module_utils for use for ini plugins also?
|
||||
def get_ini_config_value(p, entry):
|
||||
''' returns the value of last ini entry found '''
|
||||
value = None
|
||||
if p is not None:
|
||||
try:
|
||||
value = p.get(entry.get('section', 'defaults'), entry.get('key', ''), raw=True)
|
||||
except Exception: # FIXME: actually report issues here
|
||||
pass
|
||||
return value
|
||||
|
||||
|
||||
def find_ini_config_file(warnings=None):
|
||||
''' Load INI Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
|
||||
# FIXME: eventually deprecate ini configs
|
||||
|
||||
if warnings is None:
|
||||
# Note: In this case, warnings does nothing
|
||||
warnings = set()
|
||||
|
||||
# A value that can never be a valid path so that we can tell if ANSIBLE_CONFIG was set later
|
||||
# We can't use None because we could set path to None.
|
||||
SENTINEL = object
|
||||
|
||||
potential_paths = []
|
||||
|
||||
# Environment setting
|
||||
path_from_env = os.getenv("ANSIBLE_CONFIG", SENTINEL)
|
||||
if path_from_env is not SENTINEL:
|
||||
path_from_env = unfrackpath(path_from_env, follow=False)
|
||||
if os.path.isdir(path_from_env):
|
||||
path_from_env = os.path.join(path_from_env, "ansible.cfg")
|
||||
potential_paths.append(path_from_env)
|
||||
|
||||
# Current working directory
|
||||
warn_cmd_public = False
|
||||
try:
|
||||
cwd = os.getcwd()
|
||||
perms = os.stat(cwd)
|
||||
cwd_cfg = os.path.join(cwd, "ansible.cfg")
|
||||
if perms.st_mode & stat.S_IWOTH:
|
||||
# Working directory is world writable so we'll skip it.
|
||||
# Still have to look for a file here, though, so that we know if we have to warn
|
||||
if os.path.exists(cwd_cfg):
|
||||
warn_cmd_public = True
|
||||
else:
|
||||
potential_paths.append(cwd_cfg)
|
||||
except OSError:
|
||||
# If we can't access cwd, we'll simply skip it as a possible config source
|
||||
pass
|
||||
|
||||
# Per user location
|
||||
potential_paths.append(unfrackpath("~/.ansible.cfg", follow=False))
|
||||
|
||||
# System location
|
||||
potential_paths.append("/etc/ansible/ansible.cfg")
|
||||
|
||||
for path in potential_paths:
|
||||
if os.path.exists(path):
|
||||
break
|
||||
else:
|
||||
path = None
|
||||
|
||||
# Emit a warning if all the following are true:
|
||||
# * We did not use a config from ANSIBLE_CONFIG
|
||||
# * There's an ansible.cfg in the current working directory that we skipped
|
||||
if path_from_env != path and warn_cmd_public:
|
||||
warnings.add(u"Ansible is being run in a world writable directory (%s),"
|
||||
u" ignoring it as an ansible.cfg source."
|
||||
u" For more information see"
|
||||
u" https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir"
|
||||
% to_text(cwd))
|
||||
|
||||
return path
|
||||
|
||||
|
||||
class ConfigManager(object):
    """Load Ansible's configuration definitions and resolve setting values.

    Definitions come from YAML files (``base.yml`` for core settings;
    per-plugin definitions are registered at runtime via
    ``initialize_plugin_configuration_definitions``).  A setting's value is
    resolved from several sources in precedence order — direct arguments,
    play variables, playbook keywords, environment variables, ini config
    file entries, then the declared default — and pushed into a
    ``ConfigData`` container (``self.data``).
    """

    # NOTE: these are class-level attributes, deliberately shared across all
    # instances so accumulated deprecations/warnings can be read off the class.
    DEPRECATED = []
    WARNINGS = set()

    def __init__(self, conf_file=None, defs_file=None):
        """Read the base setting definitions and consume the ini config file.

        :param conf_file: explicit path to an ansible.cfg; when None, the
            standard search order is used (via ``find_ini_config_file``).
        :param defs_file: alternate YAML definitions file; defaults to
            ``base.yml`` next to this module.
        """
        self._base_defs = {}
        self._plugins = {}
        # cache of parser objects keyed by config file path
        self._parsers = {}

        self._config_file = conf_file
        self.data = ConfigData()

        self._base_defs = self._read_config_yaml_file(defs_file or ('%s/base.yml' % os.path.dirname(__file__)))

        if self._config_file is None:
            # set config using ini; passing WARNINGS lets the search routine
            # record e.g. the world-writable-cwd warning for later display
            self._config_file = find_ini_config_file(self.WARNINGS)

        # consume configuration
        if self._config_file:
            if os.path.exists(self._config_file):
                # initialize parser and read config
                self._parse_config_file()

        # update constants
        self.update_config_data()
        try:
            self.update_module_defaults_groups()
        except Exception as e:
            # Since this is a 2.7 preview feature, we want to have it fail as
            # gracefully as possible when there are issues.
            sys.stderr.write('Could not load module_defaults_groups: %s: %s\n\n' % (type(e).__name__, e))
            self.module_defaults_groups = {}

    def _read_config_yaml_file(self, yml_file):
        """Load a YAML definitions file and return its content as a dict.

        Raises AnsibleError when the file does not exist, since the bundled
        definition files are required for a working install.
        """
        # TODO: handle relative paths as relative to the directory containing the current playbook instead of CWD
        # Currently this is only used with absolute paths to the `ansible/config` directory
        yml_file = to_bytes(yml_file)
        if os.path.exists(yml_file):
            with open(yml_file, 'rb') as config_def:
                # an empty file loads as None; normalize to an empty dict
                return yaml_load(config_def, Loader=SafeLoader) or {}
        raise AnsibleError(
            "Missing base YAML definition file (bad install?): %s" % to_native(yml_file))

    def _parse_config_file(self, cfile=None):
        ''' return flat configuration settings from file(s) '''
        # TODO: take list of files with merge/nomerge

        if cfile is None:
            cfile = self._config_file

        ftype = get_config_type(cfile)
        if cfile is not None:
            if ftype == 'ini':
                self._parsers[cfile] = configparser.ConfigParser()
                with open(cfile, 'rb') as f:
                    try:
                        # decode ourselves so we can raise a clear error for
                        # non-utf8 config files instead of a parser traceback
                        cfg_text = to_text(f.read(), errors='surrogate_or_strict')
                    except UnicodeError as e:
                        raise AnsibleOptionsError("Error reading config file(%s) because the config file was not utf8 encoded: %s" % (cfile, to_native(e)))
                try:
                    if PY3:
                        self._parsers[cfile].read_string(cfg_text)
                    else:
                        # Python 2 ConfigParser has no read_string(); feed it a file-like object
                        cfg_file = io.StringIO(cfg_text)
                        self._parsers[cfile].readfp(cfg_file)
                except configparser.Error as e:
                    raise AnsibleOptionsError("Error reading config file (%s): %s" % (cfile, to_native(e)))
            # FIXME: this should eventually handle yaml config files
            # elif ftype == 'yaml':
            #     with open(cfile, 'rb') as config_stream:
            #         self._parsers[cfile] = yaml.safe_load(config_stream)
            else:
                raise AnsibleOptionsError("Unsupported configuration file type: %s" % to_native(ftype))

    def _find_yaml_config_files(self):
        ''' Load YAML Config Files in order, check merge flags, keep origin of settings'''
        # placeholder: YAML config file support is not implemented yet
        pass

    def get_plugin_options(self, plugin_type, name, keys=None, variables=None, direct=None):
        """Resolve every defined option for one plugin; return {option: value}."""
        options = {}
        defs = self.get_configuration_definitions(plugin_type, name)
        for option in defs:
            options[option] = self.get_config_value(option, plugin_type=plugin_type, plugin_name=name, keys=keys, variables=variables, direct=direct)

        return options

    def get_plugin_vars(self, plugin_type, name):
        """Return the list of variable names a plugin's options can be set from."""
        pvars = []
        for pdef in self.get_configuration_definitions(plugin_type, name).values():
            if 'vars' in pdef and pdef['vars']:
                for var_entry in pdef['vars']:
                    pvars.append(var_entry['name'])
        return pvars

    def get_configuration_definitions(self, plugin_type=None, name=None):
        ''' just list the possible settings, either base or for specific plugins or plugin '''

        ret = {}
        if plugin_type is None:
            # no plugin type: the core/base settings
            ret = self._base_defs
        elif name is None:
            # all plugins of one type
            ret = self._plugins.get(plugin_type, {})
        else:
            # a single named plugin
            ret = self._plugins.get(plugin_type, {}).get(name, {})

        return ret

    def _loop_entries(self, container, entry_list):
        ''' repeat code for value entry assignment '''
        # entry_list is ordered low-to-high precedence, so the LAST entry
        # found in the container wins: value/origin are overwritten as we go.
        value = None
        origin = None
        for entry in entry_list:
            name = entry.get('name')
            temp_value = container.get(name, None)
            if temp_value is not None:  # only set if env var is defined
                value = temp_value
                origin = name

                # deal with deprecation of setting source, if used
                if 'deprecated' in entry:
                    self.DEPRECATED.append((entry['name'], entry['deprecated']))

        return value, origin

    def get_config_value(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None):
        ''' wrapper around get_config_value_and_origin() that discards the origin '''

        try:
            value, _drop = self.get_config_value_and_origin(config, cfile=cfile, plugin_type=plugin_type, plugin_name=plugin_name,
                                                            keys=keys, variables=variables, direct=direct)
        except AnsibleError:
            raise
        except Exception as e:
            raise AnsibleError("Unhandled exception when retrieving %s:\n%s" % (config, traceback.format_exc()))
        return value

    def get_config_value_and_origin(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None):
        ''' Given a config key figure out the actual value and report on the origin of the settings '''
        if cfile is None:
            # use default config
            cfile = self._config_file

        # Note: sources that are lists are listed in low to high precedence (last one wins)
        value = None
        origin = None
        defs = {}
        if plugin_type is None:
            defs = self._base_defs
        elif plugin_name is None:
            defs = self._plugins[plugin_type]
        else:
            defs = self._plugins[plugin_type][plugin_name]

        if config in defs:

            # direct setting via plugin arguments, can set to None so we bypass rest of processing/defaults
            if direct and config in direct:
                value = direct[config]
                origin = 'Direct'

            else:
                # Use 'variable overrides' if present, highest precedence, but only present when querying running play
                if variables and defs[config].get('vars'):
                    value, origin = self._loop_entries(variables, defs[config]['vars'])
                    origin = 'var: %s' % origin

                # use playbook keywords if you have em
                if value is None and keys and defs[config].get('keywords'):
                    value, origin = self._loop_entries(keys, defs[config]['keywords'])
                    origin = 'keyword: %s' % origin

                # env vars are next precedence
                if value is None and defs[config].get('env'):
                    value, origin = self._loop_entries(py3compat.environ, defs[config]['env'])
                    origin = 'env: %s' % origin

                # try config file entries next, if we have one
                # (lazily parse the file the first time it is consulted)
                if self._parsers.get(cfile, None) is None:
                    self._parse_config_file(cfile)

                if value is None and cfile is not None:
                    ftype = get_config_type(cfile)
                    if ftype and defs[config].get(ftype):
                        if ftype == 'ini':
                            # load from ini config
                            try:  # FIXME: generalize _loop_entries to allow for files also, most of this code is dupe
                                for ini_entry in defs[config]['ini']:
                                    temp_value = get_ini_config_value(self._parsers[cfile], ini_entry)
                                    if temp_value is not None:
                                        value = temp_value
                                        origin = cfile
                                        if 'deprecated' in ini_entry:
                                            self.DEPRECATED.append(('[%s]%s' % (ini_entry['section'], ini_entry['key']), ini_entry['deprecated']))
                            except Exception as e:
                                sys.stderr.write("Error while loading ini config %s: %s" % (cfile, to_native(e)))
                        elif ftype == 'yaml':
                            # FIXME: implement, also , break down key from defs (. notation???)
                            origin = cfile

                # set default if we got here w/o a value
                if value is None:
                    if defs[config].get('required', False):
                        entry = ''
                        if plugin_type:
                            entry += 'plugin_type: %s ' % plugin_type
                        if plugin_name:
                            entry += 'plugin: %s ' % plugin_name
                        entry += 'setting: %s ' % config
                        if not plugin_type or config not in INTERNAL_DEFS.get(plugin_type, {}):
                            raise AnsibleError("No setting was provided for required configuration %s" % (entry))
                    else:
                        value = defs[config].get('default')
                        origin = 'default'
                        # skip typing as this is a templated default that will be resolved later in constants, which has needed vars
                        if plugin_type is None and isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')):
                            return value, origin

            # ensure correct type, can raise exceptions on mismatched types
            value = ensure_type(value, defs[config].get('type'), origin=origin)

            # deal with deprecation of the setting
            if 'deprecated' in defs[config] and origin != 'default':
                self.DEPRECATED.append((config, defs[config].get('deprecated')))
        else:
            raise AnsibleError('Requested option %s was not defined in configuration' % to_native(config))

        return value, origin

    def initialize_plugin_configuration_definitions(self, plugin_type, name, defs):
        """Register option definitions for a single named plugin."""
        if plugin_type not in self._plugins:
            self._plugins[plugin_type] = {}

        self._plugins[plugin_type][name] = defs

    def update_module_defaults_groups(self):
        """Load module_defaults.yml and cache its 'groupings' map on the instance."""
        defaults_config = self._read_config_yaml_file(
            '%s/module_defaults.yml' % os.path.join(os.path.dirname(__file__))
        )
        # only the 1.x grouping format is understood
        if defaults_config.get('version') not in ('1', '1.0', 1, 1.0):
            raise AnsibleError('module_defaults.yml has an invalid version "%s" for configuration. Could be a bad install.' % defaults_config.get('version'))
        self.module_defaults_groups = defaults_config.get('groupings', {})

    def update_config_data(self, defs=None, configfile=None):
        ''' really: update constants '''

        if defs is None:
            defs = self._base_defs

        if configfile is None:
            configfile = self._config_file

        if not isinstance(defs, dict):
            raise AnsibleOptionsError("Invalid configuration definition type: %s for %s" % (type(defs), defs))

        # update the constant for config file
        self.data.update_setting(Setting('CONFIG_FILE', configfile, '', 'string'))

        origin = None
        # env and config defs can have several entries, ordered in list from lowest to highest precedence
        for config in defs:
            if not isinstance(defs[config], dict):
                raise AnsibleOptionsError("Invalid configuration definition '%s': type is %s" % (to_native(config), type(defs[config])))

            # get value and origin
            try:
                value, origin = self.get_config_value_and_origin(config, configfile)
            except Exception as e:
                # Printing the problem here because, in the current code:
                # (1) we can't reach the error handler for AnsibleError before we
                #     hit a different error due to lack of working config.
                # (2) We don't have access to display yet because display depends on config
                #     being properly loaded.
                #
                # If we start getting double errors printed from this section of code, then the
                # above problem #1 has been fixed.  Revamp this to be more like the try: except
                # in get_config_value() at that time.
                sys.stderr.write("Unhandled error:\n %s\n\n" % traceback.format_exc())
                raise AnsibleError("Invalid settings supplied for %s: %s\n%s" % (config, to_native(e), traceback.format_exc()))

            # set the constant
            self.data.update_setting(Setting(config, value, origin, defs[config].get('type', 'string')))
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/config/manager.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/config/manager.pyc
Normal file
Binary file not shown.
@@ -0,0 +1,594 @@
|
||||
version: '1.0'
|
||||
groupings:
|
||||
aws_acm_facts:
|
||||
- aws
|
||||
aws_api_gateway:
|
||||
- aws
|
||||
aws_application_scaling_policy:
|
||||
- aws
|
||||
aws_az_facts:
|
||||
- aws
|
||||
aws_batch_compute_environment:
|
||||
- aws
|
||||
aws_batch_job_definition:
|
||||
- aws
|
||||
aws_batch_job_queue:
|
||||
- aws
|
||||
aws_caller_facts:
|
||||
- aws
|
||||
aws_config_aggregation_authorization:
|
||||
- aws
|
||||
aws_config_aggregator:
|
||||
- aws
|
||||
aws_config_delivery_channel:
|
||||
- aws
|
||||
aws_config_recorder:
|
||||
- aws
|
||||
aws_config_rule:
|
||||
- aws
|
||||
aws_direct_connect_connection:
|
||||
- aws
|
||||
aws_direct_connect_gateway:
|
||||
- aws
|
||||
aws_direct_connect_link_aggregation_group:
|
||||
- aws
|
||||
aws_direct_connect_virtual_interface:
|
||||
- aws
|
||||
aws_eks_cluster:
|
||||
- aws
|
||||
aws_elasticbeanstalk_app:
|
||||
- aws
|
||||
aws_glue_connection:
|
||||
- aws
|
||||
aws_glue_job:
|
||||
- aws
|
||||
aws_inspector_target:
|
||||
- aws
|
||||
aws_kms:
|
||||
- aws
|
||||
aws_kms_facts:
|
||||
- aws
|
||||
aws_region_facts:
|
||||
- aws
|
||||
aws_s3:
|
||||
- aws
|
||||
aws_s3_bucket_facts:
|
||||
- aws
|
||||
aws_s3_cors:
|
||||
- aws
|
||||
aws_ses_identity:
|
||||
- aws
|
||||
aws_ses_identity_policy:
|
||||
- aws
|
||||
aws_sgw_facts:
|
||||
- aws
|
||||
aws_ssm_parameter_store:
|
||||
- aws
|
||||
aws_waf_condition:
|
||||
- aws
|
||||
aws_waf_facts:
|
||||
- aws
|
||||
aws_waf_rule:
|
||||
- aws
|
||||
aws_waf_web_acl:
|
||||
- aws
|
||||
cloudformation:
|
||||
- aws
|
||||
cloudformation_facts:
|
||||
- aws
|
||||
cloudfront_distribution:
|
||||
- aws
|
||||
cloudfront_facts:
|
||||
- aws
|
||||
cloudfront_invalidation:
|
||||
- aws
|
||||
cloudfront_origin_access_identity:
|
||||
- aws
|
||||
cloudtrail:
|
||||
- aws
|
||||
cloudwatchevent_rule:
|
||||
- aws
|
||||
cloudwatchlogs_log_group:
|
||||
- aws
|
||||
cloudwatchlogs_log_group_facts:
|
||||
- aws
|
||||
data_pipeline:
|
||||
- aws
|
||||
dynamodb_table:
|
||||
- aws
|
||||
dynamodb_ttl:
|
||||
- aws
|
||||
ec2:
|
||||
- aws
|
||||
ec2_ami:
|
||||
- aws
|
||||
ec2_ami_copy:
|
||||
- aws
|
||||
ec2_ami_facts:
|
||||
- aws
|
||||
ec2_asg:
|
||||
- aws
|
||||
ec2_asg_facts:
|
||||
- aws
|
||||
ec2_asg_lifecycle_hook:
|
||||
- aws
|
||||
ec2_customer_gateway:
|
||||
- aws
|
||||
ec2_customer_gateway_facts:
|
||||
- aws
|
||||
ec2_eip:
|
||||
- aws
|
||||
ec2_eip_facts:
|
||||
- aws
|
||||
ec2_elb:
|
||||
- aws
|
||||
ec2_elb_facts:
|
||||
- aws
|
||||
ec2_elb_lb:
|
||||
- aws
|
||||
ec2_eni:
|
||||
- aws
|
||||
ec2_eni_facts:
|
||||
- aws
|
||||
ec2_group:
|
||||
- aws
|
||||
ec2_group_facts:
|
||||
- aws
|
||||
ec2_instance:
|
||||
- aws
|
||||
ec2_instance_facts:
|
||||
- aws
|
||||
ec2_key:
|
||||
- aws
|
||||
ec2_lc:
|
||||
- aws
|
||||
ec2_lc_facts:
|
||||
- aws
|
||||
ec2_lc_find:
|
||||
- aws
|
||||
ec2_metric_alarm:
|
||||
- aws
|
||||
ec2_placement_group:
|
||||
- aws
|
||||
ec2_placement_group_facts:
|
||||
- aws
|
||||
ec2_scaling_policy:
|
||||
- aws
|
||||
ec2_snapshot:
|
||||
- aws
|
||||
ec2_snapshot_copy:
|
||||
- aws
|
||||
ec2_snapshot_facts:
|
||||
- aws
|
||||
ec2_tag:
|
||||
- aws
|
||||
ec2_vol:
|
||||
- aws
|
||||
ec2_vol_facts:
|
||||
- aws
|
||||
ec2_vpc_dhcp_option:
|
||||
- aws
|
||||
ec2_vpc_dhcp_option_facts:
|
||||
- aws
|
||||
ec2_vpc_egress_igw:
|
||||
- aws
|
||||
ec2_vpc_endpoint:
|
||||
- aws
|
||||
ec2_vpc_endpoint_facts:
|
||||
- aws
|
||||
ec2_vpc_igw:
|
||||
- aws
|
||||
ec2_vpc_igw_facts:
|
||||
- aws
|
||||
ec2_vpc_nacl:
|
||||
- aws
|
||||
ec2_vpc_nacl_facts:
|
||||
- aws
|
||||
ec2_vpc_nat_gateway:
|
||||
- aws
|
||||
ec2_vpc_nat_gateway_facts:
|
||||
- aws
|
||||
ec2_vpc_net:
|
||||
- aws
|
||||
ec2_vpc_net_facts:
|
||||
- aws
|
||||
ec2_vpc_peer:
|
||||
- aws
|
||||
ec2_vpc_peering_facts:
|
||||
- aws
|
||||
ec2_vpc_route_table:
|
||||
- aws
|
||||
ec2_vpc_route_table_facts:
|
||||
- aws
|
||||
ec2_vpc_subnet:
|
||||
- aws
|
||||
ec2_vpc_subnet_facts:
|
||||
- aws
|
||||
ec2_vpc_vgw:
|
||||
- aws
|
||||
ec2_vpc_vgw_facts:
|
||||
- aws
|
||||
ec2_vpc_vpn:
|
||||
- aws
|
||||
ec2_vpc_vpn_facts:
|
||||
- aws
|
||||
ec2_win_password:
|
||||
- aws
|
||||
ecs_attribute:
|
||||
- aws
|
||||
ecs_cluster:
|
||||
- aws
|
||||
ecs_ecr:
|
||||
- aws
|
||||
ecs_service:
|
||||
- aws
|
||||
ecs_service_facts:
|
||||
- aws
|
||||
ecs_task:
|
||||
- aws
|
||||
ecs_taskdefinition:
|
||||
- aws
|
||||
ecs_taskdefinition_facts:
|
||||
- aws
|
||||
efs:
|
||||
- aws
|
||||
efs_facts:
|
||||
- aws
|
||||
elasticache:
|
||||
- aws
|
||||
elasticache_facts:
|
||||
- aws
|
||||
elasticache_parameter_group:
|
||||
- aws
|
||||
elasticache_snapshot:
|
||||
- aws
|
||||
elasticache_subnet_group:
|
||||
- aws
|
||||
elb_application_lb:
|
||||
- aws
|
||||
elb_application_lb_facts:
|
||||
- aws
|
||||
elb_classic_lb:
|
||||
- aws
|
||||
elb_classic_lb_facts:
|
||||
- aws
|
||||
elb_instance:
|
||||
- aws
|
||||
elb_network_lb:
|
||||
- aws
|
||||
elb_target:
|
||||
- aws
|
||||
elb_target_group:
|
||||
- aws
|
||||
elb_target_group_facts:
|
||||
- aws
|
||||
execute_lambda:
|
||||
- aws
|
||||
iam:
|
||||
- aws
|
||||
iam_cert:
|
||||
- aws
|
||||
iam_group:
|
||||
- aws
|
||||
iam_managed_policy:
|
||||
- aws
|
||||
iam_mfa_device_facts:
|
||||
- aws
|
||||
iam_policy:
|
||||
- aws
|
||||
iam_role:
|
||||
- aws
|
||||
iam_role_facts:
|
||||
- aws
|
||||
iam_server_certificate_facts:
|
||||
- aws
|
||||
iam_user:
|
||||
- aws
|
||||
kinesis_stream:
|
||||
- aws
|
||||
lambda:
|
||||
- aws
|
||||
lambda_alias:
|
||||
- aws
|
||||
lambda_event:
|
||||
- aws
|
||||
lambda_facts:
|
||||
- aws
|
||||
lambda_policy:
|
||||
- aws
|
||||
lightsail:
|
||||
- aws
|
||||
rds:
|
||||
- aws
|
||||
rds_instance:
|
||||
- aws
|
||||
rds_instance_facts:
|
||||
- aws
|
||||
rds_param_group:
|
||||
- aws
|
||||
rds_snapshot_facts:
|
||||
- aws
|
||||
rds_subnet_group:
|
||||
- aws
|
||||
redshift:
|
||||
- aws
|
||||
redshift_facts:
|
||||
- aws
|
||||
redshift_subnet_group:
|
||||
- aws
|
||||
route53:
|
||||
- aws
|
||||
route53_facts:
|
||||
- aws
|
||||
route53_health_check:
|
||||
- aws
|
||||
route53_zone:
|
||||
- aws
|
||||
s3_bucket:
|
||||
- aws
|
||||
s3_lifecycle:
|
||||
- aws
|
||||
s3_logging:
|
||||
- aws
|
||||
s3_sync:
|
||||
- aws
|
||||
s3_website:
|
||||
- aws
|
||||
sns:
|
||||
- aws
|
||||
sns_topic:
|
||||
- aws
|
||||
sqs_queue:
|
||||
- aws
|
||||
sts_assume_role:
|
||||
- aws
|
||||
sts_session_token:
|
||||
- aws
|
||||
gcp_compute_address:
|
||||
- gcp
|
||||
gcp_compute_address_facts:
|
||||
- gcp
|
||||
gcp_compute_backend_bucket:
|
||||
- gcp
|
||||
gcp_compute_backend_bucket_facts:
|
||||
- gcp
|
||||
gcp_compute_backend_service:
|
||||
- gcp
|
||||
gcp_compute_backend_service_facts:
|
||||
- gcp
|
||||
gcp_compute_disk:
|
||||
- gcp
|
||||
gcp_compute_disk_facts:
|
||||
- gcp
|
||||
gcp_compute_firewall:
|
||||
- gcp
|
||||
gcp_compute_firewall_facts:
|
||||
- gcp
|
||||
gcp_compute_forwarding_rule:
|
||||
- gcp
|
||||
gcp_compute_forwarding_rule_facts:
|
||||
- gcp
|
||||
gcp_compute_global_address:
|
||||
- gcp
|
||||
gcp_compute_global_address_facts:
|
||||
- gcp
|
||||
gcp_compute_global_forwarding_rule:
|
||||
- gcp
|
||||
gcp_compute_global_forwarding_rule_facts:
|
||||
- gcp
|
||||
gcp_compute_health_check:
|
||||
- gcp
|
||||
gcp_compute_health_check_facts:
|
||||
- gcp
|
||||
gcp_compute_http_health_check:
|
||||
- gcp
|
||||
gcp_compute_http_health_check_facts:
|
||||
- gcp
|
||||
gcp_compute_https_health_check:
|
||||
- gcp
|
||||
gcp_compute_https_health_check_facts:
|
||||
- gcp
|
||||
gcp_compute_image:
|
||||
- gcp
|
||||
gcp_compute_image_facts:
|
||||
- gcp
|
||||
gcp_compute_instance:
|
||||
- gcp
|
||||
gcp_compute_instance_facts:
|
||||
- gcp
|
||||
gcp_compute_instance_group:
|
||||
- gcp
|
||||
gcp_compute_instance_group_facts:
|
||||
- gcp
|
||||
gcp_compute_instance_group_manager:
|
||||
- gcp
|
||||
gcp_compute_instance_group_manager_facts:
|
||||
- gcp
|
||||
gcp_compute_instance_template:
|
||||
- gcp
|
||||
gcp_compute_instance_template_facts:
|
||||
- gcp
|
||||
gcp_compute_network:
|
||||
- gcp
|
||||
gcp_compute_network_facts:
|
||||
- gcp
|
||||
gcp_compute_route:
|
||||
- gcp
|
||||
gcp_compute_route_facts:
|
||||
- gcp
|
||||
gcp_compute_router_facts:
|
||||
- gcp
|
||||
gcp_compute_ssl_certificate:
|
||||
- gcp
|
||||
gcp_compute_ssl_certificate_facts:
|
||||
- gcp
|
||||
gcp_compute_ssl_policy:
|
||||
- gcp
|
||||
gcp_compute_ssl_policy_facts:
|
||||
- gcp
|
||||
gcp_compute_subnetwork:
|
||||
- gcp
|
||||
gcp_compute_subnetwork_facts:
|
||||
- gcp
|
||||
gcp_compute_target_http_proxy:
|
||||
- gcp
|
||||
gcp_compute_target_http_proxy_facts:
|
||||
- gcp
|
||||
gcp_compute_target_https_proxy:
|
||||
- gcp
|
||||
gcp_compute_target_https_proxy_facts:
|
||||
- gcp
|
||||
gcp_compute_target_pool:
|
||||
- gcp
|
||||
gcp_compute_target_pool_facts:
|
||||
- gcp
|
||||
gcp_compute_target_ssl_proxy:
|
||||
- gcp
|
||||
gcp_compute_target_ssl_proxy_facts:
|
||||
- gcp
|
||||
gcp_compute_target_tcp_proxy:
|
||||
- gcp
|
||||
gcp_compute_target_tcp_proxy_facts:
|
||||
- gcp
|
||||
gcp_compute_target_vpn_gateway:
|
||||
- gcp
|
||||
gcp_compute_target_vpn_gateway_facts:
|
||||
- gcp
|
||||
gcp_compute_url_map:
|
||||
- gcp
|
||||
gcp_compute_url_map_facts:
|
||||
- gcp
|
||||
gcp_compute_vpn_tunnel:
|
||||
- gcp
|
||||
gcp_compute_vpn_tunnel_facts:
|
||||
- gcp
|
||||
gcp_container_cluster:
|
||||
- gcp
|
||||
gcp_container_node_pool:
|
||||
- gcp
|
||||
gcp_dns_managed_zone:
|
||||
- gcp
|
||||
gcp_dns_resource_record_set:
|
||||
- gcp
|
||||
gcp_pubsub_subscription:
|
||||
- gcp
|
||||
gcp_pubsub_topic:
|
||||
- gcp
|
||||
gcp_storage_bucket:
|
||||
- gcp
|
||||
gcp_storage_bucket_access_control:
|
||||
- gcp
|
||||
azure_rm_acs:
|
||||
- azure
|
||||
azure_rm_aks:
|
||||
- azure
|
||||
azure_rm_aks_facts:
|
||||
- azure
|
||||
azure_rm_appserviceplan:
|
||||
- azure
|
||||
azure_rm_appserviceplan_facts:
|
||||
- azure
|
||||
azure_rm_availabilityset:
|
||||
- azure
|
||||
azure_rm_availabilityset_facts:
|
||||
- azure
|
||||
azure_rm_containerinstance:
|
||||
- azure
|
||||
azure_rm_containerregistry:
|
||||
- azure
|
||||
azure_rm_deployment:
|
||||
- azure
|
||||
azure_rm_dnsrecordset:
|
||||
- azure
|
||||
azure_rm_dnsrecordset_facts:
|
||||
- azure
|
||||
azure_rm_dnszone:
|
||||
- azure
|
||||
azure_rm_dnszone_facts:
|
||||
- azure
|
||||
azure_rm_functionapp:
|
||||
- azure
|
||||
azure_rm_functionapp_facts:
|
||||
- azure
|
||||
azure_rm_image:
|
||||
- azure
|
||||
azure_rm_keyvault:
|
||||
- azure
|
||||
azure_rm_keyvaultkey:
|
||||
- azure
|
||||
azure_rm_keyvaultsecret:
|
||||
- azure
|
||||
azure_rm_loadbalancer:
|
||||
- azure
|
||||
azure_rm_loadbalancer_facts:
|
||||
- azure
|
||||
azure_rm_managed_disk:
|
||||
- azure
|
||||
azure_rm_managed_disk_facts:
|
||||
- azure
|
||||
azure_rm_mysqldatabase:
|
||||
- azure
|
||||
azure_rm_mysqldatabase_facts:
|
||||
- azure
|
||||
azure_rm_mysqlserver:
|
||||
- azure
|
||||
azure_rm_mysqlserver_facts:
|
||||
- azure
|
||||
azure_rm_networkinterface:
|
||||
- azure
|
||||
azure_rm_networkinterface_facts:
|
||||
- azure
|
||||
azure_rm_postgresqldatabase:
|
||||
- azure
|
||||
azure_rm_postgresqldatabase_facts:
|
||||
- azure
|
||||
azure_rm_postgresqlserver:
|
||||
- azure
|
||||
azure_rm_publicipaddress:
|
||||
- azure
|
||||
azure_rm_publicipaddress_facts:
|
||||
- azure
|
||||
azure_rm_resource:
|
||||
- azure
|
||||
azure_rm_resource_facts:
|
||||
- azure
|
||||
azure_rm_resourcegroup:
|
||||
- azure
|
||||
azure_rm_resourcegroup_facts:
|
||||
- azure
|
||||
azure_rm_securitygroup:
|
||||
- azure
|
||||
azure_rm_securitygroup_facts:
|
||||
- azure
|
||||
azure_rm_sqldatabase:
|
||||
- azure
|
||||
azure_rm_sqlserver:
|
||||
- azure
|
||||
azure_rm_sqlserver_facts:
|
||||
- azure
|
||||
azure_rm_storageaccount:
|
||||
- azure
|
||||
azure_rm_storageaccount_facts:
|
||||
- azure
|
||||
azure_rm_storageblob:
|
||||
- azure
|
||||
azure_rm_subnet:
|
||||
- azure
|
||||
azure_rm_virtualmachine:
|
||||
- azure
|
||||
azure_rm_virtualmachine_extension:
|
||||
- azure
|
||||
azure_rm_virtualmachine_facts:
|
||||
- azure
|
||||
azure_rm_virtualmachineimage_facts:
|
||||
- azure
|
||||
azure_rm_virtualmachine_scaleset:
|
||||
- azure
|
||||
azure_rm_virtualmachine_scaleset_facts:
|
||||
- azure
|
||||
azure_rm_virtualnetwork:
|
||||
- azure
|
||||
azure_rm_virtualnetwork_facts:
|
||||
- azure
|
||||
azure_rm_webapp:
|
||||
- azure
|
||||
208
.ve/lib/python2.7/site-packages/ansible/constants.py
Normal file
208
.ve/lib/python2.7/site-packages/ansible/constants.py
Normal file
@@ -0,0 +1,208 @@
|
||||
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
# Copyright: (c) 2017, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
|
||||
from ast import literal_eval
|
||||
from jinja2 import Template
|
||||
from string import ascii_letters, digits
|
||||
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.module_utils.parsing.convert_bool import boolean, BOOLEANS_TRUE
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.config.manager import ConfigManager, ensure_type, get_ini_config_value
|
||||
|
||||
|
||||
def _warning(msg):
    """Best-effort warning emitter for early startup.

    The full display object is not guaranteed to exist (or be complete) this
    early; try it, and on any failure fall back to a bare stderr write.
    """
    fallback = ' [WARNING] %s\n' % (msg)
    try:
        from __main__ import display
        display.warning(msg)
    except Exception:
        import sys
        sys.stderr.write(fallback)
|
||||
|
||||
|
||||
def _deprecated(msg, version='2.8'):
    """Best-effort deprecation-notice emitter for early startup.

    The full display object is not guaranteed to exist (or be complete) this
    early; try it, and on any failure fall back to a bare stderr write.
    """
    fallback = ' [DEPRECATED] %s, to be removed in %s\n' % (msg, version)
    try:
        from __main__ import display
        display.deprecated(msg, version=version)
    except Exception:
        import sys
        sys.stderr.write(fallback)
|
||||
|
||||
|
||||
def mk_boolean(value):
    """Deprecated alias kept for backwards compatibility.

    The implementation now lives in
    ``ansible.module_utils.parsing.convert_bool.boolean()``; this shim warns
    and delegates to it (non-strict, so unrecognized values become False).
    """
    _deprecated('ansible.constants.mk_boolean() is deprecated. Use ansible.module_utils.parsing.convert_bool.boolean() instead')
    result = boolean(value, strict=False)
    return result
|
||||
|
||||
|
||||
def get_config(parser, section, key, env_var, default_value, value_type=None, expand_relative_paths=False):
    """Legacy config lookup kept for backwards compatibility, but deprecated.

    Resolves a single setting using the old precedence: environment
    variable, then ini file entry, then the supplied default, and coerces
    the result with ``ensure_type``.
    """
    _deprecated('ansible.constants.get_config() is deprecated. There is new config API, see porting docs.')

    # small reconstruction of the old code: env > ini > default
    resolved = os.environ.get(env_var, None)
    if resolved is None:
        try:
            resolved = get_ini_config_value(parser, {'key': key, 'section': section})
        except Exception:
            # any parser failure simply falls through to the default value
            pass
    if resolved is None:
        resolved = default_value

    return ensure_type(resolved, value_type)
|
||||
|
||||
|
||||
def set_constant(name, value, export=vars()):
    """Publish *value* as a module-level constant named *name*.

    The default for ``export`` is evaluated once, at function definition
    time, so it is this module's own namespace dict; assigning into it makes
    the constant importable as ``ansible.constants.<name>``.  Callers may
    pass an explicit mapping to divert the assignment elsewhere.
    """
    export[name] = value
|
||||
|
||||
|
||||
# CONSTANTS ### yes, actual ones

# Known privilege-escalation methods, used by the become machinery.
BECOME_METHODS = ['sudo', 'su', 'pbrun', 'pfexec', 'doas', 'dzdo', 'ksu', 'runas', 'pmrun', 'enable', 'machinectl']
# Per-method output strings that signal a failed escalation attempt.
BECOME_ERROR_STRINGS = {
    'sudo': 'Sorry, try again.',
    'su': 'Authentication failure',
    'pbrun': '',
    'pfexec': '',
    'doas': 'Permission denied',
    'dzdo': '',
    'ksu': 'Password incorrect',
    'pmrun': 'You are not permitted to run this command',
    'enable': '',
    'machinectl': '',
}  # FIXME: deal with i18n
# Per-method output strings that signal a password was required but missing.
BECOME_MISSING_STRINGS = {
    'sudo': 'sorry, a password is required to run sudo',
    'su': '',
    'pbrun': '',
    'pfexec': '',
    'doas': 'Authorization required',
    'dzdo': '',
    'ksu': 'No password given',
    'pmrun': '',
    'enable': '',
    'machinectl': '',
}  # FIXME: deal with i18n
# File extensions never considered candidate modules/plugins.
BLACKLIST_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt', '.rst')
BOOL_TRUE = BOOLEANS_TRUE
# NOTE(review): the 'CONTROLER' spelling is kept as-is; it is a public name
# that external code may already reference.
CONTROLER_LANG = os.getenv('LANG', 'en_US.UTF-8')
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict')  # characters included in auto-generated passwords
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
# FIXME: expand to other plugins, but never doc fragments
CONFIGURABLE_PLUGINS = ('cache', 'callback', 'connection', 'inventory', 'lookup', 'shell', 'cliconf', 'httpapi')
# NOTE: always update the docs/docsite/Makefile to match
DOCUMENTABLE_PLUGINS = ('cache', 'callback', 'connection', 'inventory', 'lookup', 'shell', 'module', 'strategy', 'vars')
IGNORE_FILES = ("COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES")  # ignore during module search
INTERNAL_RESULT_KEYS = ('add_host', 'add_group')
LOCALHOST = ('127.0.0.1', 'localhost', '::1')
MODULE_REQUIRE_ARGS = ('command', 'win_command', 'shell', 'win_shell', 'raw', 'script')
MODULE_NO_JSON = ('command', 'win_command', 'shell', 'win_shell', 'raw')
RESTRICTED_RESULT_KEYS = ('ansible_rsync_path', 'ansible_playbook_python')
TREE_DIR = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0

# FIXME: remove once play_context mangling is removed
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the PlayContext
# object. The dictionary values are tuples, to account for aliases
# in variable names.

COMMON_CONNECTION_VARS = frozenset(('ansible_connection', 'ansible_host', 'ansible_user', 'ansible_shell_executable',
                                    'ansible_port', 'ansible_pipelining', 'ansible_password', 'ansible_timeout',
                                    'ansible_shell_type', 'ansible_module_compression', 'ansible_private_key_file'))

MAGIC_VARIABLE_MAPPING = dict(

    # base
    connection=('ansible_connection', ),
    module_compression=('ansible_module_compression', ),
    shell=('ansible_shell_type', ),
    executable=('ansible_shell_executable', ),

    # connection common
    remote_addr=('ansible_ssh_host', 'ansible_host'),
    remote_user=('ansible_ssh_user', 'ansible_user'),
    password=('ansible_ssh_pass', 'ansible_password'),
    port=('ansible_ssh_port', 'ansible_port'),
    pipelining=('ansible_ssh_pipelining', 'ansible_pipelining'),
    timeout=('ansible_ssh_timeout', 'ansible_timeout'),
    private_key_file=('ansible_ssh_private_key_file', 'ansible_private_key_file'),

    # networking modules
    network_os=('ansible_network_os', ),
    connection_user=('ansible_connection_user',),

    # ssh TODO: remove
    ssh_executable=('ansible_ssh_executable', ),
    ssh_common_args=('ansible_ssh_common_args', ),
    sftp_extra_args=('ansible_sftp_extra_args', ),
    scp_extra_args=('ansible_scp_extra_args', ),
    ssh_extra_args=('ansible_ssh_extra_args', ),
    ssh_transfer_method=('ansible_ssh_transfer_method', ),

    # docker TODO: remove
    docker_extra_args=('ansible_docker_extra_args', ),

    # become
    become=('ansible_become', ),
    become_method=('ansible_become_method', ),
    become_user=('ansible_become_user', ),
    become_pass=('ansible_become_password', 'ansible_become_pass'),
    become_exe=('ansible_become_exe', ),
    become_flags=('ansible_become_flags', ),

    # deprecated
    sudo=('ansible_sudo', ),
    sudo_user=('ansible_sudo_user', ),
    sudo_pass=('ansible_sudo_password', 'ansible_sudo_pass'),
    sudo_exe=('ansible_sudo_exe', ),
    sudo_flags=('ansible_sudo_flags', ),
    su=('ansible_su', ),
    su_user=('ansible_su_user', ),
    su_pass=('ansible_su_password', 'ansible_su_pass'),
    su_exe=('ansible_su_exe', ),
    su_flags=('ansible_su_flags', ),
)

# POPULATE SETTINGS FROM CONFIG ###
# Import-time side effect: reading the config file happens on first import
# of ansible.constants.
config = ConfigManager()

# Generate constants from config
for setting in config.data.get_settings():

    value = setting.value
    # Defaults that look like Jinja2 expressions ('{{...}}') are rendered
    # against this module's namespace so they can reference the constants
    # defined above; the result is literal_eval'd back into Python data
    # where possible.
    if setting.origin == 'default' and \
            isinstance(setting.value, string_types) and \
            (setting.value.startswith('{{') and setting.value.endswith('}}')):
        try:
            t = Template(setting.value)
            value = t.render(vars())
            try:
                value = literal_eval(value)
            except ValueError:
                pass  # not a python data structure
        except Exception:
            pass  # not templatable

    value = ensure_type(value, setting.type)

    set_constant(setting.name, value)

# surface any warnings collected while locating/reading the config file
for warn in config.WARNINGS:
    _warning(warn)
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/constants.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/constants.pyc
Normal file
Binary file not shown.
292
.ve/lib/python2.7/site-packages/ansible/errors/__init__.py
Normal file
292
.ve/lib/python2.7/site-packages/ansible/errors/__init__.py
Normal file
@@ -0,0 +1,292 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from collections import Sequence
|
||||
import traceback
|
||||
import sys
|
||||
|
||||
from ansible.errors.yaml_strings import (
|
||||
YAML_COMMON_DICT_ERROR,
|
||||
YAML_COMMON_LEADING_TAB_ERROR,
|
||||
YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR,
|
||||
YAML_COMMON_UNBALANCED_QUOTES_ERROR,
|
||||
YAML_COMMON_UNQUOTED_COLON_ERROR,
|
||||
YAML_COMMON_UNQUOTED_VARIABLE_ERROR,
|
||||
YAML_POSITION_DETAILS,
|
||||
)
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
|
||||
|
||||
class AnsibleError(Exception):
    '''
    This is the base class for all errors raised from Ansible code,
    and can be instantiated with two optional parameters beyond the
    error message to control whether detailed information is displayed
    when the error occurred while parsing a data file of some kind.

    Usage:

        raise AnsibleError('some message here', obj=obj, show_content=True)

    Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
    which should be returned by the DataLoader() class.
    '''

    def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None):
        super(AnsibleError, self).__init__(message)

        # we import this here to prevent an import loop problem,
        # since the objects code also imports ansible.errors
        from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject

        self._obj = obj
        self._show_content = show_content
        # Only YAML-sourced objects carry file/line/column position data, so
        # the extended (contextual) error report is only attempted for those.
        if obj and isinstance(obj, AnsibleBaseYAMLObject):
            extended_error = self._get_extended_error()
            if extended_error and not suppress_extended_error:
                self.message = '%s\n\n%s' % (to_native(message), to_native(extended_error))
            else:
                self.message = '%s' % to_native(message)
        else:
            self.message = '%s' % to_native(message)
        # keep a handle on the causing exception, if the caller supplied one
        if orig_exc:
            self.orig_exc = orig_exc

        # capture whatever traceback is active at construction time
        self.tb = ''.join(traceback.format_tb(sys.exc_info()[2]))

    def __str__(self):
        # the fully-assembled (possibly extended) message
        return self.message

    def __repr__(self):
        return self.message

    def _get_error_lines_from_file(self, file_name, line_number):
        '''
        Returns the line in the file which corresponds to the reported error
        location, as well as the line preceding it (if the error did not
        occur on the first line), to provide context to the error.
        '''

        target_line = ''
        prev_line = ''

        with open(file_name, 'r') as f:
            lines = f.readlines()

            # line_number is 0-based here (callers pass ansible_pos line - 1);
            # an out-of-range index is handled by the IndexError catch in
            # _get_extended_error
            target_line = lines[line_number]
            if line_number > 0:
                prev_line = lines[line_number - 1]

        return (target_line, prev_line)

    def _get_extended_error(self):
        '''
        Given an object reporting the location of the exception in a file, return
        detailed information regarding it including:

          * the line which caused the error as well as the one preceding it
          * causes and suggested remedies for common syntax errors

        If this error was created with show_content=False, the reporting of content
        is suppressed, as the file contents may be sensitive (ie. vault data).
        '''

        error_message = ''

        try:
            (src_file, line_number, col_number) = self._obj.ansible_pos
            error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
            if src_file not in ('<string>', '<unicode>') and self._show_content:
                (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
                target_line = to_text(target_line)
                prev_line = to_text(prev_line)
                if target_line:
                    stripped_line = target_line.replace(" ", "")
                    # point an ASCII arrow at the reported column
                    arrow_line = (" " * (col_number - 1)) + "^ here"
                    # header_line = ("=" * 73)
                    error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)

                    # TODO: There may be cases where there is a valid tab in a line that has other errors.
                    if '\t' in target_line:
                        error_message += YAML_COMMON_LEADING_TAB_ERROR
                    # common error/remediation checking here:
                    # check for unquoted vars starting lines
                    # NOTE(review): the 'or' makes the second clause true whenever
                    # either quote style is absent from the line — looks like it
                    # was meant to be 'and'; confirm against upstream before changing.
                    if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line):
                        error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
                    # check for common dictionary mistakes
                    elif ":{{" in stripped_line and "}}" in stripped_line:
                        error_message += YAML_COMMON_DICT_ERROR
                    # check for common unquoted colon mistakes
                    elif (len(target_line) and
                            len(target_line) > 1 and
                            len(target_line) > col_number and
                            target_line[col_number] == ":" and
                            target_line.count(':') > 1):
                        error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
                    # otherwise, check for some common quoting mistakes
                    else:
                        parts = target_line.split(":")
                        if len(parts) > 1:
                            middle = parts[1].strip()
                            match = False
                            unbalanced = False

                            # value starts quoted but the line does not end
                            # with the same quote character
                            if middle.startswith("'") and not middle.endswith("'"):
                                match = True
                            elif middle.startswith('"') and not middle.endswith('"'):
                                match = True

                            # NOTE(review): python precedence groups this as
                            # `(a and b and c and count("'") > 2) or count('"') > 2`,
                            # so the double-quote count alone can trip this check —
                            # confirm whether extra parentheses were intended.
                            if (len(middle) > 0 and
                                    middle[0] in ['"', "'"] and
                                    middle[-1] in ['"', "'"] and
                                    target_line.count("'") > 2 or
                                    target_line.count('"') > 2):
                                unbalanced = True

                            if match:
                                error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
                            if unbalanced:
                                error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR

        except (IOError, TypeError):
            # the source file could not be read (or ansible_pos was malformed)
            error_message += '\n(could not open file to display line)'
        except IndexError:
            error_message += '\n(specified line no longer in file, maybe it changed?)'

        return error_message
|
||||
|
||||
|
||||
class AnsibleAssertionError(AnsibleError, AssertionError):
    '''Raised when an internal assertion does not hold.'''
|
||||
|
||||
|
||||
class AnsibleOptionsError(AnsibleError):
    '''Raised when options passed in are bad or incomplete.'''
|
||||
|
||||
|
||||
class AnsibleParserError(AnsibleError):
    '''Raised when something wrong is detected early in a playbook or data file.'''
|
||||
|
||||
|
||||
class AnsibleInternalError(AnsibleError):
    '''Raised when internal safeguards trip — something happened in the code that should never happen.'''
|
||||
|
||||
|
||||
class AnsibleRuntimeError(AnsibleError):
    '''Raised when ansible hits a problem while running a playbook.'''
|
||||
|
||||
|
||||
class AnsibleModuleError(AnsibleRuntimeError):
    '''Raised when a module fails somehow.'''
|
||||
|
||||
|
||||
class AnsibleConnectionFailure(AnsibleRuntimeError):
    '''Raised when the transport / connection_plugin hits a fatal error.'''
|
||||
|
||||
|
||||
class AnsibleFilterError(AnsibleRuntimeError):
    '''Raised on a templating (filter) failure.'''
|
||||
|
||||
|
||||
class AnsibleLookupError(AnsibleRuntimeError):
    '''Raised on a lookup plugin failure.'''
|
||||
|
||||
|
||||
class AnsibleCallbackError(AnsibleRuntimeError):
    '''Raised on a callback plugin failure.'''
|
||||
|
||||
|
||||
class AnsibleUndefinedVariable(AnsibleRuntimeError):
    '''Raised when templating references an undefined variable.'''
|
||||
|
||||
|
||||
class AnsibleFileNotFound(AnsibleRuntimeError):
    '''Raised when a referenced file cannot be found or accessed.'''

    def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, paths=None, file_name=None):

        self.file_name = file_name
        self.paths = paths

        # Build the final message from whatever detail was supplied,
        # joining the non-empty pieces with newlines.
        segments = [message] if message else []
        if self.file_name:
            segments.append("Could not find or access '%s'" % to_text(self.file_name))
        if self.paths and isinstance(self.paths, Sequence):
            segments.append("Searched in:\n\t%s" % to_text('\n\t'.join(self.paths)))
        message = "\n".join(segments)

        message += " on the Ansible Controller.\nIf you are using a module and expect the file to exist on the remote, see the remote_src option"

        super(AnsibleFileNotFound, self).__init__(message=message, obj=obj, show_content=show_content,
                                                  suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
|
||||
|
||||
|
||||
# These Exceptions are temporary, using them as flow control until we can get a better solution.
|
||||
# DO NOT USE as they will probably be removed soon.
|
||||
# We will port the action modules in our tree to use a context manager instead.
|
||||
class AnsibleAction(AnsibleRuntimeError):
    '''Base exception used by action plugins for flow control.'''

    def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):

        super(AnsibleAction, self).__init__(message=message, obj=obj, show_content=show_content,
                                            suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
        # never share a mutable default dict between instances
        self.result = result if result is not None else {}
|
||||
|
||||
|
||||
class AnsibleActionSkip(AnsibleAction):
    '''Raised by an action plugin to signal a runtime skip.'''

    def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
        super(AnsibleActionSkip, self).__init__(message=message, obj=obj, show_content=show_content,
                                                suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
        # mark the carried result as skipped for the task executor
        self.result['skipped'] = True
        self.result['msg'] = message
|
||||
|
||||
|
||||
class AnsibleActionFail(AnsibleAction):
    '''Raised by an action plugin to signal a runtime failure.'''

    def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
        super(AnsibleActionFail, self).__init__(message=message, obj=obj, show_content=show_content,
                                                suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
        # mark the carried result as failed for the task executor
        self.result['failed'] = True
        self.result['msg'] = message
|
||||
|
||||
|
||||
class _AnsibleActionDone(AnsibleAction):
    '''Raised by an action plugin to exit its run early.'''
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/errors/__init__.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/errors/__init__.pyc
Normal file
Binary file not shown.
135
.ve/lib/python2.7/site-packages/ansible/errors/yaml_strings.py
Normal file
135
.ve/lib/python2.7/site-packages/ansible/errors/yaml_strings.py
Normal file
@@ -0,0 +1,135 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
# Public API of this module. Every YAML help-text constant defined below is
# exported; errors/__init__.py imports them by name to annotate parse errors.
__all__ = [
    'YAML_SYNTAX_ERROR',
    'YAML_POSITION_DETAILS',
    'YAML_COMMON_DICT_ERROR',
    'YAML_COMMON_UNQUOTED_VARIABLE_ERROR',
    'YAML_COMMON_UNQUOTED_COLON_ERROR',
    'YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR',
    'YAML_COMMON_UNBALANCED_QUOTES_ERROR',
    # previously defined but missing from __all__, making star-imports
    # inconsistent with the direct imports in ansible.errors
    'YAML_COMMON_LEADING_TAB_ERROR',
]

# Generic header for YAML load failures; %s is the parser's own message.
YAML_SYNTAX_ERROR = """\
Syntax Error while loading YAML.
%s"""

# Interpolated with (file, line, column) from ansible_pos.
YAML_POSITION_DETAILS = """\
The error appears to have been in '%s': line %s, column %s, but may
be elsewhere in the file depending on the exact syntax problem.
"""

# Shown when a value starts with an unquoted '{{' that YAML reads as a dict.
YAML_COMMON_DICT_ERROR = """\
This one looks easy to fix. YAML thought it was looking for the start of a
hash/dictionary and was confused to see a second "{". Most likely this was
meant to be an ansible template evaluation instead, so we have to give the
parser a small hint that we wanted a string instead. The solution here is to
just quote the entire value.

For instance, if the original line was:

    app_path: {{ base_path }}/foo

It should be written as:

    app_path: "{{ base_path }}/foo"
"""

# Shown when a template expression opens a value without quoting.
YAML_COMMON_UNQUOTED_VARIABLE_ERROR = """\
We could be wrong, but this one looks like it might be an issue with
missing quotes. Always quote template expression brackets when they
start a value. For instance:

    with_items:
      - {{ foo }}

Should be written as:

    with_items:
      - "{{ foo }}"
"""

# Shown when a line carries more than one unquoted colon.
YAML_COMMON_UNQUOTED_COLON_ERROR = """\
This one looks easy to fix. There seems to be an extra unquoted colon in the line
and this is confusing the parser. It was only expecting to find one free
colon. The solution is just add some quotes around the colon, or quote the
entire line after the first colon.

For instance, if the original line was:

    copy: src=file.txt dest=/path/filename:with_colon.txt

It can be written as:

    copy: src=file.txt dest='/path/filename:with_colon.txt'

Or:

    copy: 'src=file.txt dest=/path/filename:with_colon.txt'
"""

# Shown when a value opens with a quote but the line does not close it.
YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR = """\
This one looks easy to fix. It seems that there is a value started
with a quote, and the YAML parser is expecting to see the line ended
with the same kind of quote. For instance:

    when: "ok" in result.stdout

Could be written as:

    when: '"ok" in result.stdout'

Or equivalently:

    when: "'ok' in result.stdout"
"""

# Shown when quote counts on the line suggest mismatched quoting.
YAML_COMMON_UNBALANCED_QUOTES_ERROR = """\
We could be wrong, but this one looks like it might be an issue with
unbalanced quotes. If starting a value with a quote, make sure the
line ends with the same set of quotes. For instance this arbitrary
example:

    foo: "bad" "wolf"

Could be written as:

    foo: '"bad" "wolf"'
"""

# Shown when a tab character is detected in the offending line.
YAML_COMMON_LEADING_TAB_ERROR = """\
There appears to be a tab character at the start of the line.

YAML does not use tabs for formatting. Tabs should be replaced with spaces.

For example:
    - name: update tooling
      vars:
        version: 1.2.3
  # ^--- there is a tab there.

Should be written as:
    - name: update tooling
      vars:
        version: 1.2.3
  # ^--- all spaces here.
"""
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/errors/yaml_strings.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/errors/yaml_strings.pyc
Normal file
Binary file not shown.
20
.ve/lib/python2.7/site-packages/ansible/executor/__init__.py
Normal file
20
.ve/lib/python2.7/site-packages/ansible/executor/__init__.py
Normal file
@@ -0,0 +1,20 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/executor/__init__.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/executor/__init__.pyc
Normal file
Binary file not shown.
@@ -0,0 +1,44 @@
|
||||
# (c) 2016 - Red Hat, Inc. <info@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from multiprocessing import Lock
|
||||
|
||||
from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
|
||||
|
||||
# Guard against re-initialization: multiprocessing appears to re-execute this
# module when it forks, which would otherwise replace the shared lock table
# and defeat the cross-process serialization.
if 'action_write_locks' not in globals():
    # Do not initialize this more than once because it seems to bash
    # the existing one. multiprocessing must be reloading the module
    # when it forks?
    action_write_locks = dict()

    # Below is a Lock for use when we weren't expecting a named module. It gets used when an action
    # plugin invokes a module whose name does not match with the action's name. Slightly less
    # efficient as all processes with unexpected module names will wait on this lock
    action_write_locks[None] = Lock()

    # These plugins are known to be called directly by action plugins with names differing from the
    # action plugin name. We precreate them here as an optimization.
    # If a list of service managers is created in the future we can do the same for them.
    mods = set(p['name'] for p in PKG_MGRS)

    # modules commonly invoked by other actions (template -> copy, etc.)
    mods.update(('copy', 'file', 'setup', 'slurp', 'stat'))
    for mod_name in mods:
        action_write_locks[mod_name] = Lock()
|
||||
Binary file not shown.
1002
.ve/lib/python2.7/site-packages/ansible/executor/module_common.py
Normal file
1002
.ve/lib/python2.7/site-packages/ansible/executor/module_common.py
Normal file
File diff suppressed because it is too large
Load Diff
Binary file not shown.
@@ -0,0 +1,568 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import fnmatch
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.module_utils.six import iteritems
|
||||
from ansible.module_utils.parsing.convert_bool import boolean
|
||||
from ansible.playbook.block import Block
|
||||
from ansible.playbook.task import Task
|
||||
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
__all__ = ['PlayIterator']
|
||||
|
||||
|
||||
class HostState:
    '''Mutable record of a single host's position within a play's block list.'''

    def __init__(self, blocks):
        # private copy so external mutation of the list cannot shift this host
        self._blocks = blocks[:]

        # cursors into the current block's task/rescue/always sections
        self.cur_block = 0
        self.cur_regular_task = 0
        self.cur_rescue_task = 0
        self.cur_always_task = 0
        self.cur_dep_chain = None
        # run/fail state expressed with PlayIterator's class constants
        self.run_state = PlayIterator.ITERATING_SETUP
        self.fail_state = PlayIterator.FAILED_NONE
        self.pending_setup = False
        # nested HostState objects for child (sub-)blocks, when present
        self.tasks_child_state = None
        self.rescue_child_state = None
        self.always_child_state = None
        self.did_rescue = False
        self.did_start_at_task = False

    def __repr__(self):
        return "HostState(%r)" % self._blocks

    def __str__(self):
        def _run_state_to_string(n):
            names = ("ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE")
            try:
                return names[n]
            except IndexError:
                return "UNKNOWN STATE"

        def _failed_state_to_string(n):
            if n == 0:
                return "FAILED_NONE"
            # fail_state is a bitmask; report every set flag
            flags = ((1, "FAILED_SETUP"), (2, "FAILED_TASKS"), (4, "FAILED_RESCUE"), (8, "FAILED_ALWAYS"))
            return "|".join(label for bit, label in flags if n & bit)

        return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), "
                "rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
                    self.cur_block,
                    self.cur_regular_task,
                    self.cur_rescue_task,
                    self.cur_always_task,
                    _run_state_to_string(self.run_state),
                    _failed_state_to_string(self.fail_state),
                    self.pending_setup,
                    self.tasks_child_state,
                    self.rescue_child_state,
                    self.always_child_state,
                    self.did_rescue,
                    self.did_start_at_task,
                ))

    def __eq__(self, other):
        if not isinstance(other, HostState):
            return False

        # states are equal when every tracked attribute matches
        compared = ('_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
                    'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
                    'tasks_child_state', 'rescue_child_state', 'always_child_state')
        return all(getattr(self, attr) == getattr(other, attr) for attr in compared)

    def get_current_block(self):
        return self._blocks[self.cur_block]

    def copy(self):
        '''Return an independent copy of this state (child states copied recursively).'''
        new_state = HostState(self._blocks)
        # scalar fields copy by assignment
        for attr in ('cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
                     'run_state', 'fail_state', 'pending_setup', 'did_rescue', 'did_start_at_task'):
            setattr(new_state, attr, getattr(self, attr))
        # the dep chain gets a shallow list copy
        if self.cur_dep_chain is not None:
            new_state.cur_dep_chain = self.cur_dep_chain[:]
        # child states are themselves HostState objects — copy recursively
        for child_attr in ('tasks_child_state', 'rescue_child_state', 'always_child_state'):
            child = getattr(self, child_attr)
            if child is not None:
                setattr(new_state, child_attr, child.copy())
        return new_state
|
||||
|
||||
|
||||
class PlayIterator:
|
||||
|
||||
# the primary running states for the play iteration
|
||||
ITERATING_SETUP = 0
|
||||
ITERATING_TASKS = 1
|
||||
ITERATING_RESCUE = 2
|
||||
ITERATING_ALWAYS = 3
|
||||
ITERATING_COMPLETE = 4
|
||||
|
||||
# the failure states for the play iteration, which are powers
|
||||
# of 2 as they may be or'ed together in certain circumstances
|
||||
FAILED_NONE = 0
|
||||
FAILED_SETUP = 1
|
||||
FAILED_TASKS = 2
|
||||
FAILED_RESCUE = 4
|
||||
FAILED_ALWAYS = 8
|
||||
|
||||
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
|
||||
self._play = play
|
||||
self._blocks = []
|
||||
self._variable_manager = variable_manager
|
||||
|
||||
# Default options to gather
|
||||
gather_subset = play_context.gather_subset
|
||||
gather_timeout = play_context.gather_timeout
|
||||
fact_path = play_context.fact_path
|
||||
|
||||
# Retrieve subset to gather
|
||||
if self._play.gather_subset is not None:
|
||||
gather_subset = self._play.gather_subset
|
||||
# Retrieve timeout for gather
|
||||
if self._play.gather_timeout is not None:
|
||||
gather_timeout = self._play.gather_timeout
|
||||
# Retrieve fact_path
|
||||
if self._play.fact_path is not None:
|
||||
fact_path = self._play.fact_path
|
||||
|
||||
setup_block = Block(play=self._play)
|
||||
# Gathering facts with run_once would copy the facts from one host to
|
||||
# the others.
|
||||
setup_block.run_once = False
|
||||
setup_task = Task(block=setup_block)
|
||||
setup_task.action = 'setup'
|
||||
setup_task.name = 'Gathering Facts'
|
||||
setup_task.tags = ['always']
|
||||
setup_task.args = {
|
||||
'gather_subset': gather_subset,
|
||||
}
|
||||
if gather_timeout:
|
||||
setup_task.args['gather_timeout'] = gather_timeout
|
||||
if fact_path:
|
||||
setup_task.args['fact_path'] = fact_path
|
||||
setup_task.set_loader(self._play._loader)
|
||||
# short circuit fact gathering if the entire playbook is conditional
|
||||
if self._play._included_conditional is not None:
|
||||
setup_task.when = self._play._included_conditional[:]
|
||||
setup_block.block = [setup_task]
|
||||
|
||||
setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
|
||||
self._blocks.append(setup_block)
|
||||
self.cache_block_tasks(setup_block)
|
||||
|
||||
for block in self._play.compile():
|
||||
new_block = block.filter_tagged_tasks(play_context, all_vars)
|
||||
if new_block.has_tasks():
|
||||
self.cache_block_tasks(new_block)
|
||||
self._blocks.append(new_block)
|
||||
|
||||
for handler_block in self._play.handlers:
|
||||
self.cache_block_tasks(handler_block)
|
||||
|
||||
self._host_states = {}
|
||||
start_at_matched = False
|
||||
batch = inventory.get_hosts(self._play.hosts)
|
||||
self.batch_size = len(batch)
|
||||
for host in batch:
|
||||
self._host_states[host.name] = HostState(blocks=self._blocks)
|
||||
# if we're looking to start at a specific task, iterate through
|
||||
# the tasks for this host until we find the specified task
|
||||
if play_context.start_at_task is not None and not start_at_done:
|
||||
while True:
|
||||
(s, task) = self.get_next_task_for_host(host, peek=True)
|
||||
if s.run_state == self.ITERATING_COMPLETE:
|
||||
break
|
||||
if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
|
||||
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
|
||||
start_at_matched = True
|
||||
break
|
||||
else:
|
||||
self.get_next_task_for_host(host)
|
||||
|
||||
# finally, reset the host's state to ITERATING_SETUP
|
||||
if start_at_matched:
|
||||
self._host_states[host.name].did_start_at_task = True
|
||||
self._host_states[host.name].run_state = self.ITERATING_SETUP
|
||||
|
||||
if start_at_matched:
|
||||
# we have our match, so clear the start_at_task field on the
|
||||
# play context to flag that we've started at a task (and future
|
||||
# plays won't try to advance)
|
||||
play_context.start_at_task = None
|
||||
|
||||
def get_host_state(self, host):
|
||||
# Since we're using the PlayIterator to carry forward failed hosts,
|
||||
# in the event that a previous host was not in the current inventory
|
||||
# we create a stub state for it now
|
||||
if host.name not in self._host_states:
|
||||
self._host_states[host.name] = HostState(blocks=[])
|
||||
|
||||
return self._host_states[host.name].copy()
|
||||
|
||||
def cache_block_tasks(self, block):
|
||||
# now a noop, we've changed the way we do caching and finding of
|
||||
# original task entries, but just in case any 3rd party strategies
|
||||
# are using this we're leaving it here for now
|
||||
return
|
||||
|
||||
def get_next_task_for_host(self, host, peek=False):
|
||||
|
||||
display.debug("getting the next task for host %s" % host.name)
|
||||
s = self.get_host_state(host)
|
||||
|
||||
task = None
|
||||
if s.run_state == self.ITERATING_COMPLETE:
|
||||
display.debug("host %s is done iterating, returning" % host.name)
|
||||
return (s, None)
|
||||
|
||||
(s, task) = self._get_next_task_from_state(s, host=host, peek=peek)
|
||||
|
||||
if not peek:
|
||||
self._host_states[host.name] = s
|
||||
|
||||
display.debug("done getting next task for host %s" % host.name)
|
||||
display.debug(" ^ task is: %s" % task)
|
||||
display.debug(" ^ state is: %s" % s)
|
||||
return (s, task)
|
||||
|
||||
def _get_next_task_from_state(self, state, host, peek, in_child=False):
    """Drive the per-host state machine one step and return ``(state, task)``.

    Loops until either a task is produced or the state reaches
    ITERATING_COMPLETE. Each iteration dispatches on ``state.run_state``
    (SETUP -> TASKS -> RESCUE -> ALWAYS -> COMPLETE) and recurses into
    child states created for nested blocks. ``in_child`` marks recursive
    calls so end-of-role bookkeeping only happens at the top level.
    """

    task = None

    # try and find the next task, given the current state.
    while True:
        # try to get the current block from the list of blocks, and
        # if we run past the end of the list we know we're done with
        # this block
        try:
            block = state._blocks[state.cur_block]
        except IndexError:
            state.run_state = self.ITERATING_COMPLETE
            return (state, None)

        if state.run_state == self.ITERATING_SETUP:
            # First, we check to see if we were pending setup. If not, this is
            # the first trip through ITERATING_SETUP, so we set the pending_setup
            # flag and try to determine if we do in fact want to gather facts for
            # the specified host.
            if not state.pending_setup:
                state.pending_setup = True

                # Gather facts if the default is 'smart' and we have not yet
                # done it for this host; or if 'explicit' and the play sets
                # gather_facts to True; or if 'implicit' and the play does
                # NOT explicitly set gather_facts to False.

                gathering = C.DEFAULT_GATHERING
                implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)

                if (gathering == 'implicit' and implied) or \
                   (gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
                   (gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('module_setup', False))):
                    # The setup block is always self._blocks[0], as we inject it
                    # during the play compilation in __init__ above.
                    setup_block = self._blocks[0]
                    if setup_block.has_tasks() and len(setup_block.block) > 0:
                        task = setup_block.block[0]
            else:
                # This is the second trip through ITERATING_SETUP, so we clear
                # the flag and move onto the next block in the list while setting
                # the run state to ITERATING_TASKS
                state.pending_setup = False

                state.run_state = self.ITERATING_TASKS
                # when --start-at-task matched earlier, the cursors were already
                # positioned there, so we must not reset them here
                if not state.did_start_at_task:
                    state.cur_block += 1
                    state.cur_regular_task = 0
                    state.cur_rescue_task = 0
                    state.cur_always_task = 0
                    # NOTE(review): the attribute reset here is ``child_state``,
                    # not one of tasks/rescue/always_child_state used elsewhere
                    # in this method -- confirm this is intentional.
                    state.child_state = None

        elif state.run_state == self.ITERATING_TASKS:
            # clear the pending setup flag, since we're past that and it didn't fail
            if state.pending_setup:
                state.pending_setup = False

            # First, we check for a child task state that is not failed, and if we
            # have one recurse into it for the next task. If we're done with the child
            # state, we clear it and drop back to getting the next task from the list.
            if state.tasks_child_state:
                (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek, in_child=True)
                if self._check_failed_state(state.tasks_child_state):
                    # failed child state, so clear it and move into the rescue portion
                    state.tasks_child_state = None
                    self._set_failed_state(state)
                else:
                    # get the next task recursively
                    if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE:
                        # we're done with the child state, so clear it and continue
                        # back to the top of the loop to get the next task
                        state.tasks_child_state = None
                        continue
            else:
                # First here, we check to see if we've failed anywhere down the chain
                # of states we have, and if so we move onto the rescue portion. Otherwise,
                # we check to see if we've moved past the end of the list of tasks. If so,
                # we move into the always portion of the block, otherwise we get the next
                # task from the list.
                if self._check_failed_state(state):
                    state.run_state = self.ITERATING_RESCUE
                elif state.cur_regular_task >= len(block.block):
                    state.run_state = self.ITERATING_ALWAYS
                else:
                    task = block.block[state.cur_regular_task]
                    # if the current task is actually a child block, create a child
                    # state for us to recurse into on the next pass
                    if isinstance(task, Block) or state.tasks_child_state is not None:
                        state.tasks_child_state = HostState(blocks=[task])
                        state.tasks_child_state.run_state = self.ITERATING_TASKS
                        # since we've created the child state, clear the task
                        # so we can pick up the child state on the next pass
                        task = None
                    state.cur_regular_task += 1

        elif state.run_state == self.ITERATING_RESCUE:
            # The process here is identical to ITERATING_TASKS, except instead
            # we move into the always portion of the block.
            # a host that reached its rescue section gets another chance,
            # so take it off the play's removed-hosts list
            if host.name in self._play._removed_hosts:
                self._play._removed_hosts.remove(host.name)

            if state.rescue_child_state:
                (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek, in_child=True)
                if self._check_failed_state(state.rescue_child_state):
                    state.rescue_child_state = None
                    self._set_failed_state(state)
                else:
                    if task is None or state.rescue_child_state.run_state == self.ITERATING_COMPLETE:
                        state.rescue_child_state = None
                        continue
            else:
                if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
                    state.run_state = self.ITERATING_ALWAYS
                elif state.cur_rescue_task >= len(block.rescue):
                    # a non-empty rescue section that ran to the end handled
                    # the failure, so the fail state is reset
                    if len(block.rescue) > 0:
                        state.fail_state = self.FAILED_NONE
                    state.run_state = self.ITERATING_ALWAYS
                    state.did_rescue = True
                else:
                    task = block.rescue[state.cur_rescue_task]
                    if isinstance(task, Block) or state.rescue_child_state is not None:
                        state.rescue_child_state = HostState(blocks=[task])
                        state.rescue_child_state.run_state = self.ITERATING_TASKS
                        task = None
                    state.cur_rescue_task += 1

        elif state.run_state == self.ITERATING_ALWAYS:
            # And again, the process here is identical to ITERATING_TASKS, except
            # instead we either move onto the next block in the list, or we set the
            # run state to ITERATING_COMPLETE in the event of any errors, or when we
            # have hit the end of the list of blocks.
            if state.always_child_state:
                (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek, in_child=True)
                if self._check_failed_state(state.always_child_state):
                    state.always_child_state = None
                    self._set_failed_state(state)
                else:
                    if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
                        state.always_child_state = None
                        continue
            else:
                if state.cur_always_task >= len(block.always):
                    if state.fail_state != self.FAILED_NONE:
                        state.run_state = self.ITERATING_COMPLETE
                    else:
                        # block fully done: advance to the next block and
                        # reset every cursor and child state
                        state.cur_block += 1
                        state.cur_regular_task = 0
                        state.cur_rescue_task = 0
                        state.cur_always_task = 0
                        state.run_state = self.ITERATING_TASKS
                        state.tasks_child_state = None
                        state.rescue_child_state = None
                        state.always_child_state = None
                        state.did_rescue = False

                        # we're advancing blocks, so if this was an end-of-role block we
                        # mark the current role complete
                        if block._eor and host.name in block._role._had_task_run and not in_child and not peek:
                            block._role._completed[host.name] = True
                else:
                    task = block.always[state.cur_always_task]
                    if isinstance(task, Block) or state.always_child_state is not None:
                        state.always_child_state = HostState(blocks=[task])
                        state.always_child_state.run_state = self.ITERATING_TASKS
                        task = None
                    state.cur_always_task += 1

        elif state.run_state == self.ITERATING_COMPLETE:
            return (state, None)

        # if something above set the task, break out of the loop now
        if task:
            break

    return (state, task)
|
||||
|
||||
def _set_failed_state(self, state):
    """Mark ``state`` failed and transition it to its next run state.

    The transition depends on where the failure happened: task failures
    fall through to the block's rescue (or always) section when one
    exists; rescue failures fall through to always; always and setup
    failures terminate iteration. When a child state is active, the
    failure is recorded on the innermost child instead of this level.
    Returns the (mutated) state.
    """
    if state.run_state == self.ITERATING_SETUP:
        state.fail_state |= self.FAILED_SETUP
        state.run_state = self.ITERATING_COMPLETE
    elif state.run_state == self.ITERATING_TASKS:
        if state.tasks_child_state is not None:
            # failure happened inside a nested block; delegate downwards
            state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
        else:
            state.fail_state |= self.FAILED_TASKS
            # prefer the rescue section, then always, otherwise we're done
            if state._blocks[state.cur_block].rescue:
                state.run_state = self.ITERATING_RESCUE
            elif state._blocks[state.cur_block].always:
                state.run_state = self.ITERATING_ALWAYS
            else:
                state.run_state = self.ITERATING_COMPLETE
    elif state.run_state == self.ITERATING_RESCUE:
        if state.rescue_child_state is not None:
            state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
        else:
            state.fail_state |= self.FAILED_RESCUE
            # a failed rescue can still be followed by the always section
            if state._blocks[state.cur_block].always:
                state.run_state = self.ITERATING_ALWAYS
            else:
                state.run_state = self.ITERATING_COMPLETE
    elif state.run_state == self.ITERATING_ALWAYS:
        if state.always_child_state is not None:
            state.always_child_state = self._set_failed_state(state.always_child_state)
        else:
            # nothing can recover from a failure in always
            state.fail_state |= self.FAILED_ALWAYS
            state.run_state = self.ITERATING_COMPLETE
    return state
|
||||
|
||||
def mark_host_failed(self, host):
    """Record a failure for ``host`` and advance its stored state.

    The host is also appended to the play's ``_removed_hosts`` list so
    it drops out of further iteration unless a rescue section runs.
    """
    state = self.get_host_state(host)
    display.debug("marking host %s failed, current state: %s" % (host, state))
    state = self._set_failed_state(state)
    display.debug("^ failed state is now: %s" % state)
    self._host_states[host.name] = state
    self._play._removed_hosts.append(host.name)
|
||||
|
||||
def get_failed_hosts(self):
    """Return a ``{hostname: True}`` mapping of hosts whose state is failed."""
    failed = {}
    for hostname, state in iteritems(self._host_states):
        if self._check_failed_state(state):
            failed[hostname] = True
    return failed
|
||||
|
||||
def _check_failed_state(self, state):
    """Return True if ``state`` (or a relevant child state) counts as failed.

    A failure only "counts" once it can no longer be handled: a state
    currently in its rescue/always section without the corresponding
    FAILED_RESCUE/FAILED_ALWAYS bit is not failed yet, and a rescue that
    ran to completion (``did_rescue``) clears earlier task failures.
    """
    if state is None:
        return False
    elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
        return True
    elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
        return True
    elif state.fail_state != self.FAILED_NONE:
        # a failure bit is set, but an in-progress rescue/always section
        # may still handle it, so don't report failure prematurely
        if state.run_state == self.ITERATING_RESCUE and state.fail_state & self.FAILED_RESCUE == 0:
            return False
        elif state.run_state == self.ITERATING_ALWAYS and state.fail_state & self.FAILED_ALWAYS == 0:
            return False
        else:
            return not state.did_rescue
    elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
        # NOTE(review): this indexes self._blocks with the given state's
        # cur_block -- for child states those indices may not correspond;
        # confirm whether state._blocks was intended here.
        cur_block = self._blocks[state.cur_block]
        if len(cur_block.rescue) > 0 and state.fail_state & self.FAILED_RESCUE == 0:
            # a rescue section exists and has not failed, so the child
            # failure can still be recovered from
            return False
        else:
            return True
    return False
|
||||
|
||||
def is_failed(self, host):
    """Return True when ``host``'s current iteration state is failed."""
    return self._check_failed_state(self.get_host_state(host))
|
||||
|
||||
def get_active_state(self, state):
    '''
    Finds the active state, recursively if necessary when there are child states.
    '''
    # map each run state to the child-state attribute it may delegate to;
    # descend into that child whenever one is present
    child_map = (
        (self.ITERATING_TASKS, 'tasks_child_state'),
        (self.ITERATING_RESCUE, 'rescue_child_state'),
        (self.ITERATING_ALWAYS, 'always_child_state'),
    )
    for run_state, attr in child_map:
        if state.run_state == run_state:
            child = getattr(state, attr)
            if child is not None:
                return self.get_active_state(child)
    return state
|
||||
|
||||
def get_original_task(self, host, task):
    """Deprecated no-op retained for API compatibility.

    Original-task lookup is handled differently now, so this always
    returns ``(None, None)``.
    """
    return (None, None)
|
||||
|
||||
def _insert_tasks_into_state(self, state, task_list):
    """Splice ``task_list`` into ``state`` at its current position.

    The insertion targets whichever section (block/rescue/always) the
    state is currently iterating, recursing into an active child state
    when one exists. The targeted block is copied before mutation so
    other hosts sharing it are unaffected. Failed states (outside of
    rescue/always) and empty task lists are returned untouched.
    """
    # if we've failed at all, or if the task list is empty, just return the current state
    if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list:
        return state

    # run state -> (section attribute on the block, child-state attribute,
    #               cursor attribute on the state)
    dispatch = {
        self.ITERATING_TASKS: ('block', 'tasks_child_state', 'cur_regular_task'),
        self.ITERATING_RESCUE: ('rescue', 'rescue_child_state', 'cur_rescue_task'),
        self.ITERATING_ALWAYS: ('always', 'always_child_state', 'cur_always_task'),
    }
    entry = dispatch.get(state.run_state)
    if entry is not None:
        section_attr, child_attr, cursor_attr = entry
        child = getattr(state, child_attr)
        if child:
            # an active child state receives the tasks instead of this level
            setattr(state, child_attr, self._insert_tasks_into_state(child, task_list))
        else:
            # copy-on-write: never mutate the shared block in place
            target_block = state._blocks[state.cur_block].copy()
            tasks = getattr(target_block, section_attr)
            cursor = getattr(state, cursor_attr)
            setattr(target_block, section_attr, tasks[:cursor] + task_list + tasks[cursor:])
            state._blocks[state.cur_block] = target_block
    return state
|
||||
|
||||
def add_tasks(self, host, task_list):
    """Insert ``task_list`` at ``host``'s current iteration position."""
    current = self.get_host_state(host)
    self._host_states[host.name] = self._insert_tasks_into_state(current, task_list)
|
||||
Binary file not shown.
@@ -0,0 +1,291 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.executor.task_queue_manager import TaskQueueManager
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
from ansible.playbook import Playbook
|
||||
from ansible.template import Templar
|
||||
from ansible.utils.helpers import pct_to_int
|
||||
from ansible.module_utils.parsing.convert_bool import boolean
|
||||
from ansible.utils.path import makedirs_safe
|
||||
from ansible.utils.ssh_functions import check_for_controlpersist
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
class PlaybookExecutor:

    '''
    This is the primary class for executing playbooks, and thus the
    basis for bin/ansible-playbook operation.
    '''

    def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):
        # list of playbook file paths to run, in order
        self._playbooks = playbooks
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._options = options
        self.passwords = passwords
        # hosts that became unreachable in earlier plays; carried forward so
        # later plays/playbooks skip them too
        self._unreachable_hosts = dict()

        # listing/syntax-only modes never execute tasks, so no task queue
        # manager is created for them
        if options.listhosts or options.listtasks or options.listtags or options.syntax:
            self._tqm = None
        else:
            self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords)

        # Note: We run this here to cache whether the default ansible ssh
        # executable supports control persist. Sometime in the future we may
        # need to enhance this to check that ansible_ssh_executable specified
        # in inventory is also cached. We can't do this caching at the point
        # where it is used (in task_executor) because that is post-fork and
        # therefore would be discarded after every task.
        check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)

    def run(self):
        '''
        Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc.

        Returns the aggregate TQM result code (0 on full success), or, in
        listing mode, the list of per-playbook entry dicts.
        '''

        result = 0
        entrylist = []
        entry = {}
        try:
            for playbook_path in self._playbooks:
                pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
                # FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))

                if self._tqm is None:  # we are doing a listing
                    entry = {'playbook': playbook_path}
                    entry['plays'] = []
                else:
                    # make sure the tqm has callbacks loaded
                    self._tqm.load_callbacks()
                    self._tqm.send_callback('v2_playbook_on_start', pb)

                i = 1
                plays = pb.get_plays()
                display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))

                for play in plays:
                    # template/file lookups resolve relative to the play's own
                    # basedir (the included file's dir when applicable)
                    if play._included_path is not None:
                        self._loader.set_basedir(play._included_path)
                    else:
                        self._loader.set_basedir(pb._basedir)

                    # clear any filters which may have been applied to the inventory
                    self._inventory.remove_restriction()

                    # Allow variables to be used in vars_prompt fields.
                    all_vars = self._variable_manager.get_vars(play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)
                    setattr(play, 'vars_prompt', templar.template(play.vars_prompt))

                    if play.vars_prompt:
                        for var in play.vars_prompt:
                            vname = var['name']
                            prompt = var.get("prompt", vname)
                            default = var.get("default", None)
                            private = boolean(var.get("private", True))
                            confirm = boolean(var.get("confirm", False))
                            encrypt = var.get("encrypt", None)
                            salt_size = var.get("salt_size", None)
                            salt = var.get("salt", None)

                            # command-line extra vars always win over prompted values
                            if vname not in self._variable_manager.extra_vars:
                                if self._tqm:
                                    self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                    play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                else:  # we are either in --list-<option> or syntax check
                                    play.vars[vname] = default

                    # Post validate so any play level variables are templated
                    all_vars = self._variable_manager.get_vars(play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)
                    play.post_validate(templar)

                    if self._options.syntax:
                        continue

                    if self._tqm is None:
                        # we are just doing a listing
                        entry['plays'].append(play)

                    else:
                        self._tqm._unreachable_hosts.update(self._unreachable_hosts)

                        previously_failed = len(self._tqm._failed_hosts)
                        previously_unreachable = len(self._tqm._unreachable_hosts)

                        break_play = False
                        # we are actually running plays
                        batches = self._get_serialized_batches(play)
                        if len(batches) == 0:
                            self._tqm.send_callback('v2_playbook_on_play_start', play)
                            self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                        for batch in batches:
                            # restrict the inventory to the hosts in the serialized batch
                            self._inventory.restrict_to_hosts(batch)
                            # and run it...
                            result = self._tqm.run(play=play)

                            # break the play if the result equals the special return code
                            if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
                                result = self._tqm.RUN_FAILED_HOSTS
                                break_play = True

                            # check the number of failures here, to see if they're above the maximum
                            # failure percentage allowed, or if any errors are fatal. If either of those
                            # conditions are met, we break out, otherwise we only break out if the entire
                            # batch failed
                            failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
                                (previously_failed + previously_unreachable)

                            if len(batch) == failed_hosts_count:
                                break_play = True
                                break

                            # update the previous counts so they don't accumulate incorrectly
                            # over multiple serial batches
                            previously_failed += len(self._tqm._failed_hosts) - previously_failed
                            previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable

                            # save the unreachable hosts from this batch
                            self._unreachable_hosts.update(self._tqm._unreachable_hosts)

                        if break_play:
                            break

                    i = i + 1  # per play

                if entry:
                    entrylist.append(entry)  # per playbook

                # send the stats callback for this playbook
                if self._tqm is not None:
                    if C.RETRY_FILES_ENABLED:
                        retries = set(self._tqm._failed_hosts.keys())
                        retries.update(self._tqm._unreachable_hosts.keys())
                        retries = sorted(retries)
                        if len(retries) > 0:
                            if C.RETRY_FILES_SAVE_PATH:
                                basedir = C.RETRY_FILES_SAVE_PATH
                            elif playbook_path:
                                basedir = os.path.dirname(os.path.abspath(playbook_path))
                            else:
                                basedir = '~/'

                            (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
                            filename = os.path.join(basedir, "%s.retry" % retry_name)
                            if self._generate_retry_inventory(filename, retries):
                                display.display("\tto retry, use: --limit @%s\n" % filename)

                    self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

                # if the last result wasn't zero, break out of the playbook file name loop
                if result != 0:
                    break

            if entrylist:
                return entrylist

        finally:
            # always clean up the TQM's workers and the loader's temp files,
            # even when a play raised
            if self._tqm is not None:
                self._tqm.cleanup()
            if self._loader:
                self._loader.cleanup_all_tmp_files()

        if self._options.syntax:
            display.display("No issues encountered")
            return result

        return result

    def _get_serialized_batches(self, play):
        '''
        Returns a list of hosts, subdivided into batches based on
        the serial size specified in the play.
        '''

        # make sure we have a unique list of hosts
        all_hosts = self._inventory.get_hosts(play.hosts)
        all_hosts_len = len(all_hosts)

        # the serial value can be listed as a scalar or a list of
        # scalars, so we make sure it's a list here
        serial_batch_list = play.serial
        if len(serial_batch_list) == 0:
            serial_batch_list = [-1]

        cur_item = 0
        serialized_batches = []

        while len(all_hosts) > 0:
            # get the serial value from current item in the list
            serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)

            # if the serial count was not specified or is invalid, default to
            # a list of all hosts, otherwise grab a chunk of the hosts equal
            # to the current serial item size
            if serial <= 0:
                serialized_batches.append(all_hosts)
                break
            else:
                play_hosts = []
                for x in range(serial):
                    if len(all_hosts) > 0:
                        play_hosts.append(all_hosts.pop(0))

                serialized_batches.append(play_hosts)

            # increment the current batch list item number, and if we've hit
            # the end keep using the last element until we've consumed all of
            # the hosts in the inventory
            cur_item += 1
            if cur_item > len(serial_batch_list) - 1:
                cur_item = len(serial_batch_list) - 1

        return serialized_batches

    def _generate_retry_inventory(self, retry_path, replay_hosts):
        '''
        Called when a playbook run fails. It generates an inventory which allows
        re-running on ONLY the failed hosts. This may duplicate some variable
        information in group_vars/host_vars but that is ok, and expected.

        Returns True when the retry file was written, False on any error
        (the error is reported as a warning, not raised).
        '''
        try:
            makedirs_safe(os.path.dirname(retry_path))
            # one hostname per line, plain-inventory format
            with open(retry_path, 'w') as fd:
                for x in replay_hosts:
                    fd.write("%s\n" % x)
        except Exception as e:
            display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_native(e)))
            return False

        return True
|
||||
Binary file not shown.
@@ -0,0 +1,20 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
Binary file not shown.
@@ -0,0 +1,171 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import multiprocessing
|
||||
import os
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from jinja2.exceptions import TemplateNotFound
|
||||
|
||||
HAS_PYCRYPTO_ATFORK = False
|
||||
try:
|
||||
from Crypto.Random import atfork
|
||||
HAS_PYCRYPTO_ATFORK = True
|
||||
except:
|
||||
# We only need to call atfork if pycrypto is used because it will need to
|
||||
# reinitialize its RNG. Since old paramiko could be using pycrypto, we
|
||||
# need to take charge of calling it.
|
||||
pass
|
||||
|
||||
from ansible.errors import AnsibleConnectionFailure
|
||||
from ansible.executor.task_executor import TaskExecutor
|
||||
from ansible.executor.task_result import TaskResult
|
||||
from ansible.module_utils._text import to_text
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
__all__ = ['WorkerProcess']
|
||||
|
||||
|
||||
class WorkerProcess(multiprocessing.Process):
    '''
    The worker thread class, which uses TaskExecutor to run tasks
    read from a job queue and pushes results into a results queue
    for reading later.
    '''

    def __init__(self, final_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj):

        super(WorkerProcess, self).__init__()
        # takes a task queue manager as the sole param:
        self._final_q = final_q
        self._task_vars = task_vars
        self._host = host
        self._task = task
        self._play_context = play_context
        self._loader = loader
        self._variable_manager = variable_manager
        self._shared_loader_obj = shared_loader_obj

        if sys.stdin.isatty():
            # dupe stdin, if we have one
            self._new_stdin = sys.stdin
            try:
                fileno = sys.stdin.fileno()
                if fileno is not None:
                    try:
                        # duplicate the fd so the forked child gets its own
                        # handle on the controlling terminal
                        self._new_stdin = os.fdopen(os.dup(fileno))
                    except OSError:
                        # couldn't dupe stdin, most likely because it's
                        # not a valid file descriptor, so we just rely on
                        # using the one that was passed in
                        pass
            except (AttributeError, ValueError):
                # couldn't get stdin's fileno, so we just carry on
                pass
        else:
            # set to /dev/null
            self._new_stdin = os.devnull

    def run(self):
        '''
        Called when the process is started. Pushes the result onto the
        results queue. We also remove the host from the blocked hosts list, to
        signify that they are ready for their next task.
        '''

        # import cProfile, pstats, StringIO
        # pr = cProfile.Profile()
        # pr.enable()

        # pycrypto's RNG must be re-seeded after fork (see module-level note)
        if HAS_PYCRYPTO_ATFORK:
            atfork()

        try:
            # execute the task and build a TaskResult from the result
            display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
            executor_result = TaskExecutor(
                self._host,
                self._task,
                self._task_vars,
                self._play_context,
                self._new_stdin,
                self._loader,
                self._shared_loader_obj,
                self._final_q
            ).run()

            display.debug("done running TaskExecutor() for %s/%s [%s]" % (self._host, self._task, self._task._uuid))
            # strip per-host vars/groups before building the result --
            # presumably to shrink what gets pickled onto the queue; confirm
            self._host.vars = dict()
            self._host.groups = []
            task_result = TaskResult(
                self._host.name,
                self._task._uuid,
                executor_result,
                task_fields=self._task.dump_attrs(),
            )

            # put the result on the result queue
            display.debug("sending task result for task %s" % self._task._uuid)
            self._final_q.put(task_result)
            display.debug("done sending task result for task %s" % self._task._uuid)

        except AnsibleConnectionFailure:
            # connection failures are reported as an "unreachable" result
            # rather than a task failure
            self._host.vars = dict()
            self._host.groups = []
            task_result = TaskResult(
                self._host.name,
                self._task._uuid,
                dict(unreachable=True),
                task_fields=self._task.dump_attrs(),
            )
            self._final_q.put(task_result, block=False)

        except Exception as e:
            # anything else becomes a failed result carrying the traceback,
            # except I/O-ish interruptions (unless it is a TemplateNotFound,
            # which subclasses IOError but is a real task error)
            if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
                try:
                    self._host.vars = dict()
                    self._host.groups = []
                    task_result = TaskResult(
                        self._host.name,
                        self._task._uuid,
                        dict(failed=True, exception=to_text(traceback.format_exc()), stdout=''),
                        task_fields=self._task.dump_attrs(),
                    )
                    self._final_q.put(task_result, block=False)
                except:
                    # last resort: we couldn't even report the failure, so
                    # just log it from the worker before exiting
                    display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
                    display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))

        display.debug("WORKER PROCESS EXITING")

        # pr.disable()
        # s = StringIO.StringIO()
        # sortby = 'time'
        # ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        # ps.print_stats()
        # with open('worker_%06d.stats' % os.getpid(), 'w') as f:
        #     f.write(s.getvalue())
|
||||
Binary file not shown.
96
.ve/lib/python2.7/site-packages/ansible/executor/stats.py
Normal file
96
.ve/lib/python2.7/site-packages/ansible/executor/stats.py
Normal file
@@ -0,0 +1,96 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from collections import MutableMapping
|
||||
|
||||
from ansible.utils.vars import merge_hash
|
||||
|
||||
|
||||
class AggregateStats:
    ''' holds stats about per-host activity during playbook runs '''

    def __init__(self):
        # Per-host counters, each keyed by host name.
        self.processed = {}
        self.failures = {}
        self.ok = {}
        self.dark = {}
        self.changed = {}
        self.skipped = {}

        # user defined stats, which can be per host or global
        self.custom = {}

    def increment(self, what, host):
        ''' helper function to bump a statistic '''

        # Mark the host as seen, then bump the named counter for it.
        self.processed[host] = 1
        counter = getattr(self, what)
        counter[host] = counter.get(host, 0) + 1

    def decrement(self, what, host):
        '''Decrease the named per-host counter, clamping at zero.'''
        counter = getattr(self, what)
        try:
            if counter[host] - 1 < 0:
                # This should never happen, but let's be safe
                raise KeyError("Don't be so negative")
            counter[host] -= 1
        except KeyError:
            # Missing host (or a would-be-negative value) resets to zero.
            counter[host] = 0

    def summarize(self, host):
        ''' return information about a particular host '''

        return {
            'ok': self.ok.get(host, 0),
            'failures': self.failures.get(host, 0),
            'unreachable': self.dark.get(host, 0),
            'changed': self.changed.get(host, 0),
            'skipped': self.skipped.get(host, 0),
        }

    def set_custom_stats(self, which, what, host=None):
        ''' allow setting of a custom stat'''

        # A host of None means the stat applies to the whole run.
        bucket = '_run' if host is None else host
        self.custom.setdefault(bucket, {})[which] = what

    def update_custom_stats(self, which, what, host=None):
        ''' allow aggregation of a custom stat'''

        if host is None:
            host = '_run'
        if host not in self.custom or which not in self.custom[host]:
            return self.set_custom_stats(which, what, host)

        current = self.custom[host][which]

        # mismatching types
        if not isinstance(what, type(current)):
            return None

        if isinstance(what, MutableMapping):
            self.custom[host][which] = merge_hash(current, what)
        else:
            # let overloaded + take care of other types
            self.custom[host][which] += what
|
||||
BIN
.ve/lib/python2.7/site-packages/ansible/executor/stats.pyc
Normal file
BIN
.ve/lib/python2.7/site-packages/ansible/executor/stats.pyc
Normal file
Binary file not shown.
@@ -0,0 +1,989 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
# (c) 2017 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import pty
|
||||
import time
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
import termios
|
||||
import traceback
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure, AnsibleActionFail, AnsibleActionSkip
|
||||
from ansible.executor.task_result import TaskResult
|
||||
from ansible.module_utils.six import iteritems, string_types, binary_type
|
||||
from ansible.module_utils._text import to_text, to_native
|
||||
from ansible.module_utils.connection import write_to_file_descriptor
|
||||
from ansible.playbook.conditional import Conditional
|
||||
from ansible.playbook.task import Task
|
||||
from ansible.template import Templar
|
||||
from ansible.utils.listify import listify_lookup_plugin_terms
|
||||
from ansible.utils.unsafe_proxy import UnsafeProxy, wrap_var
|
||||
from ansible.vars.clean import namespace_facts, clean_facts
|
||||
from ansible.utils.vars import combine_vars
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
except ImportError:
|
||||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
|
||||
__all__ = ['TaskExecutor']
|
||||
|
||||
|
||||
def remove_omit(task_args, omit_token):
    '''
    Remove args with a value equal to the ``omit_token`` recursively
    to align with now having suboptions in the argument_spec
    '''

    pruned = {}

    for key, value in task_args.items():
        if value == omit_token:
            # argument was explicitly omitted; drop it entirely
            continue
        if isinstance(value, dict):
            # recurse so omitted suboptions are pruned too
            pruned[key] = remove_omit(value, omit_token)
        else:
            pruned[key] = value

    return pruned
|
||||
|
||||
|
||||
class TaskExecutor:
|
||||
|
||||
'''
|
||||
This is the main worker class for the executor pipeline, which
|
||||
handles loading an action plugin to actually dispatch the task to
|
||||
a given host. This class roughly corresponds to the old Runner()
|
||||
class.
|
||||
'''
|
||||
|
||||
# Modules that we optimize by squashing loop items into a single call to
|
||||
# the module
|
||||
SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS)
|
||||
|
||||
def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj, final_q):
    """Store everything needed to run one task against one host.

    All arguments are kept as-is on the instance; the only active step
    taken here is calling ``self._task.squash()``.
    """
    # what to run, and where
    self._host = host
    self._task = task
    self._job_vars = job_vars
    self._play_context = play_context

    # plumbing handed down by the worker process
    self._new_stdin = new_stdin
    self._loader = loader
    self._shared_loader_obj = shared_loader_obj
    self._final_q = final_q

    # filled in lazily during execution
    self._connection = None
    self._loop_eval_error = None

    self._task.squash()
|
||||
|
||||
def run(self):
    '''
    The main executor entrypoint, where we determine if the specified
    task requires looping and either runs the task with self._run_loop()
    or self._execute(). After that, the returned results are parsed and
    returned as a dict.

    Returns a result dict; on any AnsibleError (or unexpected exception)
    a failed result dict is returned instead of raising.  The connection
    (if one was opened) is always closed on the way out.
    '''

    display.debug("in run() - task %s" % self._task._uuid)

    try:
        try:
            items = self._get_loop_items()
        except AnsibleUndefinedVariable as e:
            # save the error raised here for use later
            items = None
            self._loop_eval_error = e

        if items is not None:
            if len(items) > 0:
                item_results = self._run_loop(items)

                # create the overall result item
                res = dict(results=item_results)

                # loop through the item results, and set the global changed/failed result flags based on any item.
                for item in item_results:
                    if 'changed' in item and item['changed'] and not res.get('changed'):
                        res['changed'] = True
                    if 'failed' in item and item['failed']:
                        item_ignore = item.pop('_ansible_ignore_errors')
                        if not res.get('failed'):
                            res['failed'] = True
                            res['msg'] = 'One or more items failed'
                            self._task.ignore_errors = item_ignore
                        elif self._task.ignore_errors and not item_ignore:
                            self._task.ignore_errors = item_ignore

                    # ensure to accumulate these
                    for array in ['warnings', 'deprecations']:
                        if array in item and item[array]:
                            if array not in res:
                                res[array] = []
                            if not isinstance(item[array], list):
                                item[array] = [item[array]]
                            res[array] = res[array] + item[array]
                            del item[array]

                # BUGFIX: this previously checked res.get('Failed', False); the flag is
                # stored under the lowercase key 'failed' (set in the loop above), so the
                # condition was always true and 'All items completed' silently clobbered
                # the 'One or more items failed' message.
                if not res.get('failed', False):
                    res['msg'] = 'All items completed'
            else:
                res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
        else:
            display.debug("calling self._execute()")
            res = self._execute()
            display.debug("_execute() done")

        # make sure changed is set in the result, if it's not present
        if 'changed' not in res:
            res['changed'] = False

        def _clean_res(res, errors='surrogate_or_strict'):
            # Recursively unwrap UnsafeProxy objects and decode bytes to text
            # so the result is safe to serialize/ship over the result queue.
            if isinstance(res, UnsafeProxy):
                return res._obj
            elif isinstance(res, binary_type):
                return to_text(res, errors=errors)
            elif isinstance(res, dict):
                for k in res:
                    try:
                        res[k] = _clean_res(res[k], errors=errors)
                    except UnicodeError:
                        if k == 'diff':
                            # If this is a diff, substitute a replacement character if the value
                            # is undecodable as utf8.  (Fix #21804)
                            display.warning("We were unable to decode all characters in the module return data."
                                            " Replaced some in an effort to return as much as possible")
                            res[k] = _clean_res(res[k], errors='surrogate_then_replace')
                        else:
                            raise
            elif isinstance(res, list):
                for idx, item in enumerate(res):
                    res[idx] = _clean_res(item, errors=errors)
            return res

        display.debug("dumping result to json")
        res = _clean_res(res)
        display.debug("done dumping result, returning")
        return res
    except AnsibleError as e:
        return dict(failed=True, msg=wrap_var(to_text(e, nonstring='simplerepr')), _ansible_no_log=self._play_context.no_log)
    except Exception as e:
        return dict(failed=True, msg='Unexpected failure during module execution.', exception=to_text(traceback.format_exc()),
                    stdout='', _ansible_no_log=self._play_context.no_log)
    finally:
        try:
            self._connection.close()
        except AttributeError:
            # no connection was ever established (self._connection is None)
            pass
        except Exception as e:
            display.debug(u"error closing connection: %s" % to_text(e))
|
||||
|
||||
def _get_loop_items(self):
    '''
    Loads a lookup plugin to handle the with_* portion of a task (if specified),
    and returns the items result.

    Returns None when the task has no loop at all; otherwise a list whose
    elements are wrapped in UnsafeProxy.  Temporarily injects the play
    context variables and 'ansible_search_path' into self._job_vars while
    the lookup runs, restoring the originals afterwards.
    '''

    # save the play context variables to a temporary dictionary,
    # so that we can modify the job vars without doing a full copy
    # and later restore them to avoid modifying things too early
    play_context_vars = dict()
    self._play_context.update_vars(play_context_vars)

    old_vars = dict()
    for k in play_context_vars:
        if k in self._job_vars:
            old_vars[k] = self._job_vars[k]
        self._job_vars[k] = play_context_vars[k]

    # get search path for this task to pass to lookup plugins
    self._job_vars['ansible_search_path'] = self._task.get_search_path()

    # ensure basedir is always in (dwim already searches here but we need to display it)
    if self._loader.get_basedir() not in self._job_vars['ansible_search_path']:
        self._job_vars['ansible_search_path'].append(self._loader.get_basedir())

    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
    items = None
    loop_cache = self._job_vars.get('_ansible_loop_cache')
    if loop_cache is not None:
        # _ansible_loop_cache may be set in `get_vars` when calculating `delegate_to`
        # to avoid reprocessing the loop
        items = loop_cache
    elif self._task.loop_with:
        # with_<lookup> style loop: resolve the named lookup plugin and run it
        if self._task.loop_with in self._shared_loader_obj.lookup_loader:
            fail = True
            if self._task.loop_with == 'first_found':
                # first_found loops are special. If the item is undefined then we want to fall through to the next value rather than failing.
                fail = False

            loop_terms = listify_lookup_plugin_terms(terms=self._task.loop, templar=templar, loader=self._loader, fail_on_undefined=fail,
                                                    convert_bare=False)
            if not fail:
                # drop terms that still contain unresolved template vars
                loop_terms = [t for t in loop_terms if not templar._contains_vars(t)]

            # get lookup
            mylookup = self._shared_loader_obj.lookup_loader.get(self._task.loop_with, loader=self._loader, templar=templar)

            # give lookup task 'context' for subdir (mostly needed for first_found)
            # NOTE: if no keyword matches the action name, 'subdir' leaks out of the
            # loop as its last value ('file'), so the fallback is 'files'.
            for subdir in ['template', 'var', 'file']:  # TODO: move this to constants?
                if subdir in self._task.action:
                    break
            setattr(mylookup, '_subdir', subdir + 's')

            # run lookup
            items = mylookup.run(terms=loop_terms, variables=self._job_vars, wantlist=True)
        else:
            raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop_with)

    elif self._task.loop:
        # plain 'loop:' keyword: template it and require a list result
        items = templar.template(self._task.loop)
        if not isinstance(items, list):
            raise AnsibleError(
                "Invalid data passed to 'loop', it requires a list, got this instead: %s."
                " Hint: If you passed a list/dict of just one element,"
                " try adding wantlist=True to your lookup invocation or use q/query instead of lookup." % items
            )

    # now we restore any old job variables that may have been modified,
    # and delete them if they were in the play context vars but not in
    # the old variables dictionary
    for k in play_context_vars:
        if k in old_vars:
            self._job_vars[k] = old_vars[k]
        else:
            del self._job_vars[k]

    if items:
        # wrap every item so templating treats loop values as unsafe (no re-templating)
        for idx, item in enumerate(items):
            if item is not None and not isinstance(item, UnsafeProxy):
                items[idx] = UnsafeProxy(item)

    return items
|
||||
|
||||
def _run_loop(self, items):
    '''
    Runs the task with the loop items specified and collates the result
    into an array named 'results' which is inserted into the final result
    along with the item for which the loop ran.

    Each per-item result is also pushed onto self._final_q as a TaskResult
    so callbacks can report items as they finish.
    '''

    results = []

    # make copies of the job vars and task so we can add the item to
    # the variables and re-validate the task with the item variable
    # task_vars = self._job_vars.copy()
    task_vars = self._job_vars

    loop_var = 'item'
    index_var = None
    label = None
    loop_pause = 0
    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)

    # FIXME: move this to the object itself to allow post_validate to take care of templating (loop_control.post_validate)
    if self._task.loop_control:
        loop_var = templar.template(self._task.loop_control.loop_var)
        index_var = templar.template(self._task.loop_control.index_var)
        loop_pause = templar.template(self._task.loop_control.pause)

        # This may be 'None', so it is templated below after we ensure a value and an item is assigned
        label = self._task.loop_control.label

    # ensure we always have a label
    if label is None:
        label = '{{' + loop_var + '}}'

    if loop_var in task_vars:
        display.warning(u"The loop variable '%s' is already in use. "
                        u"You should set the `loop_var` value in the `loop_control` option for the task"
                        u" to something else to avoid variable collisions and unexpected behavior." % loop_var)

    ran_once = False
    if self._task.loop_with:
        # Only squash with 'with_:' not with the 'loop:', 'magic' squashing can be removed once with_ loops are
        items = self._squash_items(items, loop_var, task_vars)

    no_log = False
    for item_index, item in enumerate(items):
        task_vars[loop_var] = item
        if index_var:
            task_vars[index_var] = item_index

        # Update template vars to reflect current loop iteration
        templar.set_available_variables(task_vars)

        # pause between loop iterations (skipped before the first item)
        if loop_pause and ran_once:
            try:
                time.sleep(float(loop_pause))
            except ValueError as e:
                raise AnsibleError('Invalid pause value: %s, produced error: %s' % (loop_pause, to_native(e)))
        else:
            ran_once = True

        try:
            # fresh copies per item, so per-item templating can't bleed
            # into later iterations
            tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True)
            tmp_task._parent = self._task._parent
            tmp_play_context = self._play_context.copy()
        except AnsibleParserError as e:
            results.append(dict(failed=True, msg=to_text(e)))
            continue

        # now we swap the internal task and play context with their copies,
        # execute, and swap them back so we can do the next iteration cleanly
        (self._task, tmp_task) = (tmp_task, self._task)
        (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
        res = self._execute(variables=task_vars)
        # dump_attrs() is taken while the copy is still installed, so the
        # fields reflect this item's post-validated values
        task_fields = self._task.dump_attrs()
        (self._task, tmp_task) = (tmp_task, self._task)
        (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)

        # update 'general no_log' based on specific no_log
        no_log = no_log or tmp_task.no_log

        # now update the result with the item info, and append the result
        # to the list of results
        res[loop_var] = item
        if index_var:
            res[index_var] = item_index
        res['_ansible_item_result'] = True
        res['_ansible_ignore_errors'] = task_fields.get('ignore_errors')

        # gets templated here unlike rest of loop_control fields, depends on loop_var above
        res['_ansible_item_label'] = templar.template(label, cache=False)

        self._final_q.put(
            TaskResult(
                self._host.name,
                self._task._uuid,
                res,
                task_fields=task_fields,
            ),
            block=False,
        )
        results.append(res)
        del task_vars[loop_var]

    self._task.no_log = no_log

    return results
|
||||
|
||||
def _squash_items(self, items, loop_var, variables):
    '''
    Squash items down to a comma-separated list for certain modules which support it
    (typically package management modules).

    Returns either the original items unchanged, or a single-element list
    wrapping all templated names so the calling loop runs exactly once.
    Any failure in here is deliberately swallowed: squashing is only an
    optimization.
    '''
    name = None
    try:
        # _task.action could contain templatable strings (via action: and
        # local_action:) Template it before comparing.  If we don't end up
        # optimizing it here, the templatable string might use template vars
        # that aren't available until later (it could even use vars from the
        # with_items loop) so don't make the templated string permanent yet.
        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
        task_action = self._task.action
        if templar._contains_vars(task_action):
            task_action = templar.template(task_action, fail_on_undefined=False)

        if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
            if all(isinstance(o, string_types) for o in items):
                final_items = []

                # pull the package-name argument out under whichever alias it uses
                found = None
                for allowed in ['name', 'pkg', 'package']:
                    name = self._task.args.pop(allowed, None)
                    if name is not None:
                        found = allowed
                        break

                # This gets the information to check whether the name field
                # contains a template that we can squash for
                # (the two NUL-prefixed sentinels render differently only when
                # the name actually references the loop variable)
                template_no_item = template_with_item = None
                if name:
                    if templar._contains_vars(name):
                        variables[loop_var] = '\0$'
                        template_no_item = templar.template(name, variables, cache=False)
                        variables[loop_var] = '\0@'
                        template_with_item = templar.template(name, variables, cache=False)
                        del variables[loop_var]

                # Check if the user is doing some operation that doesn't take
                # name/pkg or the name/pkg field doesn't have any variables
                # and thus the items can't be squashed
                if template_no_item != template_with_item:
                    display.deprecated(
                        'Invoking "%s" only once while using a loop via squash_actions is deprecated. '
                        'Instead of using a loop to supply multiple items and specifying `%s: %s`, '
                        'please use `%s: %r` and remove the loop' % (self._task.action, found, name, found, self._task.loop),
                        version='2.11'
                    )
                    # collect the templated name for every item whose when: passes
                    for item in items:
                        variables[loop_var] = item
                        if self._task.evaluate_conditional(templar, variables):
                            new_item = templar.template(name, cache=False)
                            final_items.append(new_item)
                    self._task.args['name'] = final_items
                    # Wrap this in a list so that the calling function loop
                    # executes exactly once
                    return [final_items]
                else:
                    # Restore the name parameter
                    # NOTE(review): if none of name/pkg/package was present, this
                    # writes args['name'] = None, adding an arg that wasn't there
                    # before — looks suspicious; confirm against module behavior.
                    self._task.args['name'] = name
        # elif:
        #     Right now we only optimize single entries.  In the future we
        #     could optimize more types:
        #     * lists can be squashed together
        #     * dicts could squash entries that match in all cases except the
        #       name or pkg field.
    except Exception:
        # Squashing is an optimization.  If it fails for any reason,
        # simply use the unoptimized list of items.

        # Restore the name parameter
        if name is not None:
            self._task.args['name'] = name
    return items
|
||||
|
||||
def _execute(self, variables=None):
|
||||
'''
|
||||
The primary workhorse of the executor system, this runs the task
|
||||
on the specified host (which may be the delegated_to host) and handles
|
||||
the retry/until and block rescue/always execution
|
||||
'''
|
||||
|
||||
if variables is None:
|
||||
variables = self._job_vars
|
||||
|
||||
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
|
||||
|
||||
context_validation_error = None
|
||||
try:
|
||||
# apply the given task's information to the connection info,
|
||||
# which may override some fields already set by the play or
|
||||
# the options specified on the command line
|
||||
self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)
|
||||
|
||||
# fields set from the play/task may be based on variables, so we have to
|
||||
# do the same kind of post validation step on it here before we use it.
|
||||
self._play_context.post_validate(templar=templar)
|
||||
|
||||
# now that the play context is finalized, if the remote_addr is not set
|
||||
# default to using the host's address field as the remote address
|
||||
if not self._play_context.remote_addr:
|
||||
self._play_context.remote_addr = self._host.address
|
||||
|
||||
# We also add "magic" variables back into the variables dict to make sure
|
||||
# a certain subset of variables exist.
|
||||
self._play_context.update_vars(variables)
|
||||
|
||||
# FIXME: update connection/shell plugin options
|
||||
except AnsibleError as e:
|
||||
# save the error, which we'll raise later if we don't end up
|
||||
# skipping this task during the conditional evaluation step
|
||||
context_validation_error = e
|
||||
|
||||
# Evaluate the conditional (if any) for this task, which we do before running
|
||||
# the final task post-validation. We do this before the post validation due to
|
||||
# the fact that the conditional may specify that the task be skipped due to a
|
||||
# variable not being present which would otherwise cause validation to fail
|
||||
try:
|
||||
if not self._task.evaluate_conditional(templar, variables):
|
||||
display.debug("when evaluation is False, skipping this task")
|
||||
return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log)
|
||||
except AnsibleError:
|
||||
# loop error takes precedence
|
||||
if self._loop_eval_error is not None:
|
||||
raise self._loop_eval_error # pylint: disable=raising-bad-type
|
||||
raise
|
||||
|
||||
# Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
|
||||
if self._loop_eval_error is not None:
|
||||
raise self._loop_eval_error # pylint: disable=raising-bad-type
|
||||
|
||||
# if we ran into an error while setting up the PlayContext, raise it now
|
||||
if context_validation_error is not None:
|
||||
raise context_validation_error # pylint: disable=raising-bad-type
|
||||
|
||||
# if this task is a TaskInclude, we just return now with a success code so the
|
||||
# main thread can expand the task list for the given host
|
||||
if self._task.action in ('include', 'include_tasks'):
|
||||
include_variables = self._task.args.copy()
|
||||
include_file = include_variables.pop('_raw_params', None)
|
||||
if not include_file:
|
||||
return dict(failed=True, msg="No include file was specified to the include")
|
||||
|
||||
include_file = templar.template(include_file)
|
||||
return dict(include=include_file, include_variables=include_variables)
|
||||
|
||||
# if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
|
||||
elif self._task.action == 'include_role':
|
||||
include_variables = self._task.args.copy()
|
||||
return dict(include_variables=include_variables)
|
||||
|
||||
# Now we do final validation on the task, which sets all fields to their final values.
|
||||
self._task.post_validate(templar=templar)
|
||||
if '_variable_params' in self._task.args:
|
||||
variable_params = self._task.args.pop('_variable_params')
|
||||
if isinstance(variable_params, dict):
|
||||
if C.INJECT_FACTS_AS_VARS:
|
||||
display.warning("Using a variable for a task's 'args' is unsafe in some situations "
|
||||
"(see https://docs.ansible.com/ansible/devel/reference_appendices/faq.html#argsplat-unsafe)")
|
||||
variable_params.update(self._task.args)
|
||||
self._task.args = variable_params
|
||||
|
||||
# get the connection and the handler for this execution
|
||||
if (not self._connection or
|
||||
not getattr(self._connection, 'connected', False) or
|
||||
self._play_context.remote_addr != self._connection._play_context.remote_addr):
|
||||
self._connection = self._get_connection(variables=variables, templar=templar)
|
||||
else:
|
||||
# if connection is reused, its _play_context is no longer valid and needs
|
||||
# to be replaced with the one templated above, in case other data changed
|
||||
self._connection._play_context = self._play_context
|
||||
|
||||
self._set_connection_options(variables, templar)
|
||||
self._set_shell_options(variables, templar)
|
||||
|
||||
# get handler
|
||||
self._handler = self._get_action_handler(connection=self._connection, templar=templar)
|
||||
|
||||
# Apply default params for action/module, if present
|
||||
# These are collected as a list of dicts, so we need to merge them
|
||||
module_defaults = {}
|
||||
for default in self._task.module_defaults:
|
||||
module_defaults.update(default)
|
||||
if module_defaults:
|
||||
module_defaults = templar.template(module_defaults)
|
||||
if self._task.action in module_defaults:
|
||||
tmp_args = module_defaults[self._task.action].copy()
|
||||
tmp_args.update(self._task.args)
|
||||
self._task.args = tmp_args
|
||||
if self._task.action in C.config.module_defaults_groups:
|
||||
for group in C.config.module_defaults_groups.get(self._task.action, []):
|
||||
tmp_args = (module_defaults.get('group/{0}'.format(group)) or {}).copy()
|
||||
tmp_args.update(self._task.args)
|
||||
self._task.args = tmp_args
|
||||
|
||||
# And filter out any fields which were set to default(omit), and got the omit token value
|
||||
omit_token = variables.get('omit')
|
||||
if omit_token is not None:
|
||||
self._task.args = remove_omit(self._task.args, omit_token)
|
||||
|
||||
# Read some values from the task, so that we can modify them if need be
|
||||
if self._task.until:
|
||||
retries = self._task.retries
|
||||
if retries is None:
|
||||
retries = 3
|
||||
elif retries <= 0:
|
||||
retries = 1
|
||||
else:
|
||||
retries += 1
|
||||
else:
|
||||
retries = 1
|
||||
|
||||
delay = self._task.delay
|
||||
if delay < 0:
|
||||
delay = 1
|
||||
|
||||
# make a copy of the job vars here, in case we need to update them
|
||||
# with the registered variable value later on when testing conditions
|
||||
vars_copy = variables.copy()
|
||||
|
||||
display.debug("starting attempt loop")
|
||||
result = None
|
||||
for attempt in range(1, retries + 1):
|
||||
display.debug("running the handler")
|
||||
try:
|
||||
result = self._handler.run(task_vars=variables)
|
||||
except AnsibleActionSkip as e:
|
||||
return dict(skipped=True, msg=to_text(e))
|
||||
except AnsibleActionFail as e:
|
||||
return dict(failed=True, msg=to_text(e))
|
||||
except AnsibleConnectionFailure as e:
|
||||
return dict(unreachable=True, msg=to_text(e))
|
||||
display.debug("handler run complete")
|
||||
|
||||
# preserve no log
|
||||
result["_ansible_no_log"] = self._play_context.no_log
|
||||
|
||||
# update the local copy of vars with the registered value, if specified,
|
||||
# or any facts which may have been generated by the module execution
|
||||
if self._task.register:
|
||||
vars_copy[self._task.register] = wrap_var(result)
|
||||
|
||||
if self._task.async_val > 0:
|
||||
if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
|
||||
result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
|
||||
# FIXME callback 'v2_runner_on_async_poll' here
|
||||
|
||||
# ensure no log is preserved
|
||||
result["_ansible_no_log"] = self._play_context.no_log
|
||||
|
||||
# helper methods for use below in evaluating changed/failed_when
|
||||
def _evaluate_changed_when_result(result):
|
||||
if self._task.changed_when is not None and self._task.changed_when:
|
||||
cond = Conditional(loader=self._loader)
|
||||
cond.when = self._task.changed_when
|
||||
result['changed'] = cond.evaluate_conditional(templar, vars_copy)
|
||||
|
||||
def _evaluate_failed_when_result(result):
|
||||
if self._task.failed_when:
|
||||
cond = Conditional(loader=self._loader)
|
||||
cond.when = self._task.failed_when
|
||||
failed_when_result = cond.evaluate_conditional(templar, vars_copy)
|
||||
result['failed_when_result'] = result['failed'] = failed_when_result
|
||||
else:
|
||||
failed_when_result = False
|
||||
return failed_when_result
|
||||
|
||||
if 'ansible_facts' in result:
|
||||
if self._task.action in ('set_fact', 'include_vars'):
|
||||
vars_copy.update(result['ansible_facts'])
|
||||
else:
|
||||
# TODO: cleaning of facts should eventually become part of taskresults instead of vars
|
||||
vars_copy.update(namespace_facts(result['ansible_facts']))
|
||||
if C.INJECT_FACTS_AS_VARS:
|
||||
vars_copy.update(clean_facts(result['ansible_facts']))
|
||||
|
||||
# set the failed property if it was missing.
|
||||
if 'failed' not in result:
|
||||
# rc is here for backwards compatibility and modules that use it instead of 'failed'
|
||||
if 'rc' in result and result['rc'] not in [0, "0"]:
|
||||
result['failed'] = True
|
||||
else:
|
||||
result['failed'] = False
|
||||
|
||||
# Make attempts and retries available early to allow their use in changed/failed_when
|
||||
if self._task.until:
|
||||
result['attempts'] = attempt
|
||||
|
||||
# set the changed property if it was missing.
|
||||
if 'changed' not in result:
|
||||
result['changed'] = False
|
||||
|
||||
# re-update the local copy of vars with the registered value, if specified,
|
||||
# or any facts which may have been generated by the module execution
|
||||
# This gives changed/failed_when access to additional recently modified
|
||||
# attributes of result
|
||||
if self._task.register:
|
||||
vars_copy[self._task.register] = wrap_var(result)
|
||||
|
||||
# if we didn't skip this task, use the helpers to evaluate the changed/
|
||||
# failed_when properties
|
||||
if 'skipped' not in result:
|
||||
_evaluate_changed_when_result(result)
|
||||
_evaluate_failed_when_result(result)
|
||||
|
||||
if retries > 1:
|
||||
cond = Conditional(loader=self._loader)
|
||||
cond.when = self._task.until
|
||||
if cond.evaluate_conditional(templar, vars_copy):
|
||||
break
|
||||
else:
|
||||
# no conditional check, or it failed, so sleep for the specified time
|
||||
if attempt < retries:
|
||||
result['_ansible_retry'] = True
|
||||
result['retries'] = retries
|
||||
display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
|
||||
self._final_q.put(TaskResult(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False)
|
||||
time.sleep(delay)
|
||||
else:
|
||||
if retries > 1:
|
||||
# we ran out of attempts, so mark the result as failed
|
||||
result['attempts'] = retries - 1
|
||||
result['failed'] = True
|
||||
|
||||
# do the final update of the local variables here, for both registered
|
||||
# values and any facts which may have been created
|
||||
if self._task.register:
|
||||
variables[self._task.register] = wrap_var(result)
|
||||
|
||||
if 'ansible_facts' in result:
|
||||
if self._task.action in ('set_fact', 'include_vars'):
|
||||
variables.update(result['ansible_facts'])
|
||||
else:
|
||||
# TODO: cleaning of facts should eventually become part of taskresults instead of vars
|
||||
variables.update(namespace_facts(result['ansible_facts']))
|
||||
if C.INJECT_FACTS_AS_VARS:
|
||||
variables.update(clean_facts(result['ansible_facts']))
|
||||
|
||||
# save the notification target in the result, if it was specified, as
|
||||
# this task may be running in a loop in which case the notification
|
||||
# may be item-specific, ie. "notify: service {{item}}"
|
||||
if self._task.notify is not None:
|
||||
result['_ansible_notify'] = self._task.notify
|
||||
|
||||
# add the delegated vars to the result, so we can reference them
|
||||
# on the results side without having to do any further templating
|
||||
# FIXME: we only want a limited set of variables here, so this is currently
|
||||
# hardcoded but should be possibly fixed if we want more or if
|
||||
# there is another source of truth we can use
|
||||
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict()).copy()
|
||||
if len(delegated_vars) > 0:
|
||||
result["_ansible_delegated_vars"] = {'ansible_delegated_host': self._task.delegate_to}
|
||||
for k in ('ansible_host', ):
|
||||
result["_ansible_delegated_vars"][k] = delegated_vars.get(k)
|
||||
|
||||
# and return
|
||||
display.debug("attempt loop complete, returning result")
|
||||
return result
|
||||
|
||||
def _poll_async_result(self, result, templar, task_vars=None):
    '''
    Polls for the specified JID to be complete

    :arg result: result dict from launching the async task; must contain
        an 'ansible_job_id' key, otherwise a failed result is returned.
    :arg templar: Templar instance handed through to the status-check handler.
    :kwarg task_vars: vars used when running the async_status checks;
        defaults to the executor's job vars when None.
    :returns: the final async job result dict, or a dict with failed=True
        when the job id is missing, the time limit expires, or the result
        file cannot be parsed.
    :raises: re-raises the last polling exception if the connection keeps
        failing until the timeout is exhausted.
    '''

    if task_vars is None:
        task_vars = self._job_vars

    async_jid = result.get('ansible_job_id')
    if async_jid is None:
        return dict(failed=True, msg="No job id was returned by the async task")

    # Create a new pseudo-task to run the async_status module, and run
    # that (with a sleep for "poll" seconds between each retry) until the
    # async time limit is exceeded.

    async_task = Task().load(dict(action='async_status jid=%s' % async_jid, environment=self._task.environment))

    # FIXME: this is no longer the case, normal takes care of all, see if this can just be generalized
    # Because this is an async task, the action handler is async. However,
    # we need the 'normal' action handler for the status check, so get it
    # now via the action_loader
    normal_handler = self._shared_loader_obj.action_loader.get(
        'normal',
        task=async_task,
        connection=self._connection,
        play_context=self._play_context,
        loader=self._loader,
        templar=templar,
        shared_loader_obj=self._shared_loader_obj,
    )

    # budget the whole poll loop against the task's async time limit,
    # decrementing by the poll interval on every pass (success or failure)
    time_left = self._task.async_val
    while time_left > 0:
        time.sleep(self._task.poll)

        try:
            async_result = normal_handler.run(task_vars=task_vars)
            # We do not bail out of the loop in cases where the failure
            # is associated with a parsing error. The async_runner can
            # have issues which result in a half-written/unparseable result
            # file on disk, which manifests to the user as a timeout happening
            # before it's time to timeout.
            if (int(async_result.get('finished', 0)) == 1 or
                    ('failed' in async_result and async_result.get('_ansible_parsed', False)) or
                    'skipped' in async_result):
                break
        except Exception as e:
            # Connections can raise exceptions during polling (eg, network bounce, reboot); these should be non-fatal.
            # On an exception, call the connection's reset method if it has one
            # (eg, drop/recreate WinRM connection; some reused connections are in a broken state)
            display.vvvv("Exception during async poll, retrying... (%s)" % to_text(e))
            display.debug("Async poll exception was:\n%s" % to_text(traceback.format_exc()))
            try:
                normal_handler._connection.reset()
            except AttributeError:
                # not every connection plugin implements reset()
                pass

            # Little hack to raise the exception if we've exhausted the timeout period
            time_left -= self._task.poll
            if time_left <= 0:
                raise
        else:
            # successful poll iteration: still consume time budget
            time_left -= self._task.poll

    # loop exited either via break (job finished/failed/skipped) or timeout
    if int(async_result.get('finished', 0)) != 1:
        if async_result.get('_ansible_parsed'):
            return dict(failed=True, msg="async task did not complete within the requested time")
        else:
            return dict(failed=True, msg="async task produced unparseable results", async_result=async_result)
    else:
        return async_result
|
||||
|
||||
def _get_connection(self, variables, templar):
    '''
    Reads the connection property for the host, and returns the
    correct connection object from the list of connection plugins

    :arg variables: the task vars for this host; NOTE: mutated in place
        when the task is delegated (interpreter vars are swapped for the
        delegated-to host's values).
    :arg templar: templar instance (currently unused here but kept for
        interface symmetry with the other setup methods).
    :returns: an initialized connection plugin instance.
    :raises AnsibleError: if the named connection plugin cannot be found.
    '''

    if self._task.delegate_to is not None:
        # since we're delegating, we don't want to use interpreter values
        # which would have been set for the original target host
        # (iterate over a list copy because we delete keys while scanning)
        for i in list(variables.keys()):
            if isinstance(i, string_types) and i.startswith('ansible_') and i.endswith('_interpreter'):
                del variables[i]
        # now replace the interpreter values with those that may have come
        # from the delegated-to host
        delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict())
        if isinstance(delegated_vars, dict):
            for i in delegated_vars:
                if isinstance(i, string_types) and i.startswith("ansible_") and i.endswith("_interpreter"):
                    variables[i] = delegated_vars[i]

    conn_type = self._play_context.connection

    connection = self._shared_loader_obj.connection_loader.get(
        conn_type,
        self._play_context,
        self._new_stdin,
        task_uuid=self._task._uuid,
        ansible_playbook_pid=to_text(os.getppid())
    )

    if not connection:
        raise AnsibleError("the connection plugin '%s' was not found" % conn_type)

    # FIXME: remove once all plugins pull all data from self._options
    self._play_context.set_options_from_plugin(connection)

    # persistent connections run through a background ansible-connection
    # process reachable via a local domain socket
    if any(((connection.supports_persistence and C.USE_PERSISTENT_CONNECTIONS), connection.force_persistence)):
        self._play_context.timeout = connection.get_option('persistent_command_timeout')
        display.vvvv('attempting to start connection', host=self._play_context.remote_addr)
        display.vvvv('using connection plugin %s' % connection.transport, host=self._play_context.remote_addr)
        # We don't need to send the entire contents of variables to ansible-connection
        filtered_vars = dict(
            (key, value) for key, value in variables.items()
            if key.startswith('ansible') and key != 'ansible_failed_task'
        )
        socket_path = self._start_connection(filtered_vars)
        display.vvvv('local domain socket path is %s' % socket_path, host=self._play_context.remote_addr)
        setattr(connection, '_socket_path', socket_path)

    return connection
|
||||
|
||||
def _set_connection_options(self, variables, templar):
    '''
    Template and apply the connection plugin's options from the task vars,
    honoring delegation, then configure the associated shell plugin.
    '''

    # these keys must keep their pre-delegation values
    KEEP_ORIGINAL = ('inventory_hostname',)

    # merge the delegated-to host's vars on top of the task vars
    delegated = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict())
    merged_vars = combine_vars(variables, delegated)

    # the set of variables this connection plugin understands
    plugin_var_names = C.config.get_plugin_vars('connection', self._connection._load_name)

    # build the dict of templated option values
    options = {'_extras': {}}
    for name in plugin_var_names:
        if name in KEEP_ORIGINAL:
            options[name] = templar.template(variables[name])
        elif name in merged_vars:
            options[name] = templar.template(merged_vars[name])

    # plugins that allow extras also pick up any 'ansible_<plugin>_*' vars
    if getattr(self._connection, 'allow_extras', False):
        extra_prefix = 'ansible_%s_' % self._connection._load_name
        for name in merged_vars:
            if name.startswith(extra_prefix) and name not in options:
                options['_extras'][name] = templar.template(merged_vars[name])

    # hand the templated options to the plugin, then do the same for its shell
    self._connection.set_options(var_options=options)
    self._set_shell_options(merged_vars, templar)
|
||||
|
||||
def _set_shell_options(self, variables, templar):
    '''
    Template every shell-plugin variable present in the given vars and
    apply the result to the connection's shell plugin.
    '''
    known_vars = C.config.get_plugin_vars('shell', self._connection._shell._load_name)
    templated = {
        name: templar.template(variables[name])
        for name in known_vars
        if name in variables
    }
    self._connection._shell.set_options(var_options=templated)
|
||||
|
||||
def _get_action_handler(self, connection, templar):
    '''
    Returns the correct action plugin to handle the requested task action

    :raises AnsibleError: if the resolved handler cannot be loaded.
    '''

    action_loader = self._shared_loader_obj.action_loader
    prefix = self._task.action.split('_')[0]

    # an action plugin named exactly after the task wins; otherwise network
    # modules whose prefix has a matching plugin use that; anything else
    # falls back to the generic 'normal' action plugin
    if self._task.action in action_loader:
        handler_name = self._task.action
    elif prefix in C.NETWORK_GROUP_MODULES and prefix in action_loader:
        handler_name = prefix
    else:
        handler_name = 'normal'

    handler = action_loader.get(
        handler_name,
        task=self._task,
        connection=connection,
        play_context=self._play_context,
        loader=self._loader,
        templar=templar,
        shared_loader_obj=self._shared_loader_obj,
    )
    if not handler:
        raise AnsibleError("the handler '%s' was not found" % handler_name)

    return handler
|
||||
|
||||
def _start_connection(self, variables):
    '''
    Starts the persistent connection

    Spawns the ansible-connection helper process, feeds it the (filtered)
    task vars and the serialized play context over a pty, and returns the
    local domain socket path the helper reports back.

    :arg variables: dict of vars to pass to ansible-connection.
    :returns: the 'socket_path' string from the helper's JSON output.
    :raises AnsibleError: if ansible-connection cannot be located on $PATH
        or the helper reports an 'error' in its result.
    '''
    # pty pair: we keep the master side; the child reads from the slave side
    master, slave = pty.openpty()

    # run the helper under the same interpreter as this process
    python = sys.executable

    def find_file_in_path(filename):
        # Check $PATH first, followed by same directory as sys.argv[0]
        paths = os.environ['PATH'].split(os.pathsep) + [os.path.dirname(sys.argv[0])]
        for dirname in paths:
            fullpath = os.path.join(dirname, filename)
            if os.path.isfile(fullpath):
                return fullpath

        raise AnsibleError("Unable to find location of '%s'" % filename)

    p = subprocess.Popen(
        [python, find_file_in_path('ansible-connection'), to_text(os.getppid())],
        stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    # parent no longer needs the slave end once the child has inherited it
    os.close(slave)

    # We need to set the pty into noncanonical mode. This ensures that we
    # can receive lines longer than 4095 characters (plus newline) without
    # truncating.
    old = termios.tcgetattr(master)
    new = termios.tcgetattr(master)
    new[3] = new[3] & ~termios.ICANON

    try:
        termios.tcsetattr(master, termios.TCSANOW, new)
        # handshake: send the vars, then the serialized play context
        write_to_file_descriptor(master, variables)
        write_to_file_descriptor(master, self._play_context.serialize())

        # wait for the helper to finish and collect its output
        (stdout, stderr) = p.communicate()
    finally:
        # always restore the original tty attributes and release the pty
        termios.tcsetattr(master, termios.TCSANOW, old)
        os.close(master)

    # on success the helper prints JSON to stdout; on failure it may print
    # JSON (or arbitrary text) to stderr
    if p.returncode == 0:
        result = json.loads(to_text(stdout, errors='surrogate_then_replace'))
    else:
        try:
            result = json.loads(to_text(stderr, errors='surrogate_then_replace'))
        except getattr(json.decoder, 'JSONDecodeError', ValueError):
            # JSONDecodeError only available on Python 3.5+
            result = {'error': to_text(stderr, errors='surrogate_then_replace')}

    # relay any diagnostic messages the helper produced
    if 'messages' in result:
        for msg in result.get('messages'):
            display.vvvv('%s' % msg, host=self._play_context.remote_addr)

    if 'error' in result:
        if self._play_context.verbosity > 2:
            if result.get('exception'):
                msg = "The full traceback is:\n" + result['exception']
                display.display(msg, color=C.COLOR_ERROR)
        raise AnsibleError(result['error'])

    return result['socket_path']
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user