pax_global_header 0000666 0000000 0000000 00000000064 14154403771 0014520 g ustar 00root root 0000000 0000000 52 comment=8bda88c2bec96c04700df89f91e6ddda3f911eba
ansible-paquerette-dev/ 0000775 0000000 0000000 00000000000 14154403771 0015432 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/.gitignore 0000664 0000000 0000000 00000000167 14154403771 0017426 0 ustar 00root root 0000000 0000000 play.book.yml
hosts
__pycache__/*
host_vars/*
group_vars/*
releases/*
contract/*
paquerette_utils.conf.yml
# IDE
.idea ansible-paquerette-dev/.gitmodules 0000664 0000000 0000000 00000000001 14154403771 0017576 0 ustar 00root root 0000000 0000000
ansible-paquerette-dev/LICENCE.txt 0000664 0000000 0000000 00000104451 14154403771 0017242 0 ustar 00root root 0000000 0000000 GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
ansible-roles-paquerette
Copyright (C) 2020 Jérôme Marchini
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
ansible-roles-paquerette Copyright (C) 2020 Jérôme Marchini
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
ansible-paquerette-dev/README.md 0000664 0000000 0000000 00000015065 14154403771 0016720 0 ustar 00root root 0000000 0000000 # ansible-roles-paquerette
*Proposition of **minimal** ansible roles, to host and maintain services on premise or in the cloud.*
Supported systems : **Ubuntu 20.04 LTS**
partially : **debian 9**
Principles :
- "less is more" and "simpler is better"
- usage of Ansible for all good reasons
- minimisation of shell usage
- 1 configuration file per host with its complete definition and the list of instances
- 1 utility to manage inventory : inventory.py
- 1 utility to apply changes on servers : play.py
- possibility to recover backup on a separate dedicated machine, the master backup server
Technical choices:
- 2 physical partitions based configuration, 1 system, 1 programs and data, mounted on **/mnt/vdb** by default
- monitoring : **monit**
- backup : **backupninja**, external backup in "master/slave" mode: an external server is connecting to the host to recover backups
- mail, alerts : **postfix as SMTP relay**, possibility of bcc
- **all** services are provided using HTTPS with **letsencrypt** certificates
## 0 - Prerequisites
- an ubuntu machine with public IP. (**the server**)
- a machine with ansible to manage the servers. (**the controller**) (It is also possible to use ansible locally on the server)
- on the server : optionally, recommended, a distinct partition for data and programs, mounted on /mnt/vdb by default. If absent, set **data_partition** to *no* or *False* in the host_vars
- on the server : **an administrative account** which can sudo commands as root without providing password
- on the server: an ssh key for the master backup server (can be the same as the administrative account, but not recommended)
- **a mail box** available via SMTP for alerts.
All relevant variables are in the group_vars base_server file, in PREREQUISITES sections
## 1 - The inventory
Here is the definition of the list of servers, for each server the composition of the platform and the list of instances and all role variables.
in groups_vars :
***In bold**, groups to be defined specifically, as your needs.*
- secret : common secret variables (partner keys, smtp password...)
- - base_server : all common non secret variables
- - - **test** : all specific variables for testing hosts
- - - **prod** : all specific variables for production hosts
etc...
cf: [./doc/hosts.prod](./doc/hosts.prod)
## 2 - The server
Here is the definition of the base of the server, it implements :
- the base file and directory organization
- the server localization
- the backup strategy
- the monitoring strategy
cf: [./doc/host_template.yml](./doc/host_template.yml)
**rôle : [base_server](./roles/base_server/README.md)**
## 3 - The platform
Here is the definition of the components used to run the instances, it implements :
- web server (nginx, apache,...)
- database server (mariadb, postgres, mongodb...)
- languages (php, python, nodejs...)
- associated monitoring
**role : [base_platform](./roles/base_platform/README.md)**
## 4 - Instance deployment, upgrade, removal, maintenance, restoration, etc...
Here are the implemented methods for the full life of an instance :
for the deployment (install / reinstall command):
- download of the application
- database and dependencies management (letsencrypt certificate...)
- base configuration of the application
- backup and monitoring
- service start
for the upgrade (upgrade command):
- download of the new version
- service stop
- complete cold backup
- less or more automatic upgrading of the application
- service start
for the removal (uninstall command):
- stop backup and monitoring
- service stop
- program removal and database drop
- removal of associated tasks (logrotate...)
- revocation and deletion of the letsencrypt certificate
for the restoration (restore command):
- recovering of database and application files from archive, and restoration, except external data such as user files for Nextcloud
**roles : \_instance**
## 5 - Reusable elements
Roles are using factorized parts and reusable (database creation, certificates etc...)
**roles : \_app\_\**
## 6 - Instance management
The inventory is managed with the utility program **inventory.py**
The application of the changes is done with the utility program **play.py** which generates the playbook and runs ansible.
for example :
./inventory.py --new-instance
./play.py myhost myinstance install
./play.py -e 'domain_name=www.peace.org' -r myhost letsencrypt_nightly_new
**utility programs : inventory.py, play.py**
## 7 - Utility roles
Getting letsencrypt certificates during the nightly shutdown: **letsencrypt_nightly_new** :
./play.py -e 'domain_name=www.peace.org' -r myhost letsencrypt_nightly_new
## 8 - Special cases
Some roles are specific, such as the Collabora instance management :
**roles :**
- **collabora\_online\_instance**
- **mumble_server**
- **wekan_instance_snap** (no longer maintained)
- **turn\_server**
- **_master_backup_server**
## Some vars
- **clear_app_user_password** : The clear password to get chrooted sftp environment to access the web_app
- **app_user_password** : The user password of the web app in what format ?
## Documentation :
cf: [The documentation of your dreams](./doc/README.md "And all your dreams come true !")
## Notes :
STABLE ROLES
- base server (_python3)
- base platform (apache, nginx, mysql/mariadb, mongodb, nodejs, postgres, php7_fpm )
- _app_log_inventory, _app_backup, _app_restore_instance, _app_logrotate, _app_monit, _create_database, _letsencrypt_certificate
- backup in master/slave mode
- nextcloud_instance (nginx, apache)
- collabora online_instance (official partner) (nginx, apache)
- dolibarr_instance (apache only) (no longer maintained)
- rocketchat_instance (no longer maintained) : unfortunately, it's now impossible to install several instances on the same host
- _web_app (chrooted sftp only user, git, static, php, python) (empty, wordpress, grav, pelican, yeswiki, adminer etc...) (apache only)
- derived from _web_app : wordpress_instance, yeswiki_instance, adminer_instance
- mattermost_instance
NEW ROLES OR IN PROGRESS (may be subject to important refactoring):
- tryton_instance
- framadate_instance (install only)
- grav_instance, pelican_instance,
- wekan snap (missing uninstall)(no longer maintained)
- turn_server
- mumble_server (install only beta)
OTHERS :
- status.py : utility providing a rapid check of the production
TODO :
- database files in system partition : move /var/lib/ postgres.... in /mnt/vdb/ ... in study
- fail2ban for services (or not)
to redesign :
- wekan snap in backup role
[paquerette.eu](http://paquerette.eu "L'informatique responsable est l'affaire de tous !")
ansible-paquerette-dev/README_fr.md 0000664 0000000 0000000 00000017566 14154403771 0017417 0 ustar 00root root 0000000 0000000 # ansible-roles-paquerette
*Proposition de rôles ansible **minimaux**, pour mettre en oeuvre et maintenir des applications sur des machines auto hébergées ou dans le cloud.*
Systèmes supportés : **Ubuntu 16.04 LTS**, **Ubuntu 18.04 LTS**
partiellement : **debian 9**
Principes :
- "less is more" et "simpler is better"
- utilisation d'ansible pour toutes les bonnes raisons
- minimisation de l'usage du shell
- 1 fichier de configuration par hôte avec sa définition et la liste des instances hébergées
- 1 utilitaire pour gérer l'inventaire des serveurs et des applications : inventory.py
- 1 utilitaire pour appliquer les changements sur les serveurs et les applications : play.py
- possibilité de récupérer les sauvegardes sur une machine dédiée (master backup server)
Choix techniques:
- configuration basée sur deux partitions, 1 système, 1 pour les données et les programmes montée sur **/mnt/vdb** par défaut
- monitoring : **monit**
- backup : **backupninja**, backup externe en mode "master/slave" : un serveur externe se connecte sur la machine pour récupérer les archives mises à sa disposition
- mail, remontée de messages : **postfix configuré en relais SMTP**, possibilité de copie cachée systématique permettant l'envoi de sms par exemple, non détaillé ici
- **tous** les services web sont fournis en HTTPS et utilisent des certificats **letsencrypt**
## 0 - Prérequis
- une machine ubuntu avec une adresse ip publique. (**le serveur**)
- une machine avec ansible à partir de laquelle effectuer le déploiement. (**le contrôleur**) (il est aussi possible d'utiliser ansible localement sur le server)
- sur le serveur : en option, mais recommandé une partition de donnée sur la machine montée sur /mnt/vdb par défaut. Si absente, positionner dans les host_vars **data_partition** à *no* ou *False*
- sur le serveur : **un compte d'administration** permettant l'execution de commande via sudo et sans renseigner de mot de passe, avec sa clé ssh
- sur le serveur : une clé ssh permettant la connexion du compte récupérant les sauvegardes (peut être la même que le compte d'administration, mais non recommandable)
- **une boîte mail** accessible via SMTP pour relayer par défaut l'ensemble des messages.
L'ensemble des variables associées sont dans le fichier base_server, dans les sections PREREQUISITES
## 1 - L'inventaire
C'est la partie qui définit la liste des machines pilotées, la composition de la plateforme de support applicative pour chaque serveur et la liste des instances applicatives et toutes les variables utiles aux rôles.
fichiers dans group_vars
***En gras**, les groupes à définir localement, selon les besoins.*
- secret : les variables communes qui doivent rester secrètes (mot de passe etc...)
- - base_server : toutes les variables communes non secrètes
- - - **test** : toutes les variables propres aux machines de test
- - - **prod** : toutes les variables propres aux machines de production
etc...
cf: [./doc/hosts.prod](./doc/hosts.prod)
## 2 - Le serveur
C'est la partie qui définit les bases du serveur, elle est la couche basse. Elle met en oeuvre :
- l'organisation des répertoires et fichiers
- la localisation du serveur
- la stratégie de sauvegarde
- la stratégie de monitoring
cf: [./doc/host_template.yml](./doc/host_template.yml)
**rôle : [base_server](./roles/base_server/README.md)**
## 3 - La plateforme de support applicative
C'est la partie qui définit l'ensemble des services ou composants nécessaire au fonctionnement d'une instance applicative. Elle met en oeuvre :
- les serveurs web (nginx, apache,...)
- les serveurs de base de données (mariadb, postgres, mongodb...)
- les languages (php, python, nodejs...)
- le monitoring associé à ces composants
**role : [base_platform](./roles/base_platform/README.md)**
## 4 - Le déploiement de l'instance applicative, la mise à jour, la désinstallation complète, la restauration etc...
C'est la partie qui définit la méthode de déploiement et de mise à jour d'une instance applicative. Elle met en oeuvre,
pour le déploiement (commande install / reinstall):
- le téléchargement d'une application
- la création des bases de données et dépendances (certificat letsencrypt...)
- la configuration de base de l'application
- la mise en place des sauvegardes et du monitoring
- le démarrage du service
pour la mise à jour (commande upgrade):
- le téléchargement de la nouvelle version
- l'arrêt du service
- la sauvegarde complète de la version courante à froid
- la mise à jour du logiciel et de la base de données + ou - automatisée selon l'application
- le redémarrage du service
pour la suppression complète (commande uninstall):
- l'arrêt du monitoring
- l'arrêt du service
- la suppression de l'application et de la base de données
- la suppression des tâches de sauvegarde et associées (logrotate...)
- la révocation et la suppression du certificat letsencrypt
pour la restauration (commande restore):
- récupération de la base de données et des fichiers applicatifs, en dehors des données externes telles que les fichier utilisateurs de Nextcloud
**rôles : \_instance**
## 5 - Éléments réutilisables
Les rôles utilisent des parties factorisées dans des rôles réutilisables (création de base de données, de certificats etc...)
**rôles : \_app\_\**
## 6 - Gestion des instances
La gestion de l'inventaire des instances se fait avec le programme **inventory.py**
L'application des modifications de l'inventaire se fait avec le programme **play.py** qui génère le playbook et le lance avec ansible
par exemple :
./inventory.py --new-instance
./play.py myhost myinstance install
./play.py -e 'domain_name=www.peace.org' -r myhost letsencrypt_nightly_new
**utilitaires : inventory.py, play.py**
## 7 - Rôles utilitaires
Obtention de certificats letsencrypt pendant la coupure nocturne : **letsencrypt_nightly_new** :
./play.py -e 'domain_name=www.peace.org' -r myhost letsencrypt_nightly_new
## 8 - Cas particuliers
Ce sont des rôles qui permettent de déployer des applications telles Collabora d'une façon spécifique.
**rôles :**
- **collabora\_online\_instance**
- **mumble_server**
- **wekan_instance_snap** (plus maintenu)
- **turn\_server**
- **_master_backup_server**
## Documentation :
cf: [The documentation of your dreams](./doc/README.md "And all your dreams come true !")
## Notes :
ROLES STABLES
- base server (_python3)
- base platform (apache, nginx, mysql/mariadb, mongodb, nodejs, postgres, php7_fpm )
- _app_log_inventory, _app_backup, _app_restore_instance, _app_logrotate, _app_monit, _create_database, _letsencrypt_certificate
- backup en mode master/slave
- nextcloud_instance (nginx, apache)
- collabora online_instance (partenaire officiel) (nginx, apache)
- dolibarr_instance (apache uniquement) (plus maintenu)
- rocketchat_instance (plus maintenu) : il est devenu impossible d'installer plusieurs instances sur une même machine, rocket.chat n'est pas prévu pour ça
- _web_app (chrooted sftp only user, git, static, php, python) (empty, wordpress, grav, pelican, yeswiki, adminer etc...) (apache uniquement)
- dérivés de _web_app : wordpress_instance, yeswiki_instance, adminer_instance
- mattermost_instance
ROLES NOUVEAUX OU EN DÉVELOPPEMENT (qui peuvent subir un refactoring important):
- tryton_instance
- framadate_instance (installation uniquement)
- grav_instance, pelican_instance,
- wekan snap (manque uninstall) (plus maintenu)
- turn_server
- mumble_server (installation uniquement béta)
AUTRES
- status.py : utilitaire permettant de faire un check rapide de la production, en cours de développement
TODO :
- bases de données dans la partition système : déplacer le /var/lib/ postgres.... dans /mnt/vdb/ à l'étude
- fail2ban pour les services (ou pas)
à revoir :
- wekan snap dans le rôle de backup
[paquerette.eu](http://paquerette.eu "L'informatique responsable est l'affaire de tous !")
ansible-paquerette-dev/doc/ 0000775 0000000 0000000 00000000000 14154403771 0016177 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/doc/README.md 0000664 0000000 0000000 00000001555 14154403771 0017464 0 ustar 00root root 0000000 0000000 Documentation :
For simple things see [Cookbook.md](./cookbook.md "The wonderful cookbook")
For advanced stuff see [Advanced stuffs.md](./advanced stuffs.md "The amazing solutions to your amazing problems")
Better than all, try it following [quickstart.md](./quickstart.md "The quickstart guide that will change your life") using [hosts.first](./hosts.first), [myfirsthost.yml](./myfirsthost.yml)
Base configuration file for inventory.py and play.py utilities is here : [paquerette_utils.conf.yml](./paquerette_utils.conf.yml "The best configuration file you ever read") and should be copied into the install directory, then easily filled with your host list
Base server configuration file is here : [host_template.yml](./host_template.yml "The template of the century")
Sample for production server inventory is here : [hosts.prod](./hosts.prod "The ultimate revelation") ansible-paquerette-dev/doc/advanced stuffs.md 0000664 0000000 0000000 00000012676 14154403771 0021575 0 ustar 00root root 0000000 0000000 ## Advanced stuffs and things you will have to do soon or late... or never
### Advanced running facilities
#### scheduling playbook
using **at** command
at command must be installed on controller :
sudo apt install at
# ensure at is running
sudo atd
use the --at-time option (see at manual for syntax)
./play.py myhost myinstance mycommand **-a 05:00**
the script logs in the file : myhost_myinstance_mycommand.log
#### making scripts 'walking' on instances
using **--script-command**
It is possible to prepare a script to apply changes on a set of instance :
./play.py -s mycommand
prints on stdout a list of ./play calls that you can filter :
./play.py -s upgrade | grep myhost
or decorate...
./play.py -s 'maintenance -a 05:00' | grep nextcloud_instance > myscript.sh
... you can adapt the default template **script_default_template** in the configuration file to your needs.
#### using local releases to avoid download problems
It is possible to deploy releases from controller machine instead of trying to download them from external repository.
To do that, just store the archive in **releases/{{ role_name }}/default** to deploy it.
eg :
ls ./releases/wordpress_instance/
lrwxrwxrwx 1 jerome jerome 13 juin 19 10:59 default -> latest.tar.gz
-rw-rw-r-- 1 jerome jerome 11200591 juin 18 19:52 latest.tar.gz
- local_release : **releases/{{ role_name }}/default** (app_src must be set, set to "None" or "Local" for local only applications)
### Backup, restoration, recovery
[README.md](../roles/_app_restore_instance/README.md)
*NB* : **Nextcloud and other applications are using an external data directory**, the backup and restoration of **user files are not covered by standard backup**. The reason is that the volume is too big for that. If the files are critical, then find another solution more adapted for that, like an rclone based solution.
#### setup a remote backup server
using **_master_backup_server** role
It is nice to have an independent server with backups of your instances, strongly recommended for production environment
The master backup server uses ssh to connect to the production host with a key by default in ".ssh/backup_master", see *(
**master_backup_key_path** and master_backup_key_file )*
setup the host_var **backup_slaves** : backup_slaves: ["myhost1", "myhost2",] etc
./play.py -ry mymasterbackuphost base_server
./play.py -ry mymasterbackuphost _master_backup_server
You may also remove a previously backed-up slave by updating the **remove_slaves** variable : remove_slaves: ["nohost1","nohost2"]
#### find all archives containing an instance and all "version backup" slots
using **_app_restore_instance** role or **restore command** and **restore_action=list**
"version backup" are made first when the upgrade command is called on an instance
find an archive containing the instance on the host :
./play.py myhost myinstance restore -e 'restore_action=list'
find an archive containing the instance on the backup master host :
./play.py -ry masterbackuphost _app_restore_instance -e 'restore_action=list app_host=myhost app_instance_id=myinstance'
#### after a crash
using **restore_action=restore**
to restore an instance, ensure it is deployed (empty):
./play.py myhost myinstance install
then use the restore action using archive previously found as myarchive.tar.gz as explained above:
-case 1: from the same host
./play.py myhost myinstance restore -e 'restore_action=restore restore_source=myarchive.tar.gz'
-case 2 : from the controller : first copy the recovered archive to the host and restore it locally
./scp my_archive.tar.gz ubuntu@myhost:/tmp/my_archive.tar.gz
./play.py myhost myinstance restore -e 'restore_action=restore restore_source=/tmp/myarchive.tar.gz from_full_archive=no'
*NB* : if _app_restore_instance role is not integrated in the "instance role" extra vars must be provided by hand
#### installing an instance cloning another instance
using **from_instance_id**
It can be very useful to migrate files and database from an instance to another one. It is possible to clone an instance like this :
./play.py myhost myinstance_new install
./play.py myhost myinstance_new restore -e 'restore_action=restore restore_source=myarchive.tar.gz from_instance_id=myinstance"
*NB* : remember that **external data directory is not covered by standard backup**, migrating data must be done by hand
#### recover file and database of an instance
using **restore_action=recover**
first find the archive as explained above, then
./play.py myhost myinstance restore -e 'restore_action=recover restore_source=myarchive.tar.gz'
or
./play.py -ry masterbackuphost _app_restore_instance -e 'restore_action=recover restore_source=myarchive.tar.gz app_instance_id=myinstance'
#### restoring after problematic upgrade
using **from_version_backup=yes**
./play.py -y my_host myinstance restore -e 'restore_action=restore restore_source=/mnt/vdb/backups/version/myinstance/oldversion from_version_backup=yes'
### writing new roles
The best way to write new roles to provide new applications with less effort is to rely on reusable roles.
If the application is standard, use the [_web_app](../roles/_web_app/README.md) role and complete it as you need.
See [wordpress instance vars](../roles/wordpress_instance/vars/main.yml) as example of reusing _web_app without any code.
### customizing inventory tools
coming soon...
### upgrade system host
coming soon... ansible-paquerette-dev/doc/base_server_template.yml 0000664 0000000 0000000 00000014105 14154403771 0023116 0 ustar 00root root 0000000 0000000 ---
#############################################
# GLOBAL CONFIGURATION
#############################################
#############################################
# BASE PACKAGES
#############################################
# debian, ubuntu 16 +
base_packages_list: [
"anacron",
# needed for ansible apt
"aptitude",
"apt-transport-https",
# backup solution
"backupninja",
"curl",
# needed for timezone
"dbus",
"ed",
"git",
"htop",
"iftop",
"iotop",
# needed for smtp ssl
"libsasl2-modules",
"monit",
"nano",
"nfs-common",
"postfix",
"tree",
"python3-pip",
"python3-apt",
"rsync",
"ufw",
"unzip",
"vim",
"zip",
]
debian_backports_uri: "http://http.debian.net/debian"
debian_backports_components: "{{ansible_distribution_release}}-backports main contrib non-free"
#############################################
# SCHEDULING BACKUP AND LETSENCRYPT RENEW
#############################################
# 03:05 backup_monit_stop
# 03:06 backup_web_stop
# 03:07 new_cert_standalone
# 03:07 backup_base_service_conf_time
# 03:08 backup_app_service_conf_time
# 03:10 renew_cert_standalone
# 03:20 backup_web_start
# 03:21 backup_monit_start_hour
# 03:30 backup_day_conf_time
# 03:30 backup_month_conf_time
# 04:00 master_backup_time
# 06:00 monit_start_anyway_hour (every 10 minutes until 03:00)
#############################################
# FILE SYSTEM
#############################################
# allow to use different partition form /
data_partition: yes
base_prod_path: "/mnt/vdb"
base_prod_ansible_log: "{{ base_prod_path }}/ansible_log"
base_prod_options: "{{ base_prod_path }}/opt"
base_prod_log: "{{ base_prod_path }}/log"
base_root_opt: "{{ base_prod_path }}/opt-root"
base_chroot_jail: "{{ base_prod_path }}/jail"
# retention for files, all files older than retention in days will be deleted (weekly)
tmp_file_retention: "7"
log_file_retention: "60"
#############################################
# USER CHROOT AND SFTP ONLY
#############################################
sftp_users_chroot: "sftp_users_chroot"
#############################################
# SMTP POSTMASTER
#############################################
# START PREREQUISITES
# smtp_shortfrom: "postmaster"
# smtp_domain: "mydomain.org"
# smtp_host: "my.smtphost.org"
smtp_port: "587"
smtp_user: "{{ smtp_shortfrom }}@{{ smtp_domain }}"
# smtp_password: "verysecret"
base_postmaster: "{{ smtp_shortfrom }}@{{ smtp_domain }}"
# END PREREQUISITES
# override to "" to send also mails in copy to {{ mail_in_copy }} (monit and postfix)
comment_for_copy_mail: "#"
mail_in_copy: "NOMAIL"
alert_group: "PROD"
#############################################
# BACKUP
#############################################
# backup scripts are here
backup_item_dir: "/etc/backup.d"
# backup destinations
# base destination
backup_base_dir: "{{ base_prod_path }}/backups"
# production daily backup
backup_prod_dir: "{{ backup_base_dir }}/prod"
# used for upgrades of instances
backup_version_dir: "{{ backup_base_dir }}/version"
# for opt components
backup_opt_dir: "{{ backup_prod_dir }}/opt"
backup_root_opt_dir: "{{ backup_prod_dir }}/opt-root"
# stop and start web services for backup
backup_web_stop_hour: "3"
backup_web_stop_minute: "06"
backup_web_start_hour: "3"
backup_web_start_minute: "20"
# stop and start base services for backup
backup_base_service_conf_time: "03:07"
backup_app_service_conf_time: "03:08"
backup_day_conf_time: "03:30"
backup_month_conf_time: "{{ backup_day_conf_time }}"
### master - slave configuration ###
# user for master, and slave (same user)
backup_master_user: "backup_master_user"
backup_master_group: "{{ backup_master_user }}"
# keys for master (public + .pub)
# file location on master server and ansible automation server, behind $HOME
# START PREREQUISITES
master_backup_key_path: ".ssh/backup_master"
master_backup_key_file: "{{ master_backup_key_path }}/id_rsa"
# END PREREQUISITES
### master configuration ###
backup_master_slaves_location: "{{ backup_base_dir }}/slaves"
# list of hosts
backup_slaves: []
backup_master_hour: "4"
backup_master_minute: "00"
# random delay in seconds before making the copy to avoid bottleneck
backup_master_delay: "3600"
# slaves to be removed from the hosts to backup
remove_slaves: []
#############################################
# MONITORING
#############################################
backup_monit_stop_hour: "3"
backup_monit_stop_minute: "05"
backup_monit_start_hour: "3"
backup_monit_start_minute: "21"
monit_start_anyway_hour: "0,1,2,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23"
monit_start_anyway_minute: "*/10"
# in seconds
# 5 minutes
monit_cycle_duration: "300"
# 4 minutes
monit_start_delay: "240"
# system monitoring defaults
# when % raised then alert
monit_system_cpu_usage: 95
monit_system_memory_usage: 95
monit_system_swap_usage: 90
monit_system_root_partition_usage: 80
monit_system_data_partition_usage: 90
#############################################
# LETSENCRYPT
#############################################
new_cert_standalone_hour: "3"
new_cert_standalone_minute: "07"
renew_cert_standalone_hour: "3"
renew_cert_standalone_minute: "10"
renew_cert_copy_minute: "20"
renew_cert_days: "1,4"
letsencrypt_cert_root: "/etc/letsencrypt/live/"
certbot_revoke_command: "certbot revoke --delete-after-revoke --cert-path /etc/letsencrypt/live/{{ app_domain }}/fullchain.pem"
# todo: refactor
certbot_grant_command_apache2: 'certbot --agree-tos --authenticator standalone --installer apache -n -m {{ base_postmaster | mandatory }} -d {{ letsencrypt_domain | quote }} '
certbot_grant_command_nginx: "certbot certonly --agree-tos --webroot -n -m {{ base_postmaster }} --webroot-path=/var/www/html -d {{ letsencrypt_domain | quote }} "
# using that for the moment
certbot_grant_command_standalone: "certbot certonly --standalone --agree-tos -n -m {{ base_postmaster | mandatory }} -d {{ app_domain | quote }} "
#############################################
# WWW
#############################################
# www root directory and root directory for logs of reverse proxy
www_root: "{{ base_prod_path }}/www"
www_log: "{{ base_prod_log }}/{{ rev_proxy }}"
ansible-paquerette-dev/doc/cookbook.md 0000664 0000000 0000000 00000010065 14154403771 0020331 0 ustar 00root root 0000000 0000000 Basic actions, for more advanced see ./advanced_stuffs.md
### Inventory
**List all inventory :**
./inventory.py
**List hosts :**
./inventory.py -L
**List instances of a host:**
./inventory.py -l myhost
or in interactive mode :
./inventory.py -l
**Filtrer to find something in inventory :**
./inventory.py -f mydomain.org
**Print information on an instance :**
./inventory.py -i myhost myinstance
or interactive :
./inventory.py -i
output example :
Definition of myinstance on myhost :
role: wordpress_instance
description: wordpress instance of mydomain.org
app_domain: myfomain.org
app_instance_id: wp_mydomain
database_password: dAH451a.?1qd*
app_user: mydomain_user
clear_app_user_password: d42?.8vyd9Rc4
app_user_password: $6$jDAdbjKOFVTgGhXP$R8zHbaYnqNNQ/TMOMfTOhBJ56S3g.DpO/yyhDufgQy5zZctdAqLBi5Rhk9kbnmgj4va2S9859EBf7BIoL7DU80
see also "preparing a message" for custom formatting
### Get a new letsencrypt certificate during the nightly shutdown
use **./play.py** to play the role **letsencrypt_nightly_new** on **myhost** with the "extravar" **mydomain.org**
./play.py -r myhost letsencrypt_nightly_new -e 'domain_name=mydomain.org'
### After that, install an instance on a host
**Note :** If the certificate is not present on the host, all services will be stopped (for 15 seconds) to get the certificate
First, use **./inventory.py** to add the instance in the inventory in interactive mode
./inventory.py -n
Answer the questions...
Then, install it on the server :
./play.py myhost myinstance install
if it fails for some reason (network...)
retry like this :
./play.py myhost myinstance reinstall
**install** command checks that the instance is **not present** on the host to avoid **replacing an instance with another**.
**reinstall** command checks the instance is **present** on the host for the same reason
### After that, preparing a message for credentials or another stuff
./inventory.py -m
will print a message generated from **message_default_template** in configuration file with instance informations
### Upgrade an instance
First, use **./inventory.py** to change the version of the instance in the inventory in interactive mode
The upgrade process will create first a full backup of the instance in backup dir named "app_old_version"
**NOTE if the slot for app_old_version already exists, the backup is skipped. This is the condition for having a reliable backup procedure that can be ran again if something fails, until it succeeds.**
Then, upgrade it on the server :
./play.py myhost myinstance upgrade
### Remove an instance
First uninstall it on the server :
./play.py -e 'app_instance_to_uninstall=myinstance' myhost myinstance uninstall
"app_instance_to_uninstall=myinstance user_to_remove=myuser" is just a double check, to be sure you really want to uninstall it.
**Note** :
* If the instance is removed from the inventory before applying on the server, you must recover the instance information from the backup of the host_vars file
* **Backups are not destroyed when an instance is removed.**
* User is not removed unless the extra var **user_to_remove** is set with the correct user name. (many instances may be installed for the same user)
Then, remove it on inventory :
./inventory.py -r myhost myinstance
### Touch an instance, change informations in inventory and apply changes
./inventory.py -t myhost myinstance
You will be prompted for changes. If any changes can be applied on the server, run ./play.py host instance reinstall
If the domain name is changed, then finally run the role cleanup_old_domain_name to remove old certificates and monitoring this way:
./play.py -r myhost _cleanup_old_domain_name -e 'app_domain=myolddomain'
### Touching playbook before running it (maybe useful in some cases, keeping control on playbook)
Answer **NO** to **"run playbook (y/N) ?"** when using ./play.py. The playbook file **play.book.yml** is generated, you can touch it and then simply launch it with ansible :
ansible-playbook play.book.yml
ansible-paquerette-dev/doc/host_template.yml 0000664 0000000 0000000 00000003345 14154403771 0021577 0 ustar 00root root 0000000 0000000 paquerette_name: "something nice to you"
# appears in selection lists in utilities inventory.py and play.py
description: "description of the host"
# BASE SERVER VARIABLES
#######################
# REMOTE BACKUP SERVER
# if this host is used as remote backup master server for other hosts (see advanced stuff)
# default is []
# backup_slaves: ["host1", "host2", ...]
# DATA PARTITION
# if set to "yes", then monitoring is enabled on the partition
# set to "no" if there is only one physical partition
# default is yes
# data_partition: no
# ALERTS
# set to comment_for_copy_mail to "" to uncomment copy mail in alert settings,
# default is "#"
# comment_for_copy_mail: "#"
# used when comment_for_copy_mail is ""
# mail_in_copy: "incopy@mymailbox.org"
# used in the subject of monit alerts
# default is "PROD"
# alert_group: "PROD"
# BACKUP
# it is strongly recommended to set up a master backup server and provide a ssh key in the default location
# default path to the public key added needed by the remote server to connect by ssh
# master_backup_key_path: ".ssh/backup_master"
## PLATFORM VARIABLES
#####################
# apache2 is available for everything :
# rev_proxy: "apache2"
# you can choose nginx instead, but the support is partial, need help for that
# rev_proxy: "nginx"
# mariadb_mysql_server: yes
# choose one between "mariadb" and "mysql", default is "mariadb"
# mariadb_mysql: "mariadb"
# mysql_root_password: "somethinghardtoguess"
# postgres_server: yes
# mongodb_server: yes
# mongodb_admin_user: "admin_mongo"
# mongodb_admin_password: "somethingsecret"
# php_server: yes
# php_version: '7.2'
# nodejs: yes
## INSTANCES VARIABLES
######################
# use "inventory.py -n" to add instances to the host
app_instances: []
ansible-paquerette-dev/doc/hosts.first 0000664 0000000 0000000 00000000314 14154403771 0020406 0 ustar 00root root 0000000 0000000 ### hosts
myfirsthost ansible_host= ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/paquerette/id_rsa
### groups
[test]
myfirsthost
### group hierarchy
[base_server:children]
test
ansible-paquerette-dev/doc/hosts.prod 0000664 0000000 0000000 00000000336 14154403771 0020227 0 ustar 00root root 0000000 0000000
### hosts
prodhost1 hostvar=...
prodhost2 hostvar=...
testhost1 hostvar=...
### groups
[test]
testhost1
[prod]
prodhost1
prodhost2
### group hierarchy
[base_server:children]
prod
test
[secret:children]
base_server ansible-paquerette-dev/doc/myfirsthost.yml 0000664 0000000 0000000 00000001371 14154403771 0021317 0 ustar 00root root 0000000 0000000 ---
paquerette_name: "my first paquerette server"
description: "an amazing quickstart server"
data_partition: no
smtp_shortfrom: "PREREQUISITE : SMTP USER NAME"
smtp_domain: "PREREQUISITE : SMTP DOMAIN NAME"
smtp_host: "PREREQUISITE : SMTP HOST"
smtp_password: "PREREQUISITE : SMTP PASSWORD"
master_backup_key_path: ".ssh/paquerette"
rev_proxy: "apache2"
php_server: yes
php_version: "7.2"
postgres_server: yes
app_instances: [
{
role: "nextcloud_instance",
description: "I can't believe it's possible"
app_domain: "PRERIQUISITE : THE CORRECT DOMAIN : PUBLIC IP WITH A DNS RULE SET",
app_instance_id: "myfirstinstance",
app_version: "15.0.8",
app_old_version: "",
database_password: "verysecret",
nextcloud_default_apps: ["calendar"]
},
] ansible-paquerette-dev/doc/paquerette_utils.conf.yml 0000664 0000000 0000000 00000011131 14154403771 0023242 0 ustar 00root root 0000000 0000000 ---
# playbook parameters
# default playbook file name
playbook_file_name: "play.book.yml"
become: yes
# instance processing in host_vars files
# do not touch unless you know what you do, frequently used in roles
instance_list_token: "app_instances"
instance_id_token: "app_instance_id"
role_name_token: "role"
command_name_token: "app_run"
# interactive mode and script mode
# HERE IS THE LIST OF HOSTS FOR INTERACTIVE MODE, LISTING HOSTS, AND MULTI INSTANCE UPGRADE
host_list: [
"myfirsthost"]
# INTERACTIVE MODE
role_list: ["base_server", "base_platform"]
command_list: ["install", "reinstall", "upgrade", "uninstall", "restore", "maintenance"]
# export csv format
# ./inventory.py
# ./inventory.py -f xxx
export_format: ['role', 'app_instance_id', 'app_domain', 'app_version', 'app_port', 'database_type', 'app_user']
# instance role rules in interactive definition of new instances, or any update of the inventory
instance_role_vars:
# free, domain_name, dir_name, clear_password, encrypted_password, simple_password, clear_simple_password, port, , boolean
# for lists (like database_type) default value is the index in list, from 1 to len(list)
- role : "nextcloud_instance"
mandatory:
app_domain: domain_name
app_version: free
app_old_version: dir_name
database_password: clear_simple_password
defaults:
app_version: "15.0.8"
app_old_version: "{{ app_version }}"
- role: "mattermost_instance"
mandatory:
app_domain: domain_name
app_version: free
app_old_version: dir_name
optional:
app_port: port
defaults:
app_version: "5.12.2"
app_old_version: "{{ app_version }}"
- role : "tryton_instance"
mandatory:
app_domain: domain_name
app_version: free
app_old_version: dir_name
database_password: clear_simple_password
admin_email: free
app_user: user_name
app_user_password: encrypted_password
web_client: boolean
defaults:
app_user: "{{ app_instance_id }}_user"
app_version: "5.4"
app_old_version: "{{ app_version }}"
web_client: True
- role: "pelican_instance"
mandatory:
app_domain: domain_name
app_user: user_name
app_user_password: encrypted_password
- role: "wordpress_instance"
mandatory:
app_domain: domain_name
database_password: clear_password
app_user: user_name
app_user_password: encrypted_password
defaults:
app_user: "{{ app_instance_id }}_user"
- role : "yeswiki_instance"
mandatory:
app_domain: domain_name
database_password: clear_password
app_user: user_name
app_user_password: encrypted_password
defaults:
app_user: "{{ app_instance_id }}_user"
- role : "empty_instance"
mandatory:
app_domain: domain_name
app_user: user_name
app_user_password: encrypted_password
database_type: ["None","mysql","postgres"]
app_version: free
app_old_version: dir_name
database_password: clear_password
defaults:
app_user: "{{ app_instance_id }}_user"
app_old_version: "{{ app_version }}"
database_type: 1
- role : "web_alias"
mandatory:
app_domain: domain_name
app_dest_domain: domain_name
- role : "dolibarr_instance"
mandatory:
app_domain: domain_name
app_version: free
app_old_version: dir_name
database_type: ["mysql","postgres"]
database_password: clear_simple_password
defaults:
app_version: "8.0.6"
app_old_version: "{{ app_version }}"
- role: "rocketchat_instance"
mandatory:
app_domain: domain_name
app_port: port
app_version: free
app_old_version: dir_name
defaults:
app_version: "1.2.1"
app_port: "3000"
app_old_version: "{{ app_version }}"
# default template for inventory message utility
# ./inventory.py -m
message_default_template: |
description: {{ description }}
domain name / sftp host : {{ app_domain }}
database name (localhost) : {{ app_instance_id }}_db
database user : {{ app_instance_id }}_usr
database password : {{ database_password }}
sftp user : {{ app_user }}
sftp password : {{ clear_app_user_password }}
# default template for scripting utility
# ./play.py -s
script_default_template:
"./play.py -y {{ host }} {{ app_instance_id }} {{ command }} # {{ role }} [{{ app_old_version }}]"
ansible-paquerette-dev/doc/playbook_new_certificate.sample.yml 0000664 0000000 0000000 00000000274 14154403771 0025240 0 ustar 00root root 0000000 0000000 ---
- hosts: test-server
vars:
domain_name: "newcert.test.paquerette.eu"
domain_admin_email: "newcert.test@paquerette.eu"
roles:
- letsencrypt_nightly_new
become: yes
ansible-paquerette-dev/doc/quickstart.md 0000664 0000000 0000000 00000007145 14154403771 0020722 0 ustar 00root root 0000000 0000000
Considering you have a server called HOST with a user named ubuntu, bionic server is recommended.
As every service runs with HTTPS protocol, you **must have** a **DNS A** rule with the **DOMAIN NAME of your service and the PUBLIC IP of your host**.
Now, en avant Simone :
## Build controller machine
On controller machine, the machine from where you control your host :
* Prerequisites
sudo apt update
sudo apt install software-properties-common
sudo apt-add-repository --yes --update ppa:ansible/ansible
sudo apt install ansible git python3 python3-yaml python3-jinja2
# prepare the key for authentification on host
mkdir $HOME/.ssh/paquerette
ssh-keygen -f $HOME/.ssh/paquerette/id_rsa -N ''
chmod 0600 $HOME/.ssh/paquerette/*
ssh-copy-id -i "$HOME/.ssh/paquerette/id_rsa.pub" ubuntu@YOUR HOST IP
# open a session on host
ssh -i "$HOME/.ssh/paquerette/id_rsa" ubuntu@YOUR HOST IP
echo 'ubuntu ALL=(ALL:ALL) NOPASSWD:ALL' | sudo EDITOR='tee -a' visudo
sudo mkdir -p /mnt/vdb
sudo apt install python
# disconnect from host
^D
# test correct configuration and keys, must return 'root'
ssh -i "$HOME/.ssh/paquerette/id_rsa" ubuntu@YOUR HOST IP sudo whoami
# cloning the roles and facilities
git clone https://gitlab.com/j.marchini/ansible-roles-paquerette.git ~/ansible
# copy the ansible hosts file:
cp ~/ansible/doc/hosts.first ~/ansible/hosts
# edit the hosts files to set YOUR HOST IP
nano ~/ansible/hosts
# replace and link /etc/ansible/hosts to our file
sudo mv /etc/ansible/hosts /etc/ansible/hosts.save
sudo ln -s ~/ansible/hosts /etc/ansible/hosts
# link /etc/ansible/roles to our roles
sudo ln -s ~/ansible/roles /etc/ansible/roles
# copy paquerette_utils.conf.yml for inventory and play facilities
cp ~/ansible/doc/paquerette_utils.conf.yml ~/ansible/
# create the host_vars directory
mkdir ~/ansible/host_vars
## Prepare a new host
# Add the host to the ansible host file
# key configuration
# group configuration
~/ansible/hosts
#
# Prepare the host, will create an admin group and account, and disallow root ssh
#
./play.py -r host.paquerette.eu base_secure_ssh
#
# edit the host file and change the user to admin
#
# copy the host_vars file
cp ~/ansible/doc/myfirsthost.yml ~/ansible/host_vars/
# add the host name in file paquerette_utils.conf.yml
# test host configuration, must return 'myfirsthost'
cd ~/ansible
./inventory.py -L
# edit the host_vars files to set your correct SMTP CREDENTIALS and application DOMAIN NAME
nano ~/ansible/host_vars/myfirsthost.yml
# test overall configuration, it must run "gathering facts" flawlessly on the server
~/ansible/play.py -ry myfirsthost
### Deploy the platform
# deploy base configuration
./play.py -r myfirsthost base_server
# deploy reverse proxy, database engine ...
./play.py -r myfirsthost base_platform
### Deploy the instance
./play.py myfirsthost myfirstinstance install
or in interactive mode :
./play.py -i
if something fails (letsencrypt, application download etc..) then run :
./play.py myfirsthost myfirstinstance reinstall
until the process ends with no failure
ET VOILA ! ansible-paquerette-dev/doc/tower_controler.md 0000664 0000000 0000000 00000000460 14154403771 0021750 0 ustar 00root root 0000000 0000000 # Ansible controler installation
## Notes
On debian
Error : AttributeError: module 'yaml' has no attribute 'FullLoader'
Need last PyYAML
```yaml
- name: "install PyYAML"
pip:
name: "PyYAML"
executable: pip3
extra_args: "--ignore-installed"
state: present
tags:
- test
``` ansible-paquerette-dev/group_vars/ 0000775 0000000 0000000 00000000000 14154403771 0017621 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/group_vars/base_server/ 0000775 0000000 0000000 00000000000 14154403771 0022121 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/group_vars/base_server/base_server.yml 0000664 0000000 0000000 00000014072 14154403771 0025150 0 ustar 00root root 0000000 0000000 ---
#############################################
# GLOBAL CONFIGURATION
#############################################
#############################################
# BASE PACKAGES
#############################################
# debian, ubuntu 16 +
base_packages_list: [
"anacron",
# needed for ansible apt
"aptitude",
"apt-transport-https",
# backup solution
"backupninja",
"curl",
# needed for timezone
"dbus",
"ed",
"git",
"htop",
"iftop",
"iotop",
# needed for smtp ssl
"libsasl2-modules",
"monit",
"nano",
"nfs-common",
"postfix",
"tree",
"python3-pip",
"python3-apt",
"ufw",
"unzip",
"vim",
"zip",
]
debian_backports_uri: "http://http.debian.net/debian"
debian_backports_components: "{{ansible_distribution_release}}-backports main contrib non-free"
#############################################
# SCHEDULING BACKUP AND LETSENCRYPT RENEW
#############################################
# 03:05 backup_monit_stop
# 03:06 backup_web_stop
# 03:07 new_cert_standalone
# 03:07 backup_base_service_conf_time
# 03:08 backup_app_service_conf_time
# 03:10 renew_cert_standalone
# 03:20 backup_web_start
# 03:21 backup_monit_start_hour
# 03:30 backup_day_conf_time
# 03:30 backup_month_conf_time
# 04:00 master_backup_time
# 06:00 monit_start_anyway_hour (every 10 minutes until 03:00)
#############################################
# FILE SYSTEM
#############################################
# allow to use different partition form /
data_partition: yes
base_prod_path: "/mnt/vdb"
base_prod_ansible_log: "{{ base_prod_path }}/ansible_log"
base_prod_options: "{{ base_prod_path }}/opt"
base_prod_log: "{{ base_prod_path }}/log"
base_root_opt: "{{ base_prod_path }}/opt-root"
base_chroot_jail: "{{ base_prod_path }}/jail"
# retention for files, all files older than retention in days will be deleted (weekly)
tmp_file_retention: "7"
log_file_retention: "60"
#############################################
# USER CHROOT AND SFTP ONLY
#############################################
sftp_users_chroot: "sftp_users_chroot"
#############################################
# SMTP POSTMASTER
#############################################
# START PREREQUISITES
# smtp_shortfrom: "postmaster"
# smtp_domain: "mydomain.org"
# smtp_host: "my.smtphost.org"
smtp_port: "587"
smtp_user: "{{ smtp_shortfrom }}@{{ smtp_domain }}"
# smtp_password: "verysecret"
base_postmaster: "{{ smtp_shortfrom }}@{{ smtp_domain }}"
# END PREREQUISITES
# override to "" to send also mails in copy to {{ mail_in_copy }} (monit and postfix)
comment_for_copy_mail: "#"
mail_in_copy: "NOMAIL"
alert_group: "PROD"
#############################################
# BACKUP
#############################################
# backup scripts are here
backup_item_dir: "/etc/backup.d"
# backup destinations
# base destination
backup_base_dir: "{{ base_prod_path }}/backups"
# production daily backup
backup_prod_dir: "{{ backup_base_dir }}/prod"
# used for upgrades of instances
backup_version_dir: "{{ backup_base_dir }}/version"
# for opt components
backup_opt_dir: "{{ backup_prod_dir }}/opt"
backup_root_opt_dir: "{{ backup_prod_dir }}/opt-root"
# stop and start web services for backup
backup_web_stop_hour: "3"
backup_web_stop_minute: "06"
backup_web_start_hour: "3"
backup_web_start_minute: "20"
# stop and start base services for backup
backup_base_service_conf_time: "03:07"
backup_app_service_conf_time: "03:08"
backup_day_conf_time: "03:30"
backup_month_conf_time: "{{ backup_day_conf_time }}"
### master - slave configuration ###
# user for master, and slave (same user)
backup_master_user: "backup_master_user"
backup_master_group: "{{ backup_master_user }}"
# keys for master (public + .pub)
# file location on master server and ansible automation server, behind $HOME
# START PREREQUISITES
master_backup_key_path: ".ssh/backup_master"
master_backup_key_file: "{{ master_backup_key_path }}/id_rsa"
# END PREREQUISITES
### master configuration ###
backup_master_slaves_location: "{{ backup_base_dir }}/slaves"
# list of hosts
backup_slaves: []
backup_master_hour: "4"
backup_master_minute: "00"
# random delay in seconds before making the copy to avoid bottleneck
backup_master_delay: "3600"
# slaves to be removed from the hosts to backup
remove_slaves: []
#############################################
# MONITORING
#############################################
backup_monit_stop_hour: "3"
backup_monit_stop_minute: "05"
backup_monit_start_hour: "3"
backup_monit_start_minute: "21"
monit_start_anyway_hour: "0,1,2,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23"
monit_start_anyway_minute: "*/10"
# in seconds
# 5 minutes
monit_cycle_duration: "300"
# 4 minutes
monit_start_delay: "240"
# system monitoring defaults
# when % raised then alert
monit_system_cpu_usage: 95
monit_system_memory_usage: 95
monit_system_swap_usage: 90
monit_system_root_partition_usage: 80
monit_system_data_partition_usage: 90
#############################################
# LETSENCRYPT
#############################################
new_cert_standalone_hour: "3"
new_cert_standalone_minute: "07"
renew_cert_standalone_hour: "3"
renew_cert_standalone_minute: "10"
renew_cert_copy_minute: "20"
renew_cert_days: "1,4"
letsencrypt_cert_root: "/etc/letsencrypt/live/"
certbot_revoke_command: "certbot revoke --delete-after-revoke --cert-path /etc/letsencrypt/live/{{ app_domain }}/fullchain.pem"
# todo: refactor
certbot_grant_command_apache2: 'certbot --agree-tos --authenticator standalone --installer apache -n -m {{ base_postmaster | mandatory }} -d {{ letsencrypt_domain | quote }} '
certbot_grant_command_nginx: "certbot certonly --agree-tos --webroot -n -m {{ base_postmaster }} --webroot-path=/var/www/html -d {{ letsencrypt_domain | quote }} "
# using that for the moment
certbot_grant_command_standalone: "certbot certonly --standalone --agree-tos -n -m {{ base_postmaster | mandatory }} -d {{ app_domain | quote }} "
#############################################
# WWW
#############################################
# www root directory and root directory for logs of reverse proxy
www_root: "{{ base_prod_path }}/www"
www_log: "{{ base_prod_log }}/{{ rev_proxy }}"
ansible-paquerette-dev/inventory.py 0000775 0000000 0000000 00000073752 14154403771 0020062 0 ustar 00root root 0000000 0000000 #! /usr/bin/env python3
"""
21/03/2019 Jerome Marchini
paquerette.eu
"""
# todo: proposer les variables optionnelles pendant le 'touch'
import sys
import argparse
import yaml
import os
import re
import crypt
import random
import csv
import shutil
import datetime
# facilities
# OrderedDict Yaml facilities
from collections import OrderedDict
import yaml.resolver
from yaml import CLoader as Loader, CDumper as Dumper
from yaml.representer import SafeRepresenter
from jinja2 import Template
# YAML configuration file
configuration_file = 'paquerette_utils.conf.yml'
# ansible hosts vars
host_vars_path = './host_vars'
# exception without traceback
sys.tracebacklimit = 0
# custom yaml with ordered dict, waiting for python 3.7
# ### start
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
Dumper.add_representer(OrderedDict, dict_representer)
Loader.add_constructor(_mapping_tag, dict_constructor)
Dumper.add_representer(str, SafeRepresenter.represent_str)
def ordered_dump(data):
return yaml.dump(data, Dumper=Dumper, default_flow_style=False)
def ordered_load(stream):
return yaml.load(stream, Loader=Loader)
# ### end custom yaml
# ui facilities
def ui_confirm(msg, default):
"""
prompt with message and return true or false or default if no response
:param msg:
:param default:
:return:
"""
valid_response = ['y', 'Y', 'n', 'N']
if default:
msg = msg + ' (Y/n) ? '
not_expected = ['n', 'N']
else:
msg = msg + ' (y/N) ? '
not_expected = ['y', 'Y']
i = ''
while i not in valid_response:
i = input(msg)
if i == '':
break
if i in not_expected:
return not default
else:
return default
def ui_select(choices, prompt=None, confirm_message=None, refresh=None, mandatory=False, default_index=None,
default_confirm=True):
"""
loop until user makes a selection between choices
:param choices: liste of choices
:param prompt: prompt string
:param confirm_message:
:param refresh: callback (choices : None)
:param mandatory: cannot exit without selection
:param default_index: from 1 to len(choices)
:param default_confirm: default confirmation if confirm message is set on
:return: index selected from 0 to len(choices) -1
"""
if prompt is None:
prompt = "Please select into :"
print(prompt)
selected = None
while selected is None:
choice_count = 0
for i, c in enumerate(choices):
print("{0} - {1}".format(i + 1, c))
choice_count += 1
if choice_count == 0:
if mandatory:
raise Exception("%s : no choices valid" % prompt)
else:
return None
# set select prompt
select_prompt = 'Select (1-%d)' % choice_count
if refresh:
select_prompt = select_prompt + ' (r=refresh)'
if default_index:
select_prompt = select_prompt + ' [default=%s]' % default_index
selected = input("%s ? " % select_prompt)
# default
if selected == '' or selected is None:
if default_index:
selected = str(default_index)
else:
selected = None
if selected is None:
if mandatory:
continue
else:
break
# refresh
if refresh and selected in ['r', 'R']:
selected = None
default_index = None
refresh(choices)
continue
# validate choice
if not selected.isdigit() or int(selected) > choice_count or int(selected) < 1:
print("Selection is not valid, please retry")
selected = None
continue
# confirm choice
if confirm_message:
if selected:
c = ui_confirm('{0} "{1}"'.format(confirm_message, choices[int(selected) - 1]), default_confirm)
else:
c = ui_confirm('{0} none'.format(confirm_message), default_confirm)
if not c:
selected = None
continue
# return None or 'natural' selected index : 0 - len(choices)-1
if selected:
return int(selected) - 1
else:
return None
class Inventory:
def __init__(self):
self.args = None
self.host_vars = None
self.host_instances = {}
# loading configuration
with open(configuration_file, 'r') as stream:
self.conf = yaml.load(stream, Loader=yaml.FullLoader)
# low level
@staticmethod
def password(simple=False, long=13):
"""
make a complicated enough password
:param simple: whether special characters are used or not (to avoid url and other encoding problems)
:param long: length of the password
:return: the complicated enough password in question
"""
def drop_char(string, char):
"""
drop a char somewhere in a string
"""
if string == "":
return char
pos = random.randint(0, len(string) - 1)
return string[:pos] + char + string[pos:]
if long < 4:
raise Exception("Password must be at least 4 characters long to be complicated enough")
element1 = "+-*/~$%&.:?!"
element2 = "abcdefghijklmnopqrstuvwxyz"
element3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
element4 = "0123456789"
element = element2 + element3 + element4
if not simple:
element = element + element1
p = ""
if not simple:
p = drop_char(p, random.choice(element1))
p = drop_char(p, random.choice(element2))
p = drop_char(p, random.choice(element3))
p = drop_char(p, random.choice(element4))
for i in range(long - len(p)):
p = drop_char(p, random.choice(element))
return p
@staticmethod
def check_pattern(value, pattern):
return value if re.match(pattern, value) else None
@staticmethod
def check_not_in(value, _list):
return value if value not in _list else None
def ui_input_check(self, key, check=None, default_value=None, mandatory=True):
result = None
if default_value is not None:
prompt = key + " [default=%s]" % default_value
else:
prompt = key
if not mandatory:
prompt = prompt + " (optional)"
prompt = prompt + " ? "
while result is None:
result = input(prompt)
# can be empty
if len(result) == 0 and not mandatory:
return
if len(result) == 0 and default_value is not None:
result = default_value
elif check is None:
result = self.check_pattern(result, r'.+') # non empty
elif check == "id":
result = self.check_pattern(result, r'\w{4,10}') # id
if result is None:
print('invalid value for an instance id, please retry')
continue
result = self.check_not_in(result, self.host_instances.keys())
if result is None:
print('not unique on host, please retry')
elif check == "user_name":
result = self.check_pattern(result, r'\w{4,10}') # id
if result is None:
print('invalid value for an user name, please retry')
elif check == "port":
result = self.check_pattern(result, r'\d{4,5}') # id
if result is None:
print('invalid value for port, please retry')
continue
app_port_list = [i.get('app_port', 0) for i in self.host_instances.values()]
result = self.check_not_in(result, app_port_list)
if result is None:
print('not unique on host, please retry')
elif check == "domain_name":
result = self.check_pattern(result, r'^(([a-zA-Z0-9\-])+\.)+[a-zA-Z0-9\-]{2,8}$') # domain name
if result is None:
print('invalid value for a domain name, please retry')
elif check == "dir_name":
result = self.check_pattern(result, r'^[\w\-\.]{1,20}') # dir name
if result is None:
print('invalid value for a directory name, please retry')
elif check == "boolean":
result = {"True": True, "False": False}.get(result)
if result is None:
print('invalid value for a boolean, must be "True" or "False"')
else:
result = self.check_pattern(result, r'.+') # non empty
return result
def list_hosts(self):
hl = self.conf.get('host_list')
if hl is not None:
for h in hl:
yield h
else:
for f in sorted(os.listdir(host_vars_path)):
if f.endswith('.yml'):
yield f[:-4]
@staticmethod
def get_host_description(_host):
with open(os.path.join(host_vars_path, _host + '.yml'), 'r') as host_file:
host_vars = yaml.load(host_file, Loader=yaml.FullLoader)
return host_vars.get('description', '')
def load_host_vars(self, _host):
with open(os.path.join(host_vars_path, _host + '.yml'), 'r') as host_file:
self.host_vars = ordered_load(host_file)
ilt = self.conf.get('instance_list_token', None)
if ilt is None:
raise Exception('instance_list_token not found in configuration file')
iit = self.conf.get('instance_id_token', None)
if iit is None:
raise Exception('instance_id_token not found in configuration file')
self.host_instances = {}
for i in self.host_vars.get(ilt, []):
self.host_instances[i.get(iit)] = i
@staticmethod
def default_value_of(_definition, _dict, _var_name):
if _definition.get('defaults') and _definition.get('defaults').get(_var_name) is not None:
if not isinstance(_definition.get('defaults').get(_var_name), str):
return _definition.get('defaults').get(_var_name)
else:
return Template(_definition.get('defaults').get(_var_name)).render(_dict)
return None
def new_value(self, _dict, _var_name, _type, _default_value, _mandatory):
if _type == "clear_password":
_dict[_var_name] = self.password()
elif _type == "encrypted_password":
_dict['clear_' + _var_name] = self.password()
_dict[_var_name] = crypt.crypt(_dict['clear_' + _var_name])
elif _type == "clear_simple_password":
_dict[_var_name] = self.password(True)
elif _type == "encrypted_simple_password":
_dict['clear_' + _var_name] = self.password(True)
_dict[_var_name] = crypt.crypt(_dict['clear_' + _var_name])
elif isinstance(_type, list):
_dict[_var_name] = _type[ui_select(_type, prompt='Select %s' % _var_name, mandatory=_mandatory,
default_index=_default_value)]
else:
new_value = self.ui_input_check(_var_name, _type, _default_value, _mandatory)
if new_value is not None:
_dict[_var_name] = new_value
def make_new_instance(self):
i = OrderedDict()
roles = [r['role'] for r in self.conf['instance_role_vars']]
r = ui_select(roles, prompt='Select a role for the new instance :', mandatory=True)
# base values for all instances
i['role'] = self.conf['instance_role_vars'][r]['role']
i[self.conf['instance_id_token']] = self.ui_input_check("Instance ID", "id")
i['description'] = self.ui_input_check("Description")
# mandatory values depend on role
role_vars = self.conf['instance_role_vars'][r]
for key, _type in self.conf['instance_role_vars'][r]['mandatory'].items():
self.new_value(i, key, _type, self.default_value_of(role_vars, i, key), True)
# optional values depend on role
if self.conf['instance_role_vars'][r].get('optional') is not None:
for key, _type in self.conf['instance_role_vars'][r]['optional'].items():
self.new_value(i, key, _type, self.default_value_of(role_vars, i, key), False)
return i
def export(self, _filter, _stdout=False):
def export_csv(dest):
csv_writer = csv.writer(dest)
header = ['host']
header.extend(self.conf['export_format'])
csv_writer.writerow(header)
for h in self.list_hosts():
self.load_host_vars(h)
for instance in self.host_instances.values():
row = row_from_instance(h, instance)
if rf is not None:
s = ''.join(e if e is not None else '' for e in row)
if not rf.search(s):
continue
csv_writer.writerow(row)
def row_from_instance(_host, _instance):
r = [_host]
for f in self.conf['export_format']:
r.append(_instance.get(f))
return r
rf = None
if _filter is not None:
rf = re.compile(_filter)
if _stdout:
export_csv(sys.stdout)
else:
with open(self.args.export, 'w') as export_file:
export_csv(export_file)
def new_instance(self):
    """Interactively create a new instance and append it to the host vars file (with a timestamped backup)."""
    if self.host_instances is None:
        print('No instance list found in %s, please create it first' % self.args.host)
        # nothing to append to: the original fell through here and crashed
        # later on the missing instance list token
        return
    ni = self.make_new_instance()
    ni_id = ni.get(self.conf['instance_id_token'])
    print('---\nAdding %s to %s :\n---' % (ni_id, self.args.host))
    print(ordered_dump(ni))
    if ui_confirm('Sure ?', False):
        self.host_vars[self.conf['instance_list_token']].append(ni)
        yml_path = os.path.join(host_vars_path, self.args.host + '.yml')
        # timestamped backup of the host vars file before rewriting it
        shutil.copyfile(yml_path, yml_path + '.%s' % datetime.datetime.now())
        with open(yml_path, 'w') as host_file:
            host_file.write(ordered_dump(self.host_vars))
        print('Host vars file %s modified and backup created' % yml_path)
        print('Ansible role must be launched to apply changes e.g. :')
        print('./play.py %s %s install' % (self.args.host, ni_id))
def remove_instance(self):
    """Interactively remove one instance from the host inventory file (with a timestamped backup)."""
    host = self.args.host
    instance_id = self.args.instance_id
    print('---\nRemoving %s on %s\n---' % (instance_id, host))
    print(ordered_dump(self.host_instances[instance_id]))
    print('Ansible role must be launched to apply changes before removing e.g. :')
    print('.... after removing from inventory, playbook will have to be hand made')
    print("./play.py -e 'app_instance_to_uninstall=' %s %s uninstall"
          % (host, instance_id))
    if not ui_confirm('Sure', False):
        return
    yml_path = os.path.join(host_vars_path, host + '.yml')
    # keep a timestamped backup before rewriting the host vars file
    shutil.copyfile(yml_path, yml_path + '.%s' % datetime.datetime.now())
    # drop the first instance whose id matches
    instances = self.host_vars[self.conf['instance_list_token']]
    id_token = self.conf['instance_id_token']
    match = next((n for n, entry in enumerate(instances) if entry[id_token] == instance_id), None)
    if match is not None:
        del instances[match]
    with open(yml_path, 'w') as host_file:
        host_file.write(ordered_dump(self.host_vars))
    print('Host vars file %s modified and backup created' % yml_path)
def touch_instance(self):
    """Interactively review/update each variable of an instance, then rewrite the host vars file."""
    def role_vars(_role):
        # return the role-variable definition matching _role from configuration
        # NOTE(review): falls through to implicit None for an unknown role —
        # rv.get() below would then fail; confirm roles are always valid here
        for _rv in self.conf['instance_role_vars']:
            if _rv.get('role') == _role:
                return _rv
    print('touching %s on %s' % (self.args.instance_id, self.args.host))
    rv = role_vars(self.host_instances[self.args.instance_id]['role'])
    for k, v in self.host_instances[self.args.instance_id].items():
        # role and instance id are identity fields: never editable here
        if k in ['role', self.conf['instance_id_token']]:
            continue
        print('\n%s = %s' % (k, v))
        if ui_confirm('change value', False):
            # description is free text: no type check, but mandatory
            if k == 'description':
                self.host_instances[self.args.instance_id][k] = self.ui_input_check(k, None, None, True)
                continue
            # look the variable up first in the role's mandatory vars, then optional
            if rv.get('mandatory') is not None:
                _type = rv['mandatory'].get(k)
                if _type is not None:
                    self.new_value(self.host_instances[self.args.instance_id], k, _type,
                                   self.default_value_of(rv, self.host_instances[self.args.instance_id], k), True)
                    continue
            if rv.get('optional') is not None:
                _type = rv['optional'].get(k)
                if _type is not None:
                    self.new_value(self.host_instances[self.args.instance_id], k, _type,
                                   self.default_value_of(rv, self.host_instances[self.args.instance_id], k), False)
                    continue
    print('---\nTouching %s on %s\n---' % (self.args.instance_id, self.args.host))
    print(ordered_dump(self.host_instances[self.args.instance_id]))
    if ui_confirm('Sure ?', False):
        # timestamped backup of the host vars file before rewriting it
        shutil.copyfile(
            os.path.join(host_vars_path, self.args.host + '.yml'),
            os.path.join(host_vars_path, self.args.host + '.yml.%s' % datetime.datetime.now()))
        with open(os.path.join(host_vars_path, self.args.host + '.yml'), 'w') as host_file:
            host_file.write(ordered_dump(self.host_vars))
        print('Host vars file %s modified and backup created' % os.path.join(host_vars_path,
                                                                             self.args.host + '.yml'))
        print('Ansible role must be launched to apply changes e.g. :')
        print('./play.py %s %s reinstall' % (self.args.host, self.args.instance_id))
def upgrade_instance(self):
    """Interactively set app_version / app_old_version on one instance and rewrite the host vars file."""
    host = self.args.host
    instance_id = self.args.instance_id
    instance = self.host_instances[instance_id]
    print('upgrading %s on %s' % (instance_id, host))
    print(ordered_dump(instance))
    # loop until a non-empty new version is typed in
    new_version = ''
    while not new_version:
        new_version = input('New version (app_version) ? ')
    # loop until a valid backup directory name is typed in
    old_version = None
    while old_version is None:
        old_version = self.ui_input_check('backup name (app_old_version) ', 'dir_name',
                                          instance['app_version'], True)
    print('---\nUpgrading %s on %s from %s to %s (backup in %s):\n---' % (
        instance_id,
        host,
        instance.get('app_version'),
        new_version,
        old_version))
    if ui_confirm('Sure ?', False):
        instance['app_version'] = new_version
        instance['app_old_version'] = old_version
        yml_path = os.path.join(host_vars_path, host + '.yml')
        # timestamped backup before rewriting the host vars file
        shutil.copyfile(yml_path, yml_path + '.%s' % datetime.datetime.now())
        with open(yml_path, 'w') as host_file:
            host_file.write(ordered_dump(self.host_vars))
        print('Host vars file %s modified and backup created' % yml_path)
        print('Ansible role must be launched to apply changes e.g. :')
        print('./play.py %s %s upgrade' % (host, instance_id))
def instances_versions(self):
    """Print, per host, the distinct app_version values of instances of a chosen role."""
    role_names = [rv['role'] for rv in self.conf['instance_role_vars']]
    role = role_names[ui_select(role_names,
                                prompt='Select a role for selecting instances to update :',
                                mandatory=True)]
    versions_by_host = {}
    for host in self.list_hosts():
        self.load_host_vars(host)
        for inst in self.host_instances.values():
            if inst['role'] != role:
                continue
            version = inst.get('app_version', '')
            # record each distinct version once per host
            if version not in versions_by_host.setdefault(host, []):
                versions_by_host[host].append(version)
    print(versions_by_host)
def upgrade_instances(self):
    """
    Bulk upgrade: pick a role and a current version, then set app_version /
    app_old_version on every matching instance across all hosts, rewriting
    each host vars file with a timestamped backup.
    """
    print('upgrading multiple instances')
    roles = [r['role'] for r in self.conf['instance_role_vars']]
    r = roles[ui_select(roles, prompt='Select a role for selecting instances to update :', mandatory=True)]
    # make current version list :
    cur_versions = []
    for h in self.list_hosts():
        self.load_host_vars(h)
        for instance in self.host_instances.values():
            if instance['role'] == r:
                # .get keeps instances without app_version from raising KeyError
                # (consistent with instances_versions)
                version = instance.get('app_version', '')
                if version not in cur_versions:
                    cur_versions.append(version)
    if len(cur_versions) == 0:
        print('No instances found in inventory with the role "%s"' % r)
        return
    # select current version to upgrade:
    cv = cur_versions[ui_select(cur_versions, prompt='Select a version to upgrade for the role "%s" :' % r,
                                default_index=1,
                                mandatory=True)]
    new_version = ''
    while new_version == '':
        new_version = input('New version ? ')
    old_version = None
    while old_version is None:
        old_version = self.ui_input_check('backup name (app_old_version)', 'dir_name', cv, True)
    # list instances selected by version and role to confirm
    print('Upgrading ---')
    instances_to_upgrade = {}
    upgrade_count = 0
    for h in self.list_hosts():
        self.load_host_vars(h)
        for instance in self.host_instances.values():
            if instance['role'] == r and instance.get('app_version', '') == cv:
                upgrade_count += 1
                if instances_to_upgrade.get(h) is None:
                    instances_to_upgrade[h] = []
                # NOTE(review): hard-coded 'app_instance_id' here, while other
                # methods use conf['instance_id_token'] — confirm they match
                instances_to_upgrade[h].append(instance['app_instance_id'])
                print('%s on %s ( %s )' % (instance['app_instance_id'], h, instance.get('description', '')))
    print('--- these %s instance(s) ' % str(upgrade_count))
    print('from version "%s" to "%s" ( old version as "%s" ) into inventory' % (cv, new_version, old_version))
    if ui_confirm('Sure', False):
        for h in instances_to_upgrade.keys():
            self.load_host_vars(h)
            for i in instances_to_upgrade[h]:
                self.host_instances[i]['app_version'] = new_version
                self.host_instances[i]['app_old_version'] = old_version
            # timestamped backup of each touched host vars file
            shutil.copyfile(
                os.path.join(host_vars_path, h + '.yml'),
                os.path.join(host_vars_path, h + '.yml.%s' % datetime.datetime.now()))
            with open(os.path.join(host_vars_path, h + '.yml'), 'w') as host_file:
                host_file.write(ordered_dump(self.host_vars))
        print('Host vars files modified and backups created')
        print('Ansible role must be launched on hosts and instances to apply changes e.g. :')
        print('./play.py -s upgrade | grep %s | grep %s' % (r, old_version))
def print_instance(self):
    """Dump the YAML definition of the selected instance to stdout."""
    instance_id = self.args.instance_id
    print('Definition of %s on %s :' % (instance_id, self.args.host))
    print(ordered_dump(self.host_instances[instance_id]))
def message_from_instance(self):
    """Render the configured default message template with the instance's variables and print it."""
    template = Template(self.conf['message_default_template'])
    print(template.render(self.host_instances[self.args.instance_id]))
def parse_args(self):
    """Build the CLI parser, parse arguments, and interactively fill in a missing host / instance when the requested action needs one."""
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.description = \
        "prints in stdout full inventory in csv format\n\n" + \
        "Inventory management :\n" + \
        "- list hosts and instances\n" + \
        "- add / upgrade instances\n" + \
        "- touch (update) instance inventory data\n" + \
        "- remove instance in inventory\n" + \
        "- print instance details\n\n" + \
        "- using configuration file (see for details): %s" % configuration_file
    parser.add_argument("host", nargs='?', help='host, if not present, specified in interactive prompt')
    parser.add_argument("instance_id", nargs='?',
                        help='instance, if not present, specified in interactive prompt')
    parser.add_argument("-e", "--export", help="export full inventory in csv format")
    parser.add_argument("-f", "--filter", help="filter inventory output with regular expression")
    parser.add_argument("-i", "--print-instance", help="print instance", action="store_true",
                        default=False)
    parser.add_argument("-l", "--list-instances", help="list instances of host", action="store_true",
                        default=False)
    parser.add_argument("-L", "--list-hosts", help="list hosts", action="store_true",
                        default=False)
    parser.add_argument("-m", "--message-from-instance",
                        help="makes a message with instance informations using template", action="store_true",
                        default=False)
    parser.add_argument("-n", "--new-instance", help="add a new instance on a host", action="store_true",
                        default=False)
    # parser.add_argument("-N", "--new-host", help="add a new host in inventory", action="store_true",
    #                     default=False)
    parser.add_argument("-r", "--remove-instance", help="remove instance in inventory", action="store_true",
                        default=False)
    parser.add_argument("-t", "--touch-instance", help="touch / update instance inventory data",
                        action="store_true", default=False)
    parser.add_argument("-u", "--upgrade-instance", help="upgrade an instance, managing version and backup",
                        action="store_true", default=False)
    parser.add_argument("-U", "--upgrade-instances",
                        help="upgrade a set of instances managing version and backup selected by role and version",
                        action="store_true", default=False)
    parser.add_argument("-v", "--instances-versions",
                        help="list versions for each instance of a given role, sorted by host",
                        action="store_true", default=False)
    self.args = parser.parse_args()
    # prompt for a host when the requested action needs one and none was given
    if self.args.host is None and \
            (self.args.list_instances or
             self.args.message_from_instance or
             self.args.new_instance or
             self.args.upgrade_instance or
             self.args.print_instance or
             self.args.remove_instance or
             self.args.touch_instance):
        hl = list(self.list_hosts())
        h_choices = ['{0} ( {1} )'.format(h, self.get_host_description(h)) for h in hl]
        self.args.host = hl[ui_select(h_choices, prompt='Select a host', mandatory=True)]
    if self.args.host is not None:
        self.load_host_vars(self.args.host)
    # prompt for an instance when the requested action needs one and none was given
    if self.args.instance_id is None and \
            (self.args.upgrade_instance or
             self.args.message_from_instance or
             self.args.print_instance or
             self.args.remove_instance or
             self.args.touch_instance):
        il = list(self.host_instances.keys())
        choices = ['{0} ( {1} )'.format(i, self.host_instances[i].get('description', '')) for i in il]
        self.args.instance_id = il[ui_select(choices, prompt="Select an instance on %s" % self.args.host,
                                             mandatory=True)]
# top level
def run(self):
    """Entry point: parse arguments, then dispatch to the selected action (each action exits)."""
    self.parse_args()
    args = self.args
    if args.export:
        self.export(args.filter)
        exit(0)
    if args.print_instance:
        self.print_instance()
        exit(0)
    if args.message_from_instance:
        self.message_from_instance()
        exit(0)
    if args.list_instances:
        for instance_id in self.host_instances.keys():
            print(instance_id)
        exit(0)
    if args.list_hosts:
        for host in self.list_hosts():
            print(host)
        exit(0)
    if args.new_instance:
        self.new_instance()
        exit(0)
    if args.remove_instance:
        self.remove_instance()
        exit(0)
    if args.touch_instance:
        self.touch_instance()
        exit(0)
    if args.upgrade_instance:
        self.upgrade_instance()
        exit(0)
    if args.upgrade_instances:
        self.upgrade_instances()
        exit(0)
    if args.instances_versions:
        self.instances_versions()
        exit(0)
    # default action: dump the full inventory as CSV on stdout
    self.export(args.filter, True)
if __name__ == '__main__':
    # command line entry point
    Inventory().run()
ansible-paquerette-dev/play.py 0000775 0000000 0000000 00000036272 14154403771 0016766 0 ustar 00root root 0000000 0000000 #! /usr/bin/env python3
"""
15/03/2019 Jerome Marchini
paquerette.eu
"""
import sys
import os
import subprocess
import yaml
import argparse
from jinja2 import Template
# YAML configuration file
configuration_file = 'paquerette_utils.conf.yml'
host_vars_path = './host_vars'
sys.tracebacklimit = 0
class Playbook:
    """In-memory model of a single-play ansible playbook, rendered to YAML text by content()."""
    def __init__(self):
        self.host = None     # target host or group name
        self.roles = []      # roles to run in the play
        self.vars = {}       # play-level variables (typically an instance definition)
        self.become = True   # run the play with privilege escalation

    def content(self):
        """
        Render the playbook as YAML text.

        String variable values are wrapped in double quotes so they stay
        strings in YAML. The quoting is applied to a local copy: the original
        implementation mutated self.vars in place, so calling content() twice
        double-quoted every value.
        :return: playbook YAML as a string
        """
        quoted_vars = {k: ('"%s"' % v) if isinstance(v, str) else v
                       for k, v in self.vars.items()}
        # NOTE(review): the embedded indentation of this template may have been
        # altered by archive extraction — confirm it produces valid YAML nesting
        pl_template = \
            "---" +\
            "\n\n- hosts: {{ host }}" +\
            "{% if vars %}" +\
            "\n\n vars:" +\
            "{% for key, value in vars.items() %}" +\
            "\n {{ key }}: {{ value }}" +\
            "{% endfor %}" +\
            "{% endif %}" +\
            "\n\n roles:" +\
            "{% for role in roles %}" +\
            "\n - {{ role }}" +\
            "{% endfor %}" +\
            "\n\n become: {{ become }}"
        return Template(pl_template).render(host=self.host, vars=quoted_vars,
                                            roles=self.roles, become=self.become)
class Play:
    """Builds a one-shot ansible playbook (interactively or from CLI args), runs it, then removes it."""
    def __init__(self):
        self.args = None
        # loading configuration
        with open(configuration_file, 'r') as stream:
            self.conf = yaml.load(stream, Loader=yaml.FullLoader)
        self.run_playbook = True
        self.playbook = Playbook()
        self.playbook_file_name = self.conf['playbook_file_name']
    # facilities
    # ui facilities
    @staticmethod
    def ui_confirm(msg, default):
        """
        prompt with message and return true or false or default if no response
        :param msg: message shown before the (Y/n) / (y/N) suffix
        :param default: value returned on empty input
        :return: True or False
        """
        valid = ['y', 'Y', 'n', 'N']
        if default:
            msg = msg + ' (Y/n) ? '
            not_expected = ['n', 'N']
        else:
            msg = msg + ' (y/N) ? '
            not_expected = ['y', 'Y']
        i = ''
        while i not in valid:
            i = input(msg)
            # empty answer accepts the default
            if i == '':
                break
        if i in not_expected:
            return not default
        else:
            return default
    @staticmethod
    def ui_select(choices, prompt=None, confirm_message=None, refresh=None, mandatory=False, default_index=None,
                  default_confirm=True):
        """
        loop until user makes a selection between choices
        :param choices: list of choices
        :param prompt: prompt string
        :param confirm_message: when set, ask for confirmation of the selection
        :param refresh: callback (choices : None)
        :param mandatory: cannot exit without selection
        :param default_index: from 1 to len(choices)
        :param default_confirm: default confirmation if confirm message is set on
        :return: index selected from 0 to len(choices) -1
        """
        if prompt is None:
            prompt = "Please select into :"
        if len(choices) == 0:
            if mandatory:
                raise Exception("%s : no choices valid" % prompt)
            else:
                return None
        print(prompt)
        selected = None
        while selected is None:
            for i, c in enumerate(choices):
                print("{0} - {1}".format(i + 1, c))
            # set select prompt
            select_prompt = 'Select (1-%d)' % (len(choices))
            if refresh:
                select_prompt = select_prompt + ' (r=refresh)'
            if default_index:
                select_prompt = select_prompt + ' [default=%s]' % default_index
            selected = input("%s ? " % select_prompt)
            # default
            if selected == '' or selected is None:
                if default_index:
                    selected = str(default_index)
                else:
                    selected = None
            if selected is None:
                if mandatory:
                    continue
                else:
                    break
            # refresh
            if refresh and selected in ['r', 'R']:
                selected = None
                default_index = None
                refresh(choices)
                continue
            # validate choice
            if not selected.isdigit() or int(selected) > len(choices) or int(selected) < 1:
                print("Selection is not valid, please retry")
                selected = None
                continue
            # confirm choice
            if confirm_message:
                if selected:
                    c = Play.ui_confirm('{0} "{1}"'.format(confirm_message, choices[int(selected) - 1]),
                                        default_confirm)
                else:
                    c = Play.ui_confirm('{0} none'.format(confirm_message), default_confirm)
                if not c:
                    selected = None
                    continue
        # return None or 'natural' selected index : 0 - len(choices)-1
        if selected:
            return int(selected) - 1
        else:
            return None
    # low level methods
    def instance_id_of(self, instance):
        """
        return instance_id of an instance
        :param instance: instance definition (dict loaded from host vars)
        :return: value of the configured instance id token
        """
        iit = self.conf.get('instance_id_token', None)
        if iit is None:
            raise Exception('instance_id_token not found in configuration file')
        return instance.get(iit)
    @staticmethod
    def get_host_description(_host):
        # read the host vars file and return its optional 'description' field
        with open(os.path.join(host_vars_path, _host + '.yml'), 'r') as host_file:
            host_vars = yaml.load(host_file, Loader=yaml.FullLoader)
            return host_vars.get('description', '')
    def get_instance_list(self, _host):
        """
        return the list of instances of a host
        :param _host: host name (resolves to host_vars/<host>.yml)
        :return: list of instance definitions, [] when none declared
        """
        ilt = self.conf.get('instance_list_token', None)
        if ilt is None:
            raise Exception('instance_list_token not found in configuration file')
        with open(os.path.join(host_vars_path, _host + '.yml'), 'r') as host_file:
            host_vars = yaml.load(host_file, Loader=yaml.FullLoader)
            return host_vars.get(ilt, [])
    def get_instance(self, _host, _instance_id):
        """
        return the instance definition of _instance_id on _host
        :param _host: host name
        :param _instance_id: instance id to look up
        :return: instance definition dict
        :raises Exception: when the instance is not declared on the host
        """
        for instance in self.get_instance_list(_host):
            if self.instance_id_of(instance) == _instance_id:
                return instance
        raise Exception("Instance %s not found in host %s" % (_instance_id, _host))
    def make_interactive(self):
        """
        prepare playbook in interactive mode
        :return:
        """
        def complete_cmd_with_opt_args(_cmd):
            # append the optional tag / extra-vars arguments to the echoed command line
            if self.args.list_tags:
                _cmd += " --list-tags"
            if self.args.apply_tags:
                _cmd += ' --tags "%s"' % self.args.apply_tags
            if self.args.skip_tags:
                _cmd += ' --skip-tags "%s"' % self.args.skip_tags
            if self.args.extra_vars:
                _cmd += ' --extra-vars "%s"' % self.args.extra_vars
            return _cmd
        hl = self.conf["host_list"]
        h_choices = ['{0} ( {1} )'.format(h, self.get_host_description(h)) for h in self.conf["host_list"]]
        sh = hl[self.ui_select(h_choices, prompt='Select an host :', mandatory=True)]
        self.playbook.host = sh
        il = self.get_instance_list(self.playbook.host)
        i_choices = None
        s = 0
        if il is not None:
            i_choices = ['{0} ( {1} )'.format(self.instance_id_of(i), i.get('description', '')) for i in il]
            i_choices.append("or select a role ...")
            s = self.ui_select(i_choices, prompt='Select an instance :', default_index=len(i_choices))
        # role mode: no instance list, or the extra "or select a role ..." entry was chosen
        role_mode = (il is None) or (s == len(i_choices)-1)
        if role_mode:
            rl = self.conf["role_list"]
            sr = rl[self.ui_select(rl, prompt='Select a role :', mandatory=True)]
            self.playbook.roles.append(sr)
            cmd = complete_cmd_with_opt_args("run %s -r %s %s" % (sys.argv[0], sh, sr))
            # --list-tags implies running without confirmation (dry run)
            self.run_playbook = \
                self.args.list_tags or \
                self.ui_confirm(cmd, False)
        else:
            sr = il[s].get(self.conf.get('role_name_token'))
            self.playbook.roles.append(sr)
            self.playbook.vars = il[s]
            cl = self.conf["command_list"]
            cs = cl[self.ui_select(cl, prompt='Select an command :', mandatory=True)]
            self.playbook.vars[self.conf.get('command_name_token')] = cs
            cmd = complete_cmd_with_opt_args("run %s %s %s %s" % (sys.argv[0], sh, self.instance_id_of(il[s]), cs))
            self.run_playbook = \
                self.args.list_tags or \
                self.ui_confirm(cmd, False)
    def make_from_args(self):
        """
        prepare playbook according to args
        :return:
        """
        self.playbook.host = self.args.host
        if self.args.item:
            if self.args.role:
                # -r : "item" is a role name, run it directly on the host
                self.playbook.roles.append(self.args.item)
            else:
                # "item" is an instance id: its role and vars come from host vars
                if not self.args.command:
                    raise Exception("Command must be defined when running on an instance")
                instance = self.get_instance(self.playbook.host, self.args.item)
                self.playbook.roles.append(instance.get(self.conf.get('role_name_token')))
                self.playbook.vars = instance
                self.playbook.vars[self.conf.get('command_name_token')] = self.args.command
        if not (self.args.yes or self.args.list_tags):
            self.run_playbook = self.ui_confirm('run playbook {0}'.format(self.playbook_file_name), False)
    # high level methods
    def make_playbook(self):
        # build the playbook (interactive or from args) and write it to disk
        if self.args.playbook_name:
            self.playbook_file_name = self.args.playbook_name
        self.playbook.become = self.conf.get('become', True)
        if self.args.interactive:
            self.make_interactive()
        else:
            self.make_from_args()
        with open(self.playbook_file_name, mode='w') as y:
            y.write(self.playbook.content())
    def schedule_at(self):
        # schedule a non-interactive play.py run through the at(1) command
        if self.args.command:
            job_name = "{0}_{1}_{2}" .format(self.args.host, self.args.item, self.args.command)
        elif self.args.item:
            job_name = "{0}_{1}" .format(self.args.host, self.args.item)
        else:
            job_name = self.args.host
        at_args = '-y -p {0} {1}'.format(job_name+'.yml', self.args.host)
        if self.args.item:
            at_args += ' ' + self.args.item
        if self.args.role:
            at_args = '-r ' + at_args
        if self.args.command:
            at_args += ' ' + self.args.command
        if self.args.apply_tags:
            at_args += " --apply-tags " + self.args.apply_tags
        if self.args.skip_tags:
            at_args += " --skip-tags " + self.args.skip_tags
        if self.args.extra_vars:
            at_args += " --extra-vars " + self.args.extra_vars
        at_args += " >> {0}.log" .format(job_name)
        # pipe "echo ./play.py ..." into "at <time>" (equivalent of: echo ... | at <time>)
        play_cmd = subprocess.Popen(["echo", "./play.py {0}".format(at_args)], stdout=subprocess.PIPE)
        at_cmd = subprocess.Popen(["at", self.args.at_time], stdin=play_cmd.stdout, stdout=subprocess.PIPE)
        play_cmd.stdout.close()
        exit(at_cmd.wait())
    def script_command(self):
        # print one templated script line per instance of every configured host
        for h in self.conf["host_list"]:
            for i in self.get_instance_list(h):
                i['host'] = h
                i['command'] = self.args.script_command
                print(Template(self.conf['script_default_template']).render(i))
    def launch_playbook(self):
        # run ansible-playbook on the generated playbook, forwarding optional args
        cmd = ["ansible-playbook", self.playbook_file_name]
        if self.args.list_tags:
            cmd.append("--list-tags")
        if self.args.apply_tags:
            cmd.extend(["--tags", self.args.apply_tags])
        if self.args.skip_tags:
            cmd.extend(["--skip-tags", self.args.skip_tags])
        if self.args.extra_vars:
            cmd.extend(["--extra-vars", self.args.extra_vars])
        if self.args.inv:
            cmd.extend(["-i", self.args.inv])
        print(' '.join(cmd))
        rc = subprocess.call(cmd)
        return rc
    def neutralize_playbook(self):
        # remove the generated playbook file after the run
        os.unlink(self.playbook_file_name)
    def parse_args(self):
        """Build the CLI parser and store the parsed arguments; exit with usage when no action is requested."""
        parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
        parser.description = \
            "prepares an ansible playbook and run a role\n\n" + \
            "play.py -i : interactive mode\n\n" + \
            "play.py [myhost|mygroup] : runs empty playbook (gathering facts) on target\n\n" + \
            "running a role :\n" + \
            "play.py -r [myhost|mygroup] myrole: runs myrole on target\n\n" + \
            "can be used to run on an instance (object in a list of instance in the specified host_vars file) " + \
            "according rules fixed in configuration file :\n" + \
            "play.py myhost myinstance mycommand : runs defined role with defined command on instance target\n"
        parser.add_argument("host", nargs='?', help='host to run the role, or group if no instance is defined')
        parser.add_argument("item", nargs='?', help='instance to run the role and command on, role if r option is set')
        parser.add_argument("command", nargs='?',
                            help='command to run on instance : install, reinstall, upgrade, uninstall')
        parser.add_argument("--apply-tags", help="applying tags")
        parser.add_argument("--skip-tags", help="skipping tags")
        parser.add_argument("--list-tags", help="list tags (dry run only for tag listing, includes 'yes' mode)",
                            action="store_true", default=False)
        parser.add_argument("-a", "--at-time", help="scheduling with at command at at_time")
        parser.add_argument("-e", "--extra-vars", help="applying extra vars")
        parser.add_argument("--inv", help="inventory")
        parser.add_argument("-i", "--interactive", help="interactive mode", action="store_true", default=False)
        parser.add_argument("-p", "--playbook-name",
                            help="name of playbook file name (by default : playbook_file_name in configuration)")
        parser.add_argument("-r", "--role", help='"item" argument as a role, ignoring "command" argument',
                            action="store_true", default=False)
        parser.add_argument("-s", "--script-command", help="make a script on all instances with a SCRIPT_COMMAND")
        parser.add_argument("-y", "--yes", help="launch ansible without prompt (not in interactive) ",
                            action="store_true", default=False)
        self.args = parser.parse_args()
        if not (self.args.interactive or self.args.host or self.args.script_command):
            parser.print_usage()
            exit(1)
    # top level
    def run(self):
        """
        main
        :return:
        """
        self.parse_args()
        if self.args.script_command:
            self.script_command()
            exit(0)
        self.make_playbook()
        # -a : hand the run over to at(1); schedule_at exits the process
        if self.args.at_time:
            self.schedule_at()
        if self.run_playbook:
            rc = self.launch_playbook()
            self.neutralize_playbook()
            if rc == 0:
                print("success")
            else:
                print("playbook failed with return code %d" % rc)
            exit(rc)
if __name__ == '__main__':
    # command line entry point
    Play().run()
ansible-paquerette-dev/pop.py 0000775 0000000 0000000 00000002322 14154403771 0016604 0 ustar 00root root 0000000 0000000 #! /usr/bin/env python3
"""
25/01/2020 Jerome Marchini
paquerette.eu
"""
import sys
import os
import argparse
sys.tracebacklimit = 0
class Pop:
    """Pop and execute the first command of a stack file of shell commands."""
    def __init__(self):
        # parsed command line arguments, set by parse_args()
        self.args = None

    def pop(self):
        """
        Execute the first line of the stack file, rewrite the file with the
        remaining lines (or delete it when exhausted), and return the command's
        exit status (0 when there is nothing to do).
        """
        stack_path = self.args.stack
        if not os.path.exists(stack_path):
            return 0
        with open(stack_path, 'r') as fh:
            lines = fh.readlines()
        head = lines[0].rstrip('\n') if lines else ''
        tail = lines[1:]
        if tail:
            # keep the not-yet-executed commands for the next call
            with open(stack_path, 'w') as fh:
                fh.writelines(tail)
        else:
            # stack exhausted: remove the file entirely
            os.unlink(stack_path)
        return os.system(head) if head else 0

    def parse_args(self):
        """Build the CLI parser and store the parsed arguments."""
        parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
        parser.description = \
            "run first line of a script and pop it\n" + \
            "and delete the script when empty"
        parser.add_argument("stack", help='stack file of commands to pop and execute')
        self.args = parser.parse_args()

    def run(self):
        """Entry point: parse arguments then pop, exiting with the command's status."""
        self.parse_args()
        exit(self.pop())
if __name__ == '__main__':
    # os.path.dirname(sys.argv[0]) is '' when invoked as "python pop.py" from
    # the script's own directory, and os.chdir('') raises FileNotFoundError;
    # resolve to an absolute path first
    os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))
    Pop().run()
ansible-paquerette-dev/project_installation/ 0000775 0000000 0000000 00000000000 14154403771 0021661 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/project_installation/readme.md 0000664 0000000 0000000 00000005052 14154403771 0023442 0 ustar 00root root 0000000 0000000 # Project using paquerette ansible
How to use the paquerette ansible roles and utility in your own ansible role deployment
## installation of new project
clone ansible-paquerette project
git clone git@git.paquerette.eu:paquerette/infrastructure/ansible-paquerette.git
* create a project directory
* create an `inventory` subdirectory
* create an `inventory/group_vars` subdirectory
* create an `inventory/group_vars/base_server` subdirectory
* create an `inventory/group_vars/secret` subdirectory
* create an `inventory/host_vars` subdirectory
* create a `roles` subdirectory
* create an `sshkeys/backup_master` subdirectory
* create an `inventory/hosts` file
* copy the following files
* paquerette_utils.conf.yml to the project directory
* play.py to the project directory
* doc/base_server_template.yml to project/inventory/group_vars/base_server.yml
## Manage ssh keys
* Create ssh keys
* admin keys in `/sshkeys` directory
* backup keys named backup_ras in `/sshkeys/backup_master` directory
Update **master_backup_key_path** in inventory/group_vars/base_server/base_server.yml with the proper path
## configure ansible
Add ansible.cfg to the project directory and configure the **roles_path** and **inventory** paths
```ini
[defaults]
roles_path = /roles:/ansible-paquerette/roles:/etc/ansible/roles
inventory = /inventory
```
## using hetzner
* Add admin key to Hetzner
Create a Hetzner server
cd ../ansible-paquerette/
ansible-playbook ./roles/hetzner/hcloud_create_server.yml -e "hcloud_token=iJZZ6FhybUQYvDxGg3iccXzvNBykrIZbhTxi5TKzFpDqQ9lvHHfEUQA1d8UzsSLN server_name=labsrv ssh_key=hetzneradminkey@jlebleu"
cd ../deploy/
## adding a new host
* create/update local paquerette_utils.conf.yml with the new host
* Update hosts with server name and ip address
* Create host_vars/server_name.yml
```yml
admin_key: ssh-rsa AAAAB3NzaC1.........IWwOkQ272PEb8FsR6/UhAhf1OSc9.....
```
### Secure ssh
```bash
./play.py -r labsrv base_secure_ssh
```
Create file inventory/group_vars/secret/secret.yml and update smtp variables
```yml
base_postmaster: "postmaster@mydomain.fr"
smtp_shortfrom: "postmaster"
smtp_domain: "mydomain.fr"
smtp_authtype: "PLAIN"
smtp_secure_starttls: "STARTTLS"
smtp_tlsv1: "tlsv1"
smtp_use_tls: "True"
smtp_secure: "tls"
smtp_auth: "1"
smtp_host: "smtp.domain.net"
smtp_port: "587"
smtp_user: "postmaster@mydomain.fr"
smtp_password: "dsfsdfsdfsdfsdf"
```
Start the base server role :
```bash
./play.py -r labsrv base_server
```
ansible-paquerette-dev/roles/ 0000775 0000000 0000000 00000000000 14154403771 0016556 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_backup/ 0000775 0000000 0000000 00000000000 14154403771 0021022 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_backup/README.md 0000664 0000000 0000000 00000000221 14154403771 0022274 0 ustar 00root root 0000000 0000000 # backup task for application and database
uses [backupninja](https://0xacab.org/riseuplabs/backupninja)
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/_app_backup/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0022147 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_backup/tasks/install.yml 0000664 0000000 0000000 00000002413 14154403771 0024340 0 ustar 00root root 0000000 0000000 ---
- name: "template for backup - no database"
template:
src: app_backup_no_database.j2
dest: "{{ backup_item_dir }}/20-{{ app_instance_id }}.sh"
mode: 0640
when: database_type == "None"
- name: "template for backup - mysql"
template:
src: app_backup_mysql.j2
dest: "{{ backup_item_dir }}/20-{{ app_instance_id }}.sh"
mode: 0640
when: database_type == "mysql"
- name: "template for backup - postgres"
template:
src: app_backup_postgres.j2
dest: "{{ backup_item_dir }}/20-{{ app_instance_id }}.sh"
mode: 0640
when: database_type == "postgres"
- name: "template for backup - mongodb"
template:
src: app_backup_mongodb.j2
dest: "{{ backup_item_dir }}/20-{{ app_instance_id }}.sh"
mode: 0640
when: database_type == "mongodb"
- name: "template for backup - snap_wekan"
template:
src: app_backup_snap_wekan.j2
dest: "{{ backup_item_dir }}/20-{{ app_instance_id }}.sh"
mode: 0640
when: database_type == "snap_wekan"
- name: "template for backup - postgres docker"
template:
src: app_backup_postgres_docker.j2
dest: "{{ backup_item_dir }}/20-{{ app_instance_id }}.sh"
mode: 0640
when: database_type == "postgres_docker"
ansible-paquerette-dev/roles/_app_backup/tasks/main.yml 0000664 0000000 0000000 00000000307 14154403771 0023616 0 ustar 00root root 0000000 0000000 ---
- import_tasks: install.yml
when: app_run in ['install', 'reinstall']
#- import_tasks: upgrade.yml
# when: app_run == 'upgrade'
- import_tasks: uninstall.yml
when: app_run == 'uninstall'
ansible-paquerette-dev/roles/_app_backup/tasks/uninstall.yml 0000664 0000000 0000000 00000000201 14154403771 0024674 0 ustar 00root root 0000000 0000000 ---
- name: "remove backup task"
file:
path: "{{ backup_item_dir }}/20-{{ app_instance_id }}.sh"
state: absent
ansible-paquerette-dev/roles/_app_backup/templates/ 0000775 0000000 0000000 00000000000 14154403771 0023020 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_backup/templates/app_backup_mongodb.j2 0000664 0000000 0000000 00000001067 14154403771 0027073 0 ustar 00root root 0000000 0000000 # Note: the spaces around the equal sign ('=') are optional.
when = everyday at {{ backup_app_service_conf_time | mandatory }}
rsync -aAx --del {{ app_instance_root }} {{ backup_prod_dir }}/{{ app_instance_id }}
rc=$?; if [[ $rc != 0 ]]; then error "app" ; fi
cd {{ backup_prod_dir }}/{{ app_instance_id }}
rm {{ backup_prod_dir }}/{{ app_instance_id }}/{{ database_name }}.dump
chmod 777 {{ backup_prod_dir }}/{{ app_instance_id }}
/usr/bin/mongodump -d {{ database_name }} --archive > {{ database_name }}.dump
rc=$?; if [[ $rc != 0 ]]; then fatal "mongodump" ; fi ansible-paquerette-dev/roles/_app_backup/templates/app_backup_mysql.j2 0000664 0000000 0000000 00000000773 14154403771 0026616 0 ustar 00root root 0000000 0000000 # Note: the spaces around the equal sign ('=') are optional.
when = everyday at {{ backup_app_service_conf_time | mandatory }}
rsync -aAx --del {{ app_instance_root }} {{ backup_prod_dir }}/{{ app_instance_id }}
rc=$?; if [[ $rc != 0 ]]; then error "app" ; fi
cd {{ backup_prod_dir }}/{{ app_instance_id }}
rm {{ backup_prod_dir }}/{{ app_instance_id }}/{{ database_name }}.sql
/usr/bin/mysqldump --opt --force {{ database_name }} > {{ database_name }}.sql
rc=$?; if [[ $rc != 0 ]]; then fatal "sql" ; fi
ansible-paquerette-dev/roles/_app_backup/templates/app_backup_no_database.j2 0000664 0000000 0000000 00000000405 14154403771 0027701 0 ustar 00root root 0000000 0000000 # Note: the spaces around the equal sign ('=') are optional.
when = everyday at {{ backup_app_service_conf_time | mandatory }}
rsync -aAx --del {{ app_instance_root }} {{ backup_prod_dir }}/{{ app_instance_id }}
rc=$?; if [[ $rc != 0 ]]; then error "app" ; fi
ansible-paquerette-dev/roles/_app_backup/templates/app_backup_postgres.j2 0000664 0000000 0000000 00000001061 14154403771 0027306 0 ustar 00root root 0000000 0000000 # Note: the spaces around the equal sign ('=') are optional.
when = everyday at {{ backup_app_service_conf_time | mandatory }}
rsync -aAx --del {{ app_instance_root }} {{ backup_prod_dir }}/{{ app_instance_id }}
rc=$?; if [[ $rc != 0 ]]; then error "app" ; fi
cd {{ backup_prod_dir }}/{{ app_instance_id }}
rm {{ backup_prod_dir }}/{{ app_instance_id }}/{{ database_name }}.sql
chmod 777 {{ backup_prod_dir }}/{{ app_instance_id }}
su -c "/usr/bin/pg_dump {{ database_name }} > {{ database_name }}.sql" postgres
rc=$?; if [[ $rc != 0 ]]; then fatal "sql" ; fi ansible-paquerette-dev/roles/_app_backup/templates/app_backup_postgres_docker.j2 0000664 0000000 0000000 00000001211 14154403771 0030632 0 ustar 00root root 0000000 0000000 # Note: the spaces around the equal sign ('=') are optional.
when = everyday at {{ backup_app_service_conf_time | mandatory }}
cd {{ backup_prod_dir }}/{{ app_instance_id }}
rm {{ backup_prod_dir }}/{{ app_instance_id }}/{{ database_docker_name }}.sql
chmod 777 {{ backup_prod_dir }}/{{ app_instance_id }}
/usr/bin/docker run --rm --link {{ database_docker_name }}:db --net {{ database_docker_network }} -v {{ backup_prod_dir }}/{{ app_instance_id }}:/backup -e PGPASSWORD={{ database_password }} {{ postgres_docker_image }} pg_dumpall -h db -U postgres -f /backup/{{ database_docker_name }}_all.sql
rc=$?; if [[ $rc != 0 ]]; then fatal "sql" ; fi ansible-paquerette-dev/roles/_app_backup/templates/app_backup_snap_wekan.j2 0000664 0000000 0000000 00000000671 14154403771 0027574 0 ustar 00root root 0000000 0000000 # Note: the spaces around the equal sign ('=') are optional.
when = everyday at {{ backup_app_service_conf_time | mandatory }}
mkdir {{ backup_prod_dir }}/wekan
snap stop wekan.wekan
version=$(snap list | grep wekan | awk -F ' ' '{print $3}')
/snap/wekan/$version/bin/mongodump --archive --port 27019 > {{ backup_prod_dir }}/wekan/wekan_database_$version.dump
rc=$?; if [[ $rc != 0 ]]; then fatal "mongodump" ; fi
snap start wekan.wekan
ansible-paquerette-dev/roles/_app_backup_data/ 0000775 0000000 0000000 00000000000 14154403771 0022013 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_backup_data/README.md 0000664 0000000 0000000 00000000035 14154403771 0023270 0 ustar 00root root 0000000 0000000 # Backup task for Apps Data
ansible-paquerette-dev/roles/_app_backup_data/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0023140 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_backup_data/tasks/install.yml 0000664 0000000 0000000 00000000336 14154403771 0025333 0 ustar 00root root 0000000 0000000 ---
- name: "template for backup - no database"
template:
src: app_backup_data.j2
dest: "{{ backup_item_dir }}/20-{{ app_instance_id }}-data.sh"
mode: 0640
when: app_backup_data == "yes"
ansible-paquerette-dev/roles/_app_backup_data/tasks/main.yml 0000664 0000000 0000000 00000000307 14154403771 0024607 0 ustar 00root root 0000000 0000000 ---
- import_tasks: install.yml
when: app_run in ['install', 'reinstall']
#- import_tasks: upgrade.yml
# when: app_run == 'upgrade'
- import_tasks: uninstall.yml
when: app_run == 'uninstall'
ansible-paquerette-dev/roles/_app_backup_data/tasks/uninstall.yml 0000664 0000000 0000000 00000000206 14154403771 0025672 0 ustar 00root root 0000000 0000000 ---
- name: "remove backup task"
file:
path: "{{ backup_item_dir }}/20-{{ app_instance_id }}-data.sh"
state: absent
ansible-paquerette-dev/roles/_app_backup_data/templates/ 0000775 0000000 0000000 00000000000 14154403771 0024011 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_backup_data/templates/app_backup_data.j2 0000664 0000000 0000000 00000000401 14154403771 0027337 0 ustar 00root root 0000000 0000000 # Note: the spaces around the equal sign ('=') are optional.
when = everyday at {{ backup_app_service_conf_time | mandatory }}
rsync -aAx --del {{ app_data }} {{ backup_prod_dir }}/{{ app_instance_id }}.data
rc=$?; if [[ $rc != 0 ]]; then error "app" ; fi
ansible-paquerette-dev/roles/_app_backup_instance/ 0000775 0000000 0000000 00000000000 14154403771 0022706 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_backup_instance/README.md 0000664 0000000 0000000 00000000606 14154403771 0024167 0 ustar 00root root 0000000 0000000 # backup instance while upgrade (changing the version of an application)
- vars:
- **app_old_version**: name of the save point
- **backup_version_dir**: place where all is backed up
applied only on upgrade
these backups are not included and exported with production backup
**NOTE**: if the version backup directory exists, it is not clobbered
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/_app_backup_instance/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0024033 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_backup_instance/tasks/main.yml 0000664 0000000 0000000 00000006277 14154403771 0025516 0 ustar 00root root 0000000 0000000 ---
- name: "create dir for backup current version {{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}"
file:
path: "{{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}"
state: directory
mode: 0777
- name: "create {{ rev_proxy }} dir for backup current version {{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}"
file:
path: "{{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/{{ rev_proxy }}"
state: directory
mode: 0777
- name: "create app dir for backup current version {{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}"
file:
path: "{{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/app"
state: directory
mode: 0777
- name: "create sql dir for backup current version {{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}"
file:
path: "{{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/sql"
state: directory
mode: 0777
when: database_type == "mysql" or database_type == "postgres"
- name: "create database dir for backup current version {{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}"
file:
path: "{{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/database"
state: directory
mode: 0777
when: database_type == "mongodb"
- name: "backup {{ rev_proxy }} config"
command: "cp /etc/{{ rev_proxy }}/sites-available/{{ app_instance_id }}.conf {{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/{{ rev_proxy }}"
args:
creates: "{{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/{{ rev_proxy }}/{{ app_instance_id }}.conf"
- name: "backup app"
command: "rsync -aAx --del {{ app_instance_root }} {{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/app"
args:
creates: "{{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/app/{{ app_instance_id }}"
- name: "backup postgres database"
shell: "/usr/bin/pg_dump {{ database_name }} > {{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/sql/{{ database_name }}.sql"
become_user: "postgres"
vars:
ansible_ssh_pipelining: true
args:
creates: "{{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/sql/{{ database_name }}.sql"
when: database_type == "postgres"
- name: "backup mysql/mariadb database"
shell: "/usr/bin/mysqldump --opt {{ database_name }} > {{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/sql/{{ database_name }}.sql"
args:
creates: "{{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/sql/{{ database_name }}.sql"
when: database_type == "mysql"
- name: "backup mongodb database"
shell: "/usr/bin/mongodump -d {{ database_name }} --archive > {{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/database/{{ database_name }}.dump"
vars:
ansible_ssh_pipelining: true
args:
creates: "{{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version }}/database/{{ database_name }}.dump"
when: database_type == "mongodb"
ansible-paquerette-dev/roles/_app_log_inventory/ 0000775 0000000 0000000 00000000000 14154403771 0022453 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_log_inventory/README.md 0000664 0000000 0000000 00000001074 14154403771 0023734 0 ustar 00root root 0000000 0000000 # backup task for application and database
- vars:
- **log_type**: install / reinstall / upgrade / uninstall / server
- app_instance_to_uninstall: mandatory for uninstall, il non defined, assertion fails
to avoid collision on installation for instances:
if app_instance_id is found in inventory and app_run is install then assertion fails
if app_instance_id is not found in inventory and app_run is reinstall then assertion fails
to confirm uninstall, \ must be set with \ value
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/_app_log_inventory/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0024262 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_log_inventory/defaults/main.yml 0000664 0000000 0000000 00000000125 14154403771 0025727 0 ustar 00root root 0000000 0000000 ---
# defaults values
# app_domain: "None"
app_instance_to_uninstall: "None"
ansible-paquerette-dev/roles/_app_log_inventory/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0023600 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_log_inventory/tasks/install.yml 0000664 0000000 0000000 00000003434 14154403771 0025775 0 ustar 00root root 0000000 0000000 ---
- name: "check {{ app_instance_id }} in the inventory"
shell: grep '^"{{ app_instance_id }}"' {{ base_prod_ansible_log }}/inventory | wc -l
register: in_inventory
changed_when: False
when: log_type == "install" or log_type == "reinstall"
- name: "Assert inventory and installation or reinstallation for instance_id = {{ app_instance_id }}"
assert:
that:
- (in_inventory.stdout != "0" and app_run == "reinstall") or (in_inventory.stdout == "0" and app_run == "install")
msg: "{{ app_instance_id }}: found {{ in_inventory.stdout }} times in inventory, app_run : {{ app_run }} \
-- if found then use reinstall, else use install --"
when: log_type == "install" or log_type == "reinstall"
- name: "template log inventory - install"
template:
src: app_log.j2
dest: "{{ base_prod_ansible_log }}/{{ app_instance_id }}.log"
mode: 0600
when: log_type == "install"
- name: "template log inventory - upgrade"
template:
src: app_log_upg.j2
dest: "{{ base_prod_ansible_log }}/{{ app_instance_id }}.{{ app_version }}.log"
mode: 0600
when: log_type == "upgrade"
- name: "inventory for app"
lineinfile:
path: "{{ base_prod_ansible_log }}/inventory"
regexp: '^"{{ app_instance_id }}"'
line: '"{{ app_instance_id }}","{{ app_domain | default("") }}","{{ app_program }}","{{ app_version | default("") }}"'
state: present
create: yes
mode: 0600
when: log_type != "server"
- name: "inventory for server"
lineinfile:
path: "{{ base_prod_ansible_log }}/inventory"
regexp: '^"{{ server_name }}"'
line: '"{{ server_name }}","{{ server_domain | default("") }}","",""'
state: present
create: yes
mode: 0600
when: log_type == "server"
ansible-paquerette-dev/roles/_app_log_inventory/tasks/main.yml 0000664 0000000 0000000 00000000325 14154403771 0025247 0 ustar 00root root 0000000 0000000 ---
- import_tasks: install.yml
when: log_type in ['install', 'reinstall', 'upgrade']
#- import_tasks: install.yml
# when: log_type == 'upgrade'
- import_tasks: uninstall.yml
when: log_type == 'uninstall'
ansible-paquerette-dev/roles/_app_log_inventory/tasks/uninstall.yml 0000664 0000000 0000000 00000003464 14154403771 0026343 0 ustar 00root root 0000000 0000000 ---
- name: "check {{ app_instance_id }} in the inventory"
shell: grep '^"{{ app_instance_id }}"' {{ base_prod_ansible_log }}/inventory | wc -l
register: in_inventory
changed_when: False
- name: "Assert {{ app_instance_id }} is in inventory"
assert:
that:
- (in_inventory.stdout != "0" and app_run == "uninstall")
msg: "{{ app_instance_id }} not found times in inventory -
if fails (relaunch uninstall after first failure), use extravar: -e 'no_assert_in_inventory=yes'"
when: not ( no_assert_in_inventory is defined and no_assert_in_inventory == 'yes' )
- name: "Assert {{ app_instance_to_uninstall }} == {{ app_instance_id }}"
assert:
that:
- app_instance_to_uninstall == app_instance_id
msg: " app_instance_to_uninstall must be set with {{ app_instance_id }} -
if fails, use extravar: -e 'app_instance_to_uninstall={{ app_instance_id }}'"
- name: "remove install log"
file:
path: "{{ base_prod_ansible_log }}/{{ app_instance_id }}.log"
state: absent
- name: "remove upgrade logs"
shell: "rm {{ base_prod_ansible_log }}/{{ app_instance_id }}.*"
args:
warn: no
failed_when: False
changed_when: False
- name: "remove from inventory - application"
lineinfile:
path: "{{ base_prod_ansible_log }}/inventory"
regexp: '^"{{ app_instance_id }}"'
line: '"{{ app_instance_id }}","{{ app_domain | default("") }}","{{ app_program }}","{{ app_version | default("") }}"'
state: absent
when: log_type != "server"
- name: "remove from inventory - server"
lineinfile:
path: "{{ base_prod_ansible_log }}/inventory"
regexp: '^"{{ server_name }}"'
line: '"{{ server_name }}","{{ server_domain | default("") }}","",""'
state: absent
when: log_type == "server"
ansible-paquerette-dev/roles/_app_log_inventory/templates/ 0000775 0000000 0000000 00000000000 14154403771 0024451 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_log_inventory/templates/app_log.j2 0000664 0000000 0000000 00000002057 14154403771 0026333 0 ustar 00root root 0000000 0000000 [{{ app_program }}:{{ log_type }}:{{ app_instance_id }}]
hostname = {{ ansible_hostname | mandatory }}
paquerette_name = {{ paquerette_name | mandatory }}
domain = {{ app_domain | default('None') }}
version = {{ app_version | default('None') }}
app_instance_id = {{ app_instance_id | mandatory }}
app_port = {{ app_port | default('None') }}
rev_proxy = {{ rev_proxy | default('None') }}
instance = {{ app_instance_root | default('None') }}
instance_www_root = {{ app_instance_www_root | default('None') }}
data = {{ app_data | default('None') }}
log_dest = {{ www_log | mandatory }}/{{ app_instance_id }}
backup_prod_app = {{ backup_prod_dir }}/{{ app_instance_id }}
backup_data = {{ backup_prod_dir }}/{{ app_data | default('None') }}
database_type = {{ database_type | default('None') }}
database_name = {{ database_name | default('None') }}
database_user = {{ database_user | default('None') }}
database_password = {{ database_password | default('None') }}
monit_request = {{ monit_request | default('None') }}
monit_expect = {{ monit_expect | default('None') }}
ansible-paquerette-dev/roles/_app_log_inventory/templates/app_log_upg.j2 0000664 0000000 0000000 00000001075 14154403771 0027205 0 ustar 00root root 0000000 0000000 [{{ app_program | mandatory }}:{{ log_type }}:{{ app_instance_id }}]
hostname = {{ ansible_hostname | mandatory }}
domain = {{ app_domain | default('None') }}
app_instance_id = {{ app_instance_id | mandatory }}
rev_proxy = {{ rev_proxy | default('None') }}
old_version = {{ app_old_version | default('None') }}
version = {{ app_version | default('None') }}
version_backup = {{ backup_version_dir }}/{{ app_instance_id }}/{{ app_old_version | default('None') }}
database_type = {{ database_type | default('None') }}
database_name = {{ database_name | default('None') }}
ansible-paquerette-dev/roles/_app_logrotate/ 0000775 0000000 0000000 00000000000 14154403771 0021555 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_logrotate/README.md 0000664 0000000 0000000 00000000153 14154403771 0023033 0 ustar 00root root 0000000 0000000 # log rotation for access to application, keeping logs for one year
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/_app_logrotate/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0022702 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_logrotate/tasks/main.yml 0000664 0000000 0000000 00000001357 14154403771 0024357 0 ustar 00root root 0000000 0000000 ---
- name: "log dest {{ www_log }}/{{ app_instance_id }}"
file:
state: directory
path: "{{ www_log }}/{{ app_instance_id }}"
when: app_run in ['install', 'reinstall']
- name: "logrotate for {{ www_log }}/{{ app_instance_id }}"
template:
src: "app_logrotate_{{ rev_proxy }}.j2"
dest: "/etc/logrotate.d/{{ app_instance_id }}"
mode: 0644
when: app_run in ['install', 'reinstall']
- name: "remove logrotate configuration"
file:
state: absent
path: "/etc/logrotate.d/{{ app_instance_id }}"
when: app_run == 'uninstall'
- name: "remove logs"
file:
state: absent
path: "{{ www_log }}/{{ app_instance_id }}"
failed_when: False
when: app_run == 'uninstall'
ansible-paquerette-dev/roles/_app_logrotate/templates/ 0000775 0000000 0000000 00000000000 14154403771 0023553 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_logrotate/templates/app_logrotate_apache2.j2 0000664 0000000 0000000 00000001127 14154403771 0030234 0 ustar 00root root 0000000 0000000 {{ www_log | mandatory }}/{{ app_instance_id }}/*.log {
weekly
missingok
rotate 54
compress
delaycompress
notifempty
create 0640 www-data adm
sharedscripts
prerotate
if [ -d /etc/logrotate.d/httpd-prerotate ]; then \
run-parts /etc/logrotate.d/httpd-prerotate; \
fi \
endscript
postrotate
if /etc/init.d/apache2 status > /dev/null ; then \
/etc/init.d/apache2 reload > /dev/null; \
fi;
endscript
}
ansible-paquerette-dev/roles/_app_logrotate/templates/app_logrotate_nginx.j2 0000664 0000000 0000000 00000000776 14154403771 0030065 0 ustar 00root root 0000000 0000000 {{ www_log | mandatory }}/{{ app_instance_id }}/*.log {
weekly
missingok
rotate 54
compress
delaycompress
notifempty
create 0640 www-data adm
sharedscripts
prerotate
if [ -d /etc/logrotate.d/httpd-prerotate ]; then \
run-parts /etc/logrotate.d/httpd-prerotate; \
fi \
endscript
postrotate
invoke-rc.d nginx rotate >/dev/null 2>&1
endscript
}
ansible-paquerette-dev/roles/_app_monit/ 0000775 0000000 0000000 00000000000 14154403771 0020703 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_monit/README.md 0000664 0000000 0000000 00000000521 14154403771 0022160 0 ustar 00root root 0000000 0000000 # monitoring of an application
- vars:
- monit_request: http request, by default : **app_domain**
- monit_expect: expected string expression in http response (regular expression)
- monit_timeout : **3**
uses
- [monit](https://mmonit.com/documentation/)
- http_check utility (see templates)
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/_app_monit/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0022512 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_monit/defaults/main.yml 0000664 0000000 0000000 00000000416 14154403771 0024162 0 ustar 00root root 0000000 0000000 ---
# request, timeout and expected response for monitoring applications
monit_request: "https://{{ app_domain }}"
# regular expression pattern expected, optional
monit_expect: ""
# default timeout
monit_timeout: 3
# default status 200
monit_status: ""
ansible-paquerette-dev/roles/_app_monit/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0022030 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_monit/tasks/install.yml 0000664 0000000 0000000 00000001176 14154403771 0024226 0 ustar 00root root 0000000 0000000 ---
- name: "ensure presence of {{ base_prod_options }}/http_check/"
file:
path: "{{ base_prod_options }}/http_check/"
state: directory
- name: "http check facility {{ base_prod_options }}/http_check//http_check.py"
template:
src: "http_check.py.j2"
dest: "{{ base_prod_options }}/http_check/http_check.py"
mode: "755"
- name: "monitoring configuration for {{ app_domain }}"
template:
src: "app_monit.j2"
dest: "/etc/monit/conf.d/{{ app_instance_id }}.conf"
register: monit_conf
- name: reload monit
service: name=monit state=reloaded
when: monit_conf.changed
ansible-paquerette-dev/roles/_app_monit/tasks/main.yml 0000664 0000000 0000000 00000000261 14154403771 0023476 0 ustar 00root root 0000000 0000000 ---
- import_tasks: install.yml
when: app_run in ['install', 'reinstall']
tags: _app_monit
- import_tasks: uninstall.yml
when: app_run == 'uninstall'
tags: _app_monit
ansible-paquerette-dev/roles/_app_monit/tasks/uninstall.yml 0000664 0000000 0000000 00000000424 14154403771 0024564 0 ustar 00root root 0000000 0000000 ---
- name: "remove monitoring configuration for {{ app_domain }}"
file:
path: "/etc/monit/conf.d/{{ app_instance_id }}.conf"
state: absent
register: monit_conf
- name: reload monit
service: name=monit state=reloaded
when: monit_conf.changed
ansible-paquerette-dev/roles/_app_monit/templates/ 0000775 0000000 0000000 00000000000 14154403771 0022701 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_monit/templates/app_monit.j2 0000664 0000000 0000000 00000000600 14154403771 0025120 0 ustar 00root root 0000000 0000000 check program {{ app_domain }}
# by default timeout = 3 seconds, use monit_timeout variable to change it
with path "{{ base_prod_options }}/http_check/http_check.py -t {{ monit_timeout }} {% if monit_expect %}-c {{ monit_expect | quote }}{% endif %} {% if monit_status %}-S {{ monit_status | quote }}{% endif %} {{ monit_request | quote }}"
if status != 0 for 3 cycles then alert
ansible-paquerette-dev/roles/_app_monit/templates/http_check.py.j2 0000775 0000000 0000000 00000003740 14154403771 0025710 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
import argparse
import requests
import re
default_timeout = 1.0
user_agent = 'Monit'
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.description = \
'Check url status code is 200 and optionally content. Exit 0 when success, else 1'
arg_parser.add_argument("url",
help='Url to check, must return status code 200')
arg_parser.add_argument("-c", "--contains",
help="Search pattern in http response")
arg_parser.add_argument("-o", "--output-response",
help="Print http response in stdout.",
action="store_true", default=False)
arg_parser.add_argument("-s", "--silent",
help="Silent mode, no errors, no output",
action="store_true", default=False)
arg_parser.add_argument("-S", "--status",
help="Expected status, if not 200")
arg_parser.add_argument("-t", "--timeout",
help="Timeout for request in seconds, default %s" % str(default_timeout))
args = arg_parser.parse_args()
r = re.compile(args.contains) if args.contains else None
request_timeout = float(args.timeout) if args.timeout else default_timeout
headers = {'User-Agent': user_agent}
try:
response = requests.get(args.url, headers=headers, timeout=request_timeout)
expected_status = args.status if args.status else '200'
if str(response.status_code) != expected_status:
raise Exception("HTTP status code %s" % response.status_code)
if args.output_response:
print(response.text)
if r and not re.search(r, response.text):
raise Exception('Pattern not found : "%s"' % args.contains)
except Exception as E:
if not args.silent:
print(E)
exit(1)
if not (args.silent or args.output_response):
print('ok')
ansible-paquerette-dev/roles/_app_restore_instance/ 0000775 0000000 0000000 00000000000 14154403771 0023124 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_restore_instance/README.md 0000664 0000000 0000000 00000003103 14154403771 0024400 0 ustar 00root root 0000000 0000000 # restore application files and database for the instance
*list*: list tar archive files containing instance, the result is usefull for restore, recover or transfer
*recover*: fetch locally (on the controller) an archive of the instance from **restore_source**
*transfer*: copy an archive of the instance from host/**restore_source** to **transfer_dest** (same format as recover)
*restore*: restore application files and database from "host / **restore_source**"
- vars:
- **app_instance_id**
- **restore_action**: list / recover / transfer / restore
mandatory for list action :
if listing on master backup host, use **app_host** to specify the instance's host
mandatory for restore, transfer and recover actions :
- **restore_source**, the archive file location. use *list* to list all sources available on host or master backup host
optional for restore action :
- from_instance_id: restores files and database with files and database from a different instance
- from_full_archive: **yes**/no change to "no" if the archive contains only the instance (after recover or transfer). The "full archive" contains all host's instances.
- from_version_backup: yes/**no** set to "yes" to restore from unarchived directory created by [automatic backup](../_app_backup_instance/README.md) when upgrading the app
- from_directory: yes/**no** set to yes to read from restore_source without the archive extraction step
- restore_app_file: **yes**/no set to yes to restore also application files (Not the data i.e. nextcloud)
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/_app_restore_instance/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0024733 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_restore_instance/defaults/main.yml 0000664 0000000 0000000 00000001071 14154403771 0026401 0 ustar 00root root 0000000 0000000 ---
# by default for host full archive file
instance_depth_in_archive: "4"
# set to no when extracting from an archive that contains only the instance
from_full_archive: "yes"
# set to yes when extracting from a version backup, overrides from_directory
from_version_backup: "no"
# set to yes to read from restore_source without the archive extraction step
from_directory: "no"
# set to yes if you want to restore application files
restore_app_file: "yes"
database_name: "{{ app_instance_id }}_db"
database_user: "{{ app_instance_id }}_usr"
ansible-paquerette-dev/roles/_app_restore_instance/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0024251 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_app_restore_instance/tasks/main.yml 0000664 0000000 0000000 00000023323 14154403771 0025723 0 ustar 00root root 0000000 0000000 ---
## LIST ARCHIVES AND DIRECTORIES CONTAINING INSTANCE BACKUP
- block:
- name : "listing old versions on host for {{ app_instance_id }}"
shell: "ls {{ backup_version_dir }}/{{ app_instance_id }}"
failed_when: False
register: version_dir_out
- name : "listing archives on host for {{ backup_base_dir }}"
shell: "ls {{ backup_base_dir }}/*.tar.gz"
register: dir_out
- name: "searching for {{ app_instance_id }} in archives"
shell: "tar --list --file {{ archive_name }} | grep {{ backup_prod_dir[1:] }}/{{ app_instance_id }}/ -m 1 | wc -l"
args:
warn: False
loop: "{{ dir_out.stdout_lines }}"
loop_control:
loop_var: archive_name
register: archive_instance
- name: "see if {{ backup_master_slaves_location }}/{{ app_host }} exists (master backup)"
stat:
path: "{{ backup_master_slaves_location }}/{{ app_host }}"
register: slaves_backup
when: app_host is defined
when: restore_action == 'list'
- block:
- name : "list archives on master for {{ app_host }}"
shell: "ls {{ backup_master_slaves_location }}/{{ app_host }}/*.tar.gz"
failed_when: False
register: dir_out
- name: "Searching for {{ app_instance_id }} in archives"
shell: "tar --list --file {{ archive_name }} | grep {{ backup_prod_dir[1:] }}/{{ app_instance_id }}/ -m 1 | wc -l"
args:
warn: False
loop: "{{ dir_out.stdout_lines }}"
loop_control:
loop_var: archive_name
register: archive_instance_master
when: restore_action == 'list' and app_host is defined and slaves_backup.stat.exists
## PRINTING RESULT
- block:
- name: " Old versions in {{ backup_version_dir }}/{{ app_instance_id }}"
debug:
msg: '{{ version_dir_out.stdout_lines }}'
- name: " Archives containing {{ app_instance_id }}"
debug:
msg: '{% for item in archive_instance.results %}{% if item.stdout == "1" %}{{ item.archive_name }} {% endif %}{% endfor %}'
- name: " Archives is 'slaves' directory containing {{ app_instance_id }} for {{ app_host }} "
debug:
msg: '{% for item in archive_instance_master.results %}{% if item.stdout == "1" %}{{ item.archive_name }} {% endif %}{% endfor %}'
when: app_host is defined and slaves_backup.stat.exists
- name: "End"
meta: end_play
when: restore_action == 'list'
## EXTRACT OR COPY FOR RESTORE, TRANSFER OR RECOVER
- block:
- name: "clean up tmp dir for extraction or copy"
file:
path: "/tmp/restore_{{ app_instance_id }}/"
state: absent
- name: "setup tmp dir for extraction or copy"
file:
path: "/tmp/restore_{{ app_instance_id }}/"
state: directory
- name: "Extract {{ from_instance_id | default(app_instance_id) }} from {{ restore_source }} as full archive into /tmp"
unarchive:
src: "{{ restore_source }}"
dest: "/tmp/restore_{{ app_instance_id }}/"
remote_src: True
extra_opts:
- "{{ backup_prod_dir[1:] }}/{{ from_instance_id | default(app_instance_id) }}"
- "--strip-components={{ instance_depth_in_archive }}"
when: from_full_archive == "yes" and from_version_backup == "no" and from_directory == "no"
- name: "Extract {{ from_instance_id | default(app_instance_id) }} from {{ restore_source }} as simple archive into /tmp"
unarchive:
src: "{{ restore_source }}"
dest: "/tmp/restore_{{ app_instance_id }}/"
remote_src: True
when: from_full_archive == "no" and from_version_backup == "no" and from_directory == "no"
- name: "setup app dir for copy"
file:
path: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}"
state: directory
when: from_version_backup == "yes" or from_directory == "yes"
- name: "copy application files from version backup"
command: "rsync -a {{ restore_source }}/app/{{ from_instance_id | default(app_instance_id) }}/ /tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/{{ app_instance_id }}/"
when: from_version_backup == "yes"
- name: "copy sql database files from version backup"
shell: "cp -p {{ restore_source }}/sql/* /tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/"
failed_when: false
when: from_version_backup == "yes"
- name: "copy other database files from version backup"
shell: "cp -p {{ restore_source }}/database/* /tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/"
failed_when: false
when: from_version_backup == "yes"
- name: "copy application files from source directory"
command: "rsync -ax {{ restore_source }}/ /tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/"
when: from_version_backup == "no" and from_directory == "yes"
- name: "make archive for recover or transfer"
archive:
path: "/tmp/restore_{{ app_instance_id }}/{{ from_instance_id | default(app_instance_id) }}"
dest: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}.tar.gz"
when: restore_action == 'recover' or restore_action == 'transfer'
- name: "recover archive to local directory"
fetch:
src: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}.tar.gz"
dest: "./"
flat: yes
when: restore_action == 'recover'
- name: "transfer archive to {{ transfer_dest }}"
copy:
remote_src: True
src: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}.tar.gz"
dest: "{{ transfer_dest }}"
when: restore_action == 'transfer'
when: restore_action == 'recover' or restore_action == 'restore' or restore_action == 'transfer'
## RESTORATION
- block:
- name: "check {{ app_instance_id }} in the inventory"
shell: grep '^"{{ app_instance_id }}"' {{ base_prod_ansible_log }}/inventory | wc -l
register: in_inventory
changed_when: False
- name: "Assert {{ app_instance_id }} is in inventory before restoration"
assert:
that:
- in_inventory.stdout != "0"
msg: "{{ app_instance_id }} not found in inventory"
- name: "Rename backup directory"
command: "mv /tmp/restore_{{ app_instance_id }}/{{ from_instance_id }} /tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}"
failed_when: False
when: from_instance_id is defined
- name: "Rename database dump - sql"
shell: "mv /tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/*.sql /tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/{{ database_name }}.sql"
failed_when: False
when: from_instance_id is defined
- name: "Rename database dump - mongodb"
shell: "mv /tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/*.dump /tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/{{ database_name }}.dump"
failed_when: False
when: from_instance_id is defined
- name: "restore application files"
command: "rsync -ax --del /tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/{{ from_instance_id | default(app_instance_id) }}/ {{ app_instance_root }}/"
when: restore_app_file == 'yes'
- name: "chown/grp {{ app_user }} / {{ app_group }}"
file:
dest: "{{ app_instance_root }}"
owner: "{{ app_user }}"
group: "{{ app_group }}"
recurse: True
when: app_user is defined and restore_app_file == 'yes'
- name: "restore mysql/mariadb database"
mysql_db:
name: "{{ database_name }}"
encoding: "utf8"
state: import
target: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/{{ database_name }}.sql"
when: database_type == "mysql"
- name: "ensure mysql/mariadb user is present"
mysql_user:
name: "{{ database_user | mandatory }}"
password: "{{ database_password | mandatory }}"
priv: '{{ database_name }}.*:ALL,GRANT'
state: present
when: database_type == "mysql"
- name: "ensure postgres user {{ database_user }} is present"
postgresql_user:
name: "{{ database_user | mandatory }}"
password: "{{ database_password | mandatory }}"
state: present
become_user: postgres
vars:
ansible_ssh_pipelining: true
when: database_type == "postgres"
- name: "drop postgres database"
postgresql_db:
name: "{{ database_name | mandatory }}"
owner: "{{ database_user | mandatory }}"
state: absent
become_user: postgres
vars:
ansible_ssh_pipelining: True
when: database_type == "postgres"
- name: "create empty postgres database"
postgresql_db:
name: "{{ database_name | mandatory }}"
owner: "{{ database_user | mandatory }}"
encoding: UTF-8
lc_collate: fr_FR.UTF-8
lc_ctype: fr_FR.UTF-8
template: template0
become_user: postgres
vars:
ansible_ssh_pipelining: True
when: database_type == "postgres"
- name: "restore postgres database"
postgresql_db:
name: "{{ database_name | mandatory }}"
owner: "{{ database_user | mandatory }}"
encoding: UTF-8
lc_collate: fr_FR.UTF-8
lc_ctype: fr_FR.UTF-8
template: template0
state: restore
target: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/{{ database_name }}.sql"
become_user: postgres
vars:
ansible_ssh_pipelining: True
when: database_type == "postgres"
- name: "restore mongodb database"
shell: "/usr/bin/mongorestore --drop --archive=/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/{{ database_name }}.dump"
when: database_type == "mongodb"
when: restore_action == 'restore'
- name: "remove tmp dir"
file:
path: "/tmp/restore_{{ app_instance_id }}/"
state: absent
ansible-paquerette-dev/roles/_cleanup_old_domain_name/ 0000775 0000000 0000000 00000000000 14154403771 0023531 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_cleanup_old_domain_name/README.md 0000664 0000000 0000000 00000000412 14154403771 0025005 0 ustar 00root root 0000000 0000000 # Utility role : cleanup old domain name for an application
- vars:
- **app_domain**: full old domain name for the service
- removes and revoke letsencrypt certificate for old domain
usage
requirements :
- role : None
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/_cleanup_old_domain_name/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0025340 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_cleanup_old_domain_name/defaults/main.yml 0000664 0000000 0000000 00000000032 14154403771 0027002 0 ustar 00root root 0000000 0000000 ---
app_run: "uninstall" ansible-paquerette-dev/roles/_cleanup_old_domain_name/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0024656 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_cleanup_old_domain_name/tasks/main.yml 0000664 0000000 0000000 00000000074 14154403771 0026326 0 ustar 00root root 0000000 0000000 ---
- import_role:
name: _letsencrypt_certificate
ansible-paquerette-dev/roles/_create_database/ 0000775 0000000 0000000 00000000000 14154403771 0022004 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_create_database/README.md 0000664 0000000 0000000 00000000304 14154403771 0023260 0 ustar 00root root 0000000 0000000 # database and user creation
- vars:
- **database_name**
- **database_user**
- **database_password**
- **database_type**: mysql / postgres / mongodb
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/_create_database/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0023613 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_create_database/defaults/main.yml 0000664 0000000 0000000 00000000135 14154403771 0025261 0 ustar 00root root 0000000 0000000 ---
database_name: "{{ app_instance_id }}_db"
database_user: "{{ app_instance_id }}_usr"
ansible-paquerette-dev/roles/_create_database/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0023131 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_create_database/tasks/install.yml 0000664 0000000 0000000 00000003262 14154403771 0025325 0 ustar 00root root 0000000 0000000 ---
- name: "Create a mysql/mariadb database with name {{ database_name }} "
mysql_db:
name: "{{ database_name | mandatory }}"
encoding: "utf8"
state: present
when: database_type == "mysql"
- name: "Create a mysql/mariadb user with all privileges on database {{ database_name }} "
mysql_user:
name: "{{ database_user | mandatory }}"
password: "{{ database_password | mandatory }}"
priv: '{{ database_name }}.*:ALL,GRANT'
state: present
when: database_type == "mysql"
- name: "create postgres user {{ database_user }}"
postgresql_user:
name: "{{ database_user | mandatory }}"
password: "{{ database_password | mandatory }}"
state: present
become_user: postgres
vars:
ansible_ssh_pipelining: true
when: database_type == "postgres"
- name: "create postgres database {{ database_name }}"
postgresql_db:
name: "{{ database_name | mandatory }}"
owner: "{{ database_user | mandatory }}"
encoding: UTF-8
lc_collate: fr_FR.UTF-8
lc_ctype: fr_FR.UTF-8
template: template0
state: present
become_user: postgres
vars:
ansible_ssh_pipelining: true
when: database_type == "postgres"
- name: "mongodb database {{ database_name }} and user"
mongodb_user:
login_user: "{{ mongodb_admin_user }}"
login_password: "{{ mongodb_admin_password }}"
database: "{{ database_name }}"
name: "{{ database_user }}"
password: "{{ database_password }}"
state: present
roles:
- { db: "admin", role: "readWrite" }
- { db: "{{ database_name }}", role: "readWrite" }
when: database_type == "mongodb"
ansible-paquerette-dev/roles/_create_database/tasks/main.yml 0000664 0000000 0000000 00000000377 14154403771 0024607 0 ustar 00root root 0000000 0000000 ---
- import_tasks: install.yml
when: app_run in ['install', 'reinstall'] and database_type != "None"
#- import_tasks: upgrade.yml
# when: app_run == 'upgrade'
- import_tasks: uninstall.yml
when: app_run == 'uninstall' and database_type != "None"
ansible-paquerette-dev/roles/_create_database/tasks/uninstall.yml 0000664 0000000 0000000 00000002232 14154403771 0025664 0 ustar 00root root 0000000 0000000 ---
- name: drop user with all privileges on database {{ database_name }} "
mysql_user:
name: "{{ database_user | mandatory }}"
state: absent
when: database_type == "mysql"
- name: "drop mysql/mariadb database {{ database_name }} "
mysql_db:
name: "{{ database_name | mandatory }}"
state: absent
when: database_type == "mysql"
- name: "drop postgres database {{ database_name }}"
postgresql_db:
name: "{{ database_name | mandatory }}"
state: absent
become_user: postgres
vars:
ansible_ssh_pipelining: true
when: database_type == "postgres"
- name: "drop postgres user {{ database_user }}"
postgresql_user:
name: "{{ database_user | mandatory }}"
state: absent
become_user: postgres
vars:
ansible_ssh_pipelining: true
when: database_type == "postgres"
- name: "drop mongodb users on {{ database_name }}"
shell: 'mongo {{ database_name }} --eval "db.dropAllUsers()"'
when: database_type == "mongodb"
- name: "drop mongodb {{ database_name }}"
shell: 'mongo {{ database_name }} --eval "db.dropDatabase()"'
when: database_type == "mongodb"
ansible-paquerette-dev/roles/_letsencrypt_certificate/ 0000775 0000000 0000000 00000000000 14154403771 0023633 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_letsencrypt_certificate/README.md 0000664 0000000 0000000 00000000355 14154403771 0025115 0 ustar 00root root 0000000 0000000 # letsencrypt certificates management
needs to stop reverse proxy (standalone mode)
automatic renew twice per week see base_server
- vars:
- **app_domain**
uses mail address
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/_letsencrypt_certificate/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0024760 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_letsencrypt_certificate/tasks/install.yml 0000664 0000000 0000000 00000001456 14154403771 0027157 0 ustar 00root root 0000000 0000000 ---
- name: "test certificate presence"
stat:
path: "{{ letsencrypt_cert_root }}/{{ app_domain | quote }}"
register: cert
- name: "stop apache2"
service: name=apache2 state=stopped
when: (rev_proxy == "apache2") and not (cert.stat.exists)
- name: "stop nginx"
service: name=nginx state=stopped
when: (rev_proxy == "nginx") and not (cert.stat.exists)
- name: "letsencrypt certificate for {{ app_domain }} {{ certbot_grant_command_standalone }}"
command: "{{ certbot_grant_command_standalone }}"
when: not (cert.stat.exists)
tags:
- letsencrypt
- name: "start apache2"
service: name=apache2 state=started
when: (rev_proxy == "apache2") and not (cert.stat.exists)
- name: "start nginx"
service: name=nginx state=started
when: (rev_proxy == "nginx") and not (cert.stat.exists)
ansible-paquerette-dev/roles/_letsencrypt_certificate/tasks/main.yml 0000664 0000000 0000000 00000000307 14154403771 0026427 0 ustar 00root root 0000000 0000000 ---
- import_tasks: install.yml
when: app_run in ['install', 'reinstall']
#- import_tasks: upgrade.yml
# when: app_run == 'upgrade'
- import_tasks: uninstall.yml
when: app_run == 'uninstall'
ansible-paquerette-dev/roles/_letsencrypt_certificate/tasks/uninstall.yml 0000664 0000000 0000000 00000000403 14154403771 0027511 0 ustar 00root root 0000000 0000000 ---
- name: "test certificate presence"
stat:
path: "{{ letsencrypt_cert_root }}/{{ app_domain | quote }}"
register: cert
- name: "revoke and delete certificate for {{ app_domain }}"
command: "{{ certbot_revoke_command }}"
when: cert.stat.exists ansible-paquerette-dev/roles/_master_backup_server/ 0000775 0000000 0000000 00000000000 14154403771 0023123 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_master_backup_server/README.md 0000664 0000000 0000000 00000000447 14154403771 0024407 0 ustar 00root root 0000000 0000000 # Configuration of backup master
backup master retrieves backups from slaves
requirements :
- \
vars:
- **backup_slaves** : list of slaves
- **remove_slaves** : list of slaves to be removed (This will not remove any previous backup files)
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/_master_backup_server/handlers/ 0000775 0000000 0000000 00000000000 14154403771 0024723 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_master_backup_server/handlers/main.yml 0000664 0000000 0000000 00000000124 14154403771 0026367 0 ustar 00root root 0000000 0000000 ---
- name: reload monit master backup server
service: name=monit state=reloaded
ansible-paquerette-dev/roles/_master_backup_server/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0024250 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_master_backup_server/tasks/main.yml 0000664 0000000 0000000 00000007431 14154403771 0025724 0 ustar 00root root 0000000 0000000 ---
- name: "slaves backup location root"
file:
state: directory
path: "{{ backup_master_slaves_location }}"
- name: "slaves backup locations"
file:
path: "{{backup_master_slaves_location}}/{{ backup_slave }}"
state: directory
loop: "{{ backup_slaves | default([]) }}"
loop_control:
loop_var: backup_slave
- name: "ensure presence of /home/{{backup_master_user}}/{{ master_backup_key_path }}"
file:
path: "/home/{{backup_master_user}}/{{ master_backup_key_path }}"
owner: "{{ backup_master_user }}"
group: "{{ backup_master_user }}"
state: directory
- name: "private key"
copy:
src: "~/{{ master_backup_key_file }}"
dest: "/home/{{backup_master_user}}/{{ master_backup_key_file }}"
owner: "{{ backup_master_user }}"
group: "{{ backup_master_user }}"
mode: 0600
- name: "check known host for ssh connection"
command: "ssh-keygen -F {{ backup_slave }}"
changed_when: False
failed_when: False
loop: "{{ backup_slaves | default([]) }}"
loop_control:
loop_var: backup_slave
register: ssh_knowns
# add public key in known host for client
- name: "ssh-keyscan >> /root/.ssh/known_hosts"
shell: "ssh-keyscan {{ ssh_known.backup_slave }} >> /root/.ssh/known_hosts"
loop: "{{ ssh_knowns.results }}"
loop_control:
loop_var: ssh_known
when: ssh_known.stdout == ''
- name: "ensure presence of {{ base_prod_options }}/master_backup/"
file:
path: "{{ base_prod_options }}/master_backup/"
state: directory
- name: "template for daily backup"
template:
src: "retrieve_backups_day.j2"
dest: "{{ base_prod_options }}/master_backup/retrieve_backups_day.sh"
mode: "700"
- name: "template for monthly backup"
template:
src: "retrieve_backups_month.j2"
dest: "{{ base_prod_options }}/master_backup/retrieve_backups_month.sh"
mode: "700"
- name: "cron daily retrieving slaves backups"
cron:
name: "master daily retrieving for {{ backup_slave }}"
hour: "{{ backup_master_hour }}"
minute: "{{ backup_master_minute }}"
job: "{{ base_prod_options }}/master_backup/retrieve_backups_day.sh {{ backup_slave }}"
loop: "{{ backup_slaves | default([]) }}"
loop_control:
loop_var: backup_slave
- name: "Removing cron daily retrieving backups for {{ remove_slave }}"
cron:
name: "master daily retrieving for {{ remove_slave }}"
state: absent
loop: "{{ remove_slaves | default([]) }}"
loop_control:
loop_var: remove_slave
- name: "cron monthly retrieving slaves backups"
cron:
name: "master monthly retrieving for {{ backup_slave }}"
day: "1"
hour: "{{ backup_master_hour }}"
minute: "{{ backup_master_minute }}"
job: "{{ base_prod_options }}/master_backup/retrieve_backups_month.sh {{ backup_slave }}"
loop: "{{ backup_slaves | default([]) }}"
loop_control:
loop_var: backup_slave
- name: "Removing cron monthly retrieving slaves backups for {{ remove_slave }}"
cron:
name: "master monthly retrieving for {{ remove_slave }}"
state: absent
loop: "{{ remove_slaves | default([]) }}"
loop_control:
loop_var: remove_slave
- name: "template for monitoring {{ backup_slave }}"
template:
src: "srv_monit.j2"
dest: "/etc/monit/conf.d/{{ backup_slave }}.conf"
loop: "{{ backup_slaves | default([]) }}"
loop_control:
loop_var: backup_slave
notify: reload monit master backup server
- name: "remove monitoring for {{ remove_slaves }}"
file:
path: "/etc/monit/conf.d/{{ remove_slave }}.conf"
state: absent
loop: "{{ remove_slaves | default([]) }}"
loop_control:
loop_var: remove_slave
notify: reload monit master backup server
ansible-paquerette-dev/roles/_master_backup_server/templates/ 0000775 0000000 0000000 00000000000 14154403771 0025121 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_master_backup_server/templates/retrieve_backups_day.j2 0000664 0000000 0000000 00000001573 14154403771 0031556 0 ustar 00root root 0000000 0000000 #!/bin/bash
# random delay
sleep $[ ( $RANDOM % {{ backup_master_delay }} ) + 1 ]s
LANG=en_US.utf8
day_name=$(date "+%A")
# retrieve true date of the file
file_day=$(ssh -i /home/{{ backup_master_user }}/{{ master_backup_key_file }} {{ backup_master_user }}@$1 stat -c %y {{ backup_base_dir }}/prod.$day_name.tar.gz | cut -d ' ' -f 1)
today=$(date '+%Y-%m-%d')
# check the file is correct
if [ "$file_day" = "$today" ]
then
scp -p -i /home/{{ backup_master_user }}/{{ master_backup_key_file }} {{ backup_master_user }}@$1:{{ backup_base_dir }}/prod.$day_name.tar.gz {{ backup_master_slaves_location }}/$1/prod.$day_name.tar.gz
chown {{ backup_master_user }}:{{ backup_master_user }} {{ backup_master_slaves_location }}/$1/prod.$day_name.tar.gz
else
logger "backup file of the day not available : {{ backup_master_slaves_location }}/$1/prod.$day_name.tar.gz"
exit 1
fi
ansible-paquerette-dev/roles/_master_backup_server/templates/retrieve_backups_month.j2 0000664 0000000 0000000 00000000606 14154403771 0032122 0 ustar 00root root 0000000 0000000 #!/bin/bash
# random delay
sleep $[ ( $RANDOM % {{ backup_master_delay }} ) + 1 ]s
scp -i /home/{{ backup_master_user }}/{{ master_backup_key_file }} {{ backup_master_user }}@$1:{{ backup_base_dir }}/prod.Month.tar.gz {{ backup_master_slaves_location }}/$1/prod.Month.tar.gz
chown {{ backup_master_user }}:{{ backup_master_user }} {{ backup_master_slaves_location }}/$1/prod.Month.tar.gz
ansible-paquerette-dev/roles/_master_backup_server/templates/srv_monit.j2 0000664 0000000 0000000 00000000172 14154403771 0027376 0 ustar 00root root 0000000 0000000 check program {{ backup_slave }}
with path " /bin/ping -c 1 {{ backup_slave }}"
if status != 0 for 3 cycles then alert
ansible-paquerette-dev/roles/_python3/ 0000775 0000000 0000000 00000000000 14154403771 0020321 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_python3/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0021446 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_python3/tasks/main.yml 0000664 0000000 0000000 00000001552 14154403771 0023120 0 ustar 00root root 0000000 0000000 ---
- name: "install python3"
apt:
name: "{{ python3_package }}"
state: present
loop: "{{ python3_package_list }}"
loop_control:
loop_var: python3_package
# UPGRADE PIP CAN BREAK PIP ON XENIAL
# - name: "pip upgrade"
# shell: "pip3 install --upgrade pip"
# changed_when: false
#
- name: "install modules"
pip:
name: "{{ python3_module }}"
executable: pip3
state: present
loop: "{{ python3_module_list }}"
loop_control:
loop_var: python3_module
- name: "ensure presence of {{ base_prod_options }}/json_path/"
file:
path: "{{ base_prod_options }}/json_path/"
state: directory
- name: "http check facility {{ base_prod_options }}/json_path/json_path.py"
template:
src: "json_path.py.j2"
dest: "{{ base_prod_options }}/json_path/json_path.py"
mode: "755"
ansible-paquerette-dev/roles/_python3/templates/ 0000775 0000000 0000000 00000000000 14154403771 0022317 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_python3/templates/json_path.py.j2 0000664 0000000 0000000 00000003171 14154403771 0025172 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
import argparse
import json
import sys
# exception without traceback
sys.tracebacklimit = 0
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.description = \
'Updates or print a value in a JSON file, following a path. Returns "changed" if value changes'
arg_parser.add_argument("json_file", help='Json file to touch ')
arg_parser.add_argument("json_path", help='Path to find the key to update ')
arg_parser.add_argument("value", nargs='?', help='value to assign to key, if None then prints current value')
arg_parser.add_argument("-s", "--silent",
help="Silent mode, no error if key not found",
action="store_true", default=False)
args = arg_parser.parse_args()
with open(args.json_file) as f:
opl = json.load(f)
pl = opl
parent = None
key = None
value = None
if args.json_path:
path = args.json_path.split('/')
parent = None
for obj in path:
if obj == '':
continue
key = obj
value = pl.get(obj)
if value is None and not args.silent:
raise Exception("%s not found in %s" % (args.json_path, args.json_file))
cur = value
parent = pl
pl = cur
if args.value is not None:
if args.value != value:
parent[key] = args.value
with open(args.json_file, mode='w') as w:
w.write(json.dumps(opl, indent=4))
print("changed")
else:
if value is not None:
print(value)
ansible-paquerette-dev/roles/_python3/vars/ 0000775 0000000 0000000 00000000000 14154403771 0021274 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_python3/vars/main.yml 0000664 0000000 0000000 00000000421 14154403771 0022740 0 ustar 00root root 0000000 0000000 ---
python3_package_list: [
"python3",
"python3-pip",
"python3-dev",
"python3-virtualenv",
"build-essential",
"python3-paramiko",
# "python3-lxml",
# "libssl-dev",
# "libffi-dev",
]
python3_module_list: [
# "virtualenv",
]
ansible-paquerette-dev/roles/_ssh_chroot/ 0000775 0000000 0000000 00000000000 14154403771 0021070 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_ssh_chroot/README.md 0000664 0000000 0000000 00000006625 14154403771 0022360 0 ustar 00root root 0000000 0000000 # Ansible Role: SSH chroot jail config
[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-security.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-ssh-chroot-jail)
Configures a chroot jail specifically for the purpose of limiting a set of SSH users to the jail. Useful if you have a server where you need to allow very limited access to a very limited amount of functionality.
## Requirements
Requires OpenSSH server. Doesn't require `geerlingguy.security`, but that role (or one like it) is highly recommended to help lock down your server as much as possible.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
base_chroot_jail: /var/jail
The path to the root of the chroot jail.
ssh_chroot_jail_group_name: ssh_jailed
The group into which jailed users should be added.
ssh_chroot_jail_users:
- name: foo
homedir: /home/foo
shell: /bin/bash
A list of users who should be in the chroot jail. Leave set to the default (`[]`) if you would like to manage users on your own.
ssh_chroot_jail_dirs:
- bin
- dev
- etc
- lib
- lib64
- usr/bin
- usr/lib
- usr/lib64
- home
Base directories that should exist in the jail.
ssh_chroot_jail_devs:
- { dev: 'null', major: '1', minor: '3' }
- { dev: 'random', major: '5', minor: '0' }
- { dev: 'urandom', major: '1', minor: '5' }
- { dev: 'zero', major: '1', minor: '8' }
Devices that should exist in the jail.
ssh_chroot_bins:
- /bin/cp
- /bin/sh
- /bin/bash
- /bin/ls
...
- /usr/bin/tail
- /usr/bin/head
- /usr/bin/awk
- /usr/bin/wc
...
- bin: /usr/bin/which
l2chroot: false
A list of binaries which should be copied over to the jail. Each binary will also have its library dependencies copied into the jail using [`l2chroot`](https://www.cyberciti.biz/files/lighttpd/l2chroot.txt); you can skip that task by setting the `bin` key explicitly and setting `l2chroot: false` as in the last example above.
ssh_chroot_l2chroot_url: https://www.cyberciti.biz/files/lighttpd/l2chroot.txt
ssh_chroot_l2chroot_path: /usr/local/bin/l2chroot
The download URL and path into which `l2chroot` should be installed.
ssh_chroot_copy_extra_items:
- /etc/hosts
- /etc/passwd
- /etc/group
- /etc/ld.so.cache
- /etc/ld.so.conf
- /etc/nsswitch.conf
Extra items which should be copied into the jail.
ssh_chroot_sshd_chroot_jail_config: |
Match group {{ ssh_chroot_jail_group_name }}
ChrootDirectory {{ base_chroot_jail }}
X11Forwarding no
AllowTcpForwarding no
Configuration to add to the server's `sshd_config` controlling how users in the chroot jail group are handled.
## Dependencies
None.
## Example Playbook
- hosts: servers
roles:
- geerlingguy.security
- geerlingguy.ssh-chroot-jail
*Inside `vars/main.yml`*:
ssh_chroot_jail_users:
- name: janedoe
homedir: /home/janedoe
shell: /bin/bash
## License
MIT (Expat) / BSD
## Author Information
This role was created in 2017 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
Special thanks to [Acquia](https://www.acquia.com) for sponsoring the initial development of this role.
ansible-paquerette-dev/roles/_ssh_chroot/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0022677 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_ssh_chroot/defaults/main.yml 0000664 0000000 0000000 00000003464 14154403771 0024355 0 ustar 00root root 0000000 0000000 ---
# base_chroot_jail: /var/jail
ssh_chroot_jail_group_name: ssh_jailed
ssh_chroot_jail_users: []
ssh_chroot_jail_dirs:
# - bin
- dev
- etc
- lib
- lib64
# - usr/bin
# - usr/lib
# - usr/lib64
- home
ssh_chroot_jail_devs:
- {dev: 'null', major: '1', minor: '3'}
- {dev: 'random', major: '5', minor: '0'}
- {dev: 'urandom', major: '1', minor: '5'}
- {dev: 'zero', major: '1', minor: '8'}
#ssh_chroot_bins:
# - /bin/cp
# - /bin/sh
# - /bin/bash
# - /bin/ls
# - /bin/rm
# - /bin/cat
# - /bin/grep
# - /bin/sed
# - /bin/chmod
# - /bin/chown
# - /bin/ed
# - /bin/nano
## - /usr/bin/tail
## - /usr/bin/head
## - /usr/bin/awk
## - /usr/bin/wc
## - /usr/bin/sort
## - /usr/bin/uniq
## - /usr/bin/cut
## - /usr/bin/scp
## - /usr/bin/tee
## - /usr/bin/touch
## - /usr/bin/vim
## - /usr/bin/vi
## - /usr/bin/dircolors
## - /usr/bin/tput
## - /usr/bin/free
## - /usr/bin/top
## - /usr/bin/find
## - bin: /usr/bin/which
# l2chroot: false
## - /usr/bin/id
## - /usr/bin/whoami
## - /usr/bin/groups
# # Can also set mode, e.g. to setuid or set different permissions.
# # - bin: /bin/ping
# # mode: 4755
# - /bin/mkdir
# - /bin/rmdir
# - /bin/mv
# - /bin/less
# - /bin/tar
## - /usr/bin/ssh
## - /usr/bin/curl
## - /usr/bin/git
## - /usr/bin/wget
## - /usr/bin/rsync
## - /usr/bin/unzip
## - /usr/bin/zip
#
ssh_chroot_l2chroot_url: https://www.cyberciti.biz/files/lighttpd/l2chroot.txt
ssh_chroot_l2chroot_path: /usr/local/bin/l2chroot
ssh_chroot_copy_extra_items:
- /etc/hosts
- /etc/passwd
- /etc/group
- /etc/ld.so.cache
- /etc/ld.so.conf
- /etc/nsswitch.conf
- /etc/resolv.conf
ssh_chroot_sshd_chroot_jail_config: |
Match group {{ ssh_chroot_jail_group_name }}
ChrootDirectory {{ base_chroot_jail }}
X11Forwarding no
AllowTcpForwarding no
ansible-paquerette-dev/roles/_ssh_chroot/handlers/ 0000775 0000000 0000000 00000000000 14154403771 0022670 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_ssh_chroot/handlers/main.yml 0000664 0000000 0000000 00000000722 14154403771 0024340 0 ustar 00root root 0000000 0000000 ---
- name: restart ssh daemon
service:
name: "{{ ssh_daemon }}"
state: restarted
- name: add binary libs via l2chroot
command: "l2chroot {{ item.bin | default(item) }}"
when: item.l2chroot is not defined or item.l2chroot
with_items: "{{ ssh_chroot_bins }}"
- name: copy extra libraries for whoami
shell: "cp {{ lib64_path }}/libns* {{ base_chroot_jail }}/{{ lib64_path }}/"
args:
warn: false
when: "'/usr/bin/whoami' in ssh_chroot_bins"
ansible-paquerette-dev/roles/_ssh_chroot/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0022215 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_ssh_chroot/tasks/jail-user.yml 0000664 0000000 0000000 00000001513 14154403771 0024633 0 ustar 00root root 0000000 0000000 ---
- name: Ensure jailed user exists.
user:
name: "{{ item.name }}"
groups: "{{ item.groups | default(omit) }}"
append: true
shell: "{{ item.shell | default(omit) }}"
home: "{{ item.home | default(omit) }}"
createhome: "{{ item.createhome | default(omit) }}"
register: user_task
- name: Ensure jailed user has a homedir inside the jail (if configured).
file:
path: "{{ base_chroot_jail }}{{ item.home }}"
state: directory
owner: "{{ item.name }}"
group: "{{ item.name }}"
when: item.home is defined
- name: Ensure account skeleton files are copied into jailed user's home.
  command: "cp -r -n /etc/skel/. {{ base_chroot_jail }}{{ item.home }}"
  args:
    warn: false
  become: true
  # `item` is a user mapping ({name: ..., home: ...} — see item.name above);
  # become_user needs the account name, not the whole dict.
  become_user: "{{ item.name }}"
  # Best-effort: skel copy failures (e.g. read-only jail) are non-fatal.
  failed_when: false
  when:
    - item.home is defined
    - user_task.changed
ansible-paquerette-dev/roles/_ssh_chroot/tasks/l2chroot.yml 0000664 0000000 0000000 00000001016 14154403771 0024472 0 ustar 00root root 0000000 0000000 ---
# Install and configure l2chroot.
- name: See if l2chroot is already installed.
stat:
path: "{{ ssh_chroot_l2chroot_path }}"
register: l2chroot_stat
- name: Ensure l2chroot is available.
get_url:
url: "{{ ssh_chroot_l2chroot_url }}"
dest: "{{ ssh_chroot_l2chroot_path }}"
mode: 0755
when: not l2chroot_stat.stat.exists
- name: Ensure l2chroot uses the configured jail path.
lineinfile:
path: "{{ ssh_chroot_l2chroot_path }}"
regexp: "^BASE=.+"
line: 'BASE="{{ base_chroot_jail }}"'
ansible-paquerette-dev/roles/_ssh_chroot/tasks/main.yml 0000664 0000000 0000000 00000005255 14154403771 0023673 0 ustar 00root root 0000000 0000000 ---
- name: Include OS-specific variables.
include_vars: "{{ ansible_os_family }}.yml"
- name: Ensure jail directories exist.
file:
path: "{{ base_chroot_jail }}/{{ item }}"
state: directory
recurse: true
with_items: "{{ ssh_chroot_jail_dirs }}"
- name: Ensure jail devices exist.
command: mknod -m 0666 {{ base_chroot_jail }}/dev/{{ item.dev }} {{ item.type | default('c') }} {{ item.major }} {{ item.minor }}
args:
creates: "{{ base_chroot_jail }}/dev/{{ item.dev }}"
with_items: "{{ ssh_chroot_jail_devs }}"
#- include_tasks: l2chroot.yml
#- name: Ensure binaries are copied into the jail.
# copy:
# src: "{{ item.bin | default(item) }}"
# dest: "{{ base_chroot_jail }}{{ item.bin | default(item) }}"
# remote_src: true
# mode: "{{ item.mode | default('0755') }}"
# with_items: "{{ ssh_chroot_bins }}"
# notify:
# - add binary libs via l2chroot
# - copy extra libraries for whoami
- name: copy ssh-chroot-jail extra items
copy:
src: "{{ item }}"
dest: "{{ base_chroot_jail }}{{ item }}"
remote_src: true
with_items: "{{ ssh_chroot_copy_extra_items }}"
- name: Ensure an ssh jail group exists.
group:
name: "{{ ssh_chroot_jail_group_name }}"
state: present
- name: Ensure SSHD config contains jail configuration.
blockinfile:
path: /etc/ssh/sshd_config
block: "{{ ssh_chroot_sshd_chroot_jail_config }}"
insertafter: EOF
notify: restart ssh daemon
- name: Ensure SSHD config uses internal-sftp.
replace:
path: /etc/ssh/sshd_config
regexp: "^Subsystem sftp /usr/lib/openssh/sftp-server"
replace: "Subsystem sftp internal-sftp"
notify: restart ssh daemon
- include_tasks: jail-user.yml
with_items: "{{ ssh_chroot_jail_users }}"
- name: "mount point for /bin"
file:
state: directory
path: "{{ base_chroot_jail }}/bin"
- name: "bind mount /bin"
mount:
path: "{{ base_chroot_jail }}/bin"
src: "/bin"
opts: bind
state: mounted
fstype: ext4
- name: "mount point for /usr"
file:
state: directory
path: "{{ base_chroot_jail }}/usr"
- name: "bind mount /usr"
mount:
path: "{{ base_chroot_jail }}/usr"
src: "/usr"
opts: bind
state: mounted
fstype: ext4
#- name: "mount point for /run "
# file:
# state: directory
# path: "{{ base_chroot_jail }}/run"
#
#- name: "bind mount /run"
# mount:
# path: "{{ base_chroot_jail }}/run"
# src: "/run"
# opts: bind
# state: mounted
# fstype: ext4
- name: "mount point for /etc/ssl/certs "
file:
state: directory
path: "/etc/ssl/certs"
- name: "bind mount /etc/ssl/certs"
mount:
path: "/etc/ssl/certs"
src: "/etc/ssl/certs"
opts: bind
state: mounted
fstype: ext4
ansible-paquerette-dev/roles/_ssh_chroot/vars/ 0000775 0000000 0000000 00000000000 14154403771 0022043 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_ssh_chroot/vars/Debian.yml 0000664 0000000 0000000 00000000070 14154403771 0023745 0 ustar 00root root 0000000 0000000 ---
ssh_daemon: 'ssh'
lib64_path: /lib/x86_64-linux-gnu
ansible-paquerette-dev/roles/_ssh_chroot/vars/RedHat.yml 0000664 0000000 0000000 00000000056 14154403771 0023736 0 ustar 00root root 0000000 0000000 ---
ssh_daemon: 'sshd'
lib64_path: /usr/lib64
ansible-paquerette-dev/roles/_user/ 0000775 0000000 0000000 00000000000 14154403771 0017673 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_user/README.md 0000664 0000000 0000000 00000002020 14154403771 0021144 0 ustar 00root root 0000000 0000000 # chrooted users
sftp only chrooted user management with password or ssh keys
ssh jail is not fully implemented yet
- vars:
- **user_name**
- user_home: **/home/user_name**
- user_to_remove : must be set to user_name to remove user with **uninstall** command
_if sftp_chroot is set to "no", the user is placed in the ssh jail and the chroot_jail role must be activated_
- sftp_chroot: **yes**/no
- user_key_file
- user_password: cf note
- to provide an encrypted password to ansible, use this :
`python3 -m pip install --user passlib`
`python3 -c "from passlib.hash import sha512_crypt; import getpass; print(sha512_crypt.encrypt(getpass.getpass()))"`
- The mkpasswd utility that is available on most Linux systems is also a great option:
`mkpasswd --method=sha-512`
- platform roles :
set chroot_jail: "yes" in host var to activate ssh chroot jail if needed
In sftp only chrooted, real home directory is : {{ base_prod_path }}/jail//home/
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/_user/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0021502 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_user/defaults/main.yml 0000664 0000000 0000000 00000000116 14154403771 0023147 0 ustar 00root root 0000000 0000000 ---
user_home: "/home/{{ user_name }}"
user_in_jail: "no"
sftp_chroot: "yes"
ansible-paquerette-dev/roles/_user/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0021020 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_user/tasks/install.yml 0000664 0000000 0000000 00000003270 14154403771 0023213 0 ustar 00root root 0000000 0000000 ---
- name: "switch ssh jail / sftp chroot"
  # The inline form `user_in_jail = "yes"` (spaces around `=`) is not valid
  # key=value syntax, so the fact was never set; use an explicit mapping.
  set_fact:
    user_in_jail: "yes"
  when: sftp_chroot == "no"
- name: "set default home for ssh jailed {{ user_name }}"
set_fact:
user_home: "{{ base_chroot_jail }}{{ user_home }}"
when: user_in_jail == "yes"
- name: "user {{ user_name }}"
user:
name: "{{ user_name }}"
home: "{{ user_home }}"
create_home: no
groups: ['ssh_jailed']
state: present
when: user_in_jail == "yes"
- name: "user sftp chroot {{ user_name }}"
user:
name: "{{ user_name }}"
home: "{{ user_home }}"
create_home: no
groups: ["{{ sftp_users_chroot }}"]
state: present
when: sftp_chroot == "yes"
- name: "Ensure root dir /{{ user_name }} exists"
file:
state: directory
path: "{{ base_chroot_jail }}/{{ user_name }}"
when: sftp_chroot == "yes"
- name: "Ensure /{{ user_name }}/home exists for {{ user_name }}"
file:
state: directory
path: "{{ base_chroot_jail }}/{{ user_name }}/home"
when: sftp_chroot == "yes"
- name: "Ensure /home/{{ user_name }} exists for {{ user_name }}"
file:
state: directory
owner: "{{ user_name }}"
group: "{{ sftp_users_chroot }}"
path: "{{ base_chroot_jail }}/{{ user_name }}/home/{{ user_name }}"
when: sftp_chroot == "yes"
- name: "add public key for {{ user_name }}"
  authorized_key:
    user: "{{ user_name }}"
    state: present
    # Nested moustaches ('{{ var }}' inside another {{ ... }}) are not
    # supported; pass the variable name directly to the lookup.
    key: "{{ lookup('file', user_key_file) }}"
  when: user_key_file is defined and user_key_file != ""
- name: "set password for {{ user_name }}"
user:
name: "{{ user_name }}"
update_password: "always"
password: "{{ user_password }}"
when: user_password is defined and user_password != ""
ansible-paquerette-dev/roles/_user/tasks/main.yml 0000664 0000000 0000000 00000000405 14154403771 0022466 0 ustar 00root root 0000000 0000000 ---
- import_tasks: install.yml
when: app_run in ['install', 'reinstall']
#- import_tasks: upgrade.yml
# when: app_run == 'upgrade'
- import_tasks: uninstall.yml
when: app_run == 'uninstall' and user_to_remove is defined and user_to_remove == user_name
ansible-paquerette-dev/roles/_user/tasks/uninstall.yml 0000664 0000000 0000000 00000001403 14154403771 0023552 0 ustar 00root root 0000000 0000000 ---
- name: "switch ssh jail / sftp chroot"
  # The inline form `user_in_jail = "yes"` (spaces around `=`) is not valid
  # key=value syntax, so the fact was never set; use an explicit mapping.
  set_fact:
    user_in_jail: "yes"
  when: sftp_chroot == "no"
- name: "set default home for ssh jailed {{ user_name }}"
set_fact:
user_home: "{{ base_chroot_jail }}{{ user_home }}"
when: user_in_jail == "yes"
- name: "kill all processes for {{ user_name }}"
command: "pkill -u {{ user_name }}"
failed_when: False
changed_when: False
- name: "delete user {{ user_name }}"
user:
name: "{{ user_name }}"
state: absent
force: True
- name: "Remove home {{ user_home }} for {{ user_name }}"
file:
state: absent
path: "{{ user_home }}"
- name: "Remove root dir /{{ user_name }}"
file:
state: absent
path: "{{ base_chroot_jail }}/{{ user_name }}"
when: sftp_chroot == "yes"
ansible-paquerette-dev/roles/_web_app/ 0000775 0000000 0000000 00000000000 14154403771 0020332 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_web_app/README.md 0000664 0000000 0000000 00000005751 14154403771 0021621 0 ustar 00root root 0000000 0000000 # standard (web) application (static, php, python...)
- vars:
- **app_program**: Program name
- **app_run**: install / reinstall / upgrade / uninstall
- **app_instance_id**: must be unique on a server
- **app_version**: version, git branch if git repo is defined
- app_old_version: used to identify backup when upgrade
- app_domain: full domain name for the service
- database_type: **None** cf role _create_database
- database_name: **_db**
- database_user: **_usr**
- **database_password**: (mandatory if database type is not "None")
- app_git_repo: git repository (if app_source is not used)
- app_src: app source url for download (if app_git is not used)
- app_src_root_name : root directory in archive if app_source is used
- local_release : **releases/{{ role_name }}/default** if the archive exists, then used instead of source location
- empty_slot: yes/**no**: when yes don't fetch any git repo or source and deploy only a basic index.html file
- **app_user**:
- app_user_chrooted: **yes**/no
- app_user_key_file
- app_user_password: cf note
- app_group: ****
- app_instance_root:
- **/** if app_user is defined
- **/** if no app_user is defined
- instance_www_root: ****
- packages_list: **[]** additional system packages to be installed
- php_composer: yes/**no**: when yes, uses composer in install and updates with basic command and standard options (nodev...)
- python3: yes/**no**
- python_module_list: **[]** additional modules to be installed via pip3
- app_venv_entry_point
- app_wsgi: yes/**no**
- app_wsgi_entry_point
- to provide an encrypted password to ansible, use this :
`python3 -m pip install --user passlib`
`python3 -c "from passlib.hash import sha512_crypt; import getpass; print(sha512_crypt.encrypt(getpass.getpass()))"`
- The mkpasswd utility that is available on most Linux systems is also a great option:
`mkpasswd --method=sha-512`
- platform roles :
- _python3 or php7_fpm ...
- **apache_server** (or nginx_server coming later) if needed (app_domain defined)
- any database server
set chroot_jail: "yes" in host var if ssh chroot is needed **experimental**
## Additional Nginx configuration
You may add additional nginx configuration as i.e. rewrite rules by adding configuration files in application root **nginx** directory
## Php configuration ##
In addition to the www-data fpm pool,
a specific php fpm pool php-fpm-**app_user**.conf is deployed for the **app_user** in /etc/php/**php_version**/fpm/pool.d
You may adjust the pool configuration by using variables :
Example
```yml
php_memory: 1536M
php_pm_max_children: "25"
php_pm_start_servers: "15"
php_pm_min_spare_servers: "10"
php_pm_max_spare_servers: "20"
```
If defined at the server level, these variables will configure both the www-data pool and the **app_user** fpm pool
[paquerette.eu](http://paquerette.eu)
ansible-paquerette-dev/roles/_web_app/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0022141 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_web_app/defaults/main.yml 0000664 0000000 0000000 00000001520 14154403771 0023606 0 ustar 00root root 0000000 0000000 ---
packages_list: []
app_user_chrooted: "yes"
app_group: "www-data"
app_git_repo: ""
app_src: ""
empty_slot: False
# can be used to deploy a specific release or avoid downloading it
# not used with git repository
# Quote the fallback: a bare `_web_app` inside default() is looked up as an
# (undefined) variable; the intent is the literal role-name string.
local_release: "releases/{{ role | default('_web_app') }}/default"
# can be used to separate root from www root like Dolibarr with htdocs or Pelican with projects/...
app_instance_www_root: "{{ app_instance_root }}"
database_type: "None"
php_composer: "no"
python3: "no"
python_module_list: []
app_venv_entry_point: "{{ app_instance_root }}/{{ app_instance_id }}.sh"
app_wsgi: "no"
app_wsgi_callable: "application"
app_wsgi_entry_point: "{{ app_instance_root }}/{{ app_instance_id }}.wsgi"
app_wsgi_script_reloading: "On"
app_wsgi_pass_authorization: "On"
app_backup_data: "no"
ansible-paquerette-dev/roles/_web_app/handlers/ 0000775 0000000 0000000 00000000000 14154403771 0022132 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_web_app/handlers/main.yml 0000664 0000000 0000000 00000000345 14154403771 0023603 0 ustar 00root root 0000000 0000000 ---
- name: reload apache2 web_app
service: name=apache2 state=reloaded
- name: reload nginx web_app
service: name=nginx state=reloaded
- name: reload php-fpm web_app
service: name=php{{ php_version }}-fpm state=reloaded ansible-paquerette-dev/roles/_web_app/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0021457 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_web_app/tasks/deploy_program.yml 0000664 0000000 0000000 00000005011 14154403771 0025222 0 ustar 00root root 0000000 0000000 ---
- name: "install specific packages if any"
apt:
name: "{{ package }}"
state: present
loop: "{{ packages_list }}"
loop_control:
loop_var: package
### retrieving app from archive
- block:
- import_tasks: retrieve_from_archive.yml
- name: "Move {{ _tmp_new_app.path }}/{{ app_src_root_name }} to {{ app_instance_root }}"
command: "/bin/mv {{ _tmp_new_app.path }}/{{ app_src_root_name }} {{ app_instance_root }}"
args:
creates: "{{ app_instance_root }}"
notify: reload {{ rev_proxy }} web_app
- name: "remove {{ _tmp_new_app.path }}"
file:
path: "{{ _tmp_new_app.path }}"
state: absent
changed_when: False
when: not(empty_slot) and app_src != ""
tags:
- deploy
- name: "root directory for app : {{ app_instance_root }}"
file:
state: directory
path: "{{ app_instance_root }}"
when: empty_slot or app_git_repo != ""
tags:
- deploy
- name: "check is {{ app_instance_root }} empty"
find:
paths: "{{ app_instance_root }}"
register: instance_is_empty
tags:
- deploy
- name: "basic index file for empty_slot"
template:
src: "empty_index.j2"
dest: "{{ app_instance_root }}/index.html"
when: empty_slot and instance_is_empty.matched == 0
tags:
- deploy
- name: "chown / group {{ app_user }} / {{ app_group }}"
file:
dest: "{{ app_instance_root }}"
owner: "{{ app_user | default('www-data') }}"
group: "{{ app_group | default('www-data') }}"
recurse: True
tags:
- deploy
- name: "permissions on {{ app_instance_root }}"
file:
state: directory
path: "{{ app_instance_root }}"
mode: 0711
tags:
- deploy
### retrieving from git
- block:
- name: "retrieving app from git repo {{ app_git_repo }}"
git:
repo: "{{ app_git_repo }}"
dest: "{{ app_instance_root }}"
force: yes
version: "{{ app_version }}"
notify: reload {{ rev_proxy }} web_app
when: not(empty_slot) and app_git_repo != ""
become_user: "{{ run_user }}"
vars:
ansible_ssh_pipelining: true
tags:
- deploy
### php requirements
- name: "installs php requirements using composer"
composer:
command: "install"
working_dir: "{{ app_instance_root }}"
when: php_composer == "yes"
tags:
- deploy
### python3 requirements
- import_tasks: python3_requirements.yml
when: python3 == "yes"
tags:
- deploy
ansible-paquerette-dev/roles/_web_app/tasks/install.yml 0000664 0000000 0000000 00000005420 14154403771 0023651 0 ustar 00root root 0000000 0000000 ---
- import_role:
name: _app_log_inventory
vars:
log_type: "install"
- import_role:
name: _letsencrypt_certificate
when: app_domain is defined and app_domain != ""
- import_role:
name: _create_database
- import_role:
name: _user
vars:
user_name: "{{ app_user }}"
user_password: "{{ app_user_password }}"
when: app_user is defined
- import_tasks: deploy_program.yml
- import_role:
name: _app_backup
- name: "Backup data"
import_role:
name: _app_backup_data
when: app_backup_data == "yes"
### reverse proxy configuration
- block:
- name: "directory for www logs mounted in jail"
file:
state: directory
path: "{{ app_instance_root }}/../logs"
mode: 0711
owner: "{{ app_user }}"
group: "{{ app_group }}"
when: app_user is defined
- name: "template {{ rev_proxy }}_app.j2 {{ app_instance_id }}"
template:
src: "{{ rev_proxy }}_app.j2"
dest: "/etc/{{ rev_proxy }}/sites-available/{{ app_instance_id }}.conf"
when: app_wsgi == "no"
notify: reload {{ rev_proxy }} web_app
tags:
- rev_proxy
- name: "{{ rev_proxy }} config for {{ app_instance_id }} with wsgi and venv"
template:
src: "app_{{ rev_proxy }}_wsgi_venv.j2"
dest: "/etc/{{ rev_proxy }}/sites-available/{{ app_instance_id }}.conf"
when: app_wsgi == "yes"
notify: reload {{ rev_proxy }} web_app
tags:
- rev_proxy
- import_tasks: php_config.yml
- import_role:
name: _app_logrotate
# logs availability for user
- name: "mount point for {{ app_instance_id }} logs "
file:
state: directory
path: "{{ app_instance_root }}/../logs/{{ app_instance_id }}"
when: app_user is defined
# ansible bug : non idempotency with mount module
- name: "bind mount for logs in jail"
mount:
path: "{{ app_instance_root }}/../logs/{{ app_instance_id }}"
src: "{{ www_log }}/{{ app_instance_id }}"
opts: bind,auto
state: present
fstype: ext4
register: __fstab
when: app_user is defined
- name: "Reload fstab"
command: mount -a
args:
warn: no
when: app_user is defined and __fstab.changed
- name: "enable site for {{ app_domain }}"
file:
state: link
path: "/etc/{{ rev_proxy }}/sites-enabled/{{ app_instance_id }}.conf"
src: "/etc/{{ rev_proxy }}/sites-available/{{ app_instance_id }}.conf"
when: app_domain is defined and app_domain != ""
notify: reload {{ rev_proxy }} web_app
- import_role:
name: _app_monit
when: monit_request is defined and monit_request != ''
when: app_domain is defined and app_domain != ""
ansible-paquerette-dev/roles/_web_app/tasks/main.yml 0000664 0000000 0000000 00000002217 14154403771 0023130 0 ustar 00root root 0000000 0000000 ---
- name: "set real home path when user is in jail"
set_fact:
app_user_home: "{{ base_chroot_jail }}/{{ app_user }}/home/{{ app_user }}"
when: app_user_chrooted == "yes"
tags:
- setpath
- name: "set user home var "
set_fact:
app_user_home: "/home/{{ app_user }}"
when: app_user_chrooted != "yes"
tags:
- setpath
- name: "set instance root"
set_fact:
app_instance_root: "{{ app_user_home }}/{{ app_instance_id }}"
when: app_user is defined
tags:
- setpath
- name: "set instance root"
set_fact:
run_user: "{{ app_user }}"
when: app_user is defined
tags:
- setpath
- name: "set instance root"
set_fact:
app_instance_root: "{{ www_root }}/{{ app_instance_id }}"
when: app_user is not defined
tags:
- setpath
- name: "set instance root"
set_fact:
run_user: "root"
when: app_user is not defined
tags:
- setpath
- import_tasks: install.yml
when: app_run in ['install', 'reinstall']
- import_tasks: upgrade.yml
when: app_run == 'upgrade'
- import_tasks: uninstall.yml
when: app_run == 'uninstall'
- import_role:
name: _app_restore_instance
when: app_run == 'restore'
ansible-paquerette-dev/roles/_web_app/tasks/php_config.yml 0000664 0000000 0000000 00000000413 14154403771 0024314 0 ustar 00root root 0000000 0000000 ---
- name: "template user-php-fpm.j2 for {{ app_user }} {{ php_version }}"
template:
src: "php-fpm-user.j2"
dest: "/etc/php/{{ php_version }}/fpm/pool.d/php-fpm-{{ app_user }}.conf"
notify: reload php-fpm web_app
tags:
- php_config ansible-paquerette-dev/roles/_web_app/tasks/python3_requirements.yml 0000664 0000000 0000000 00000002414 14154403771 0026412 0 ustar 00root root 0000000 0000000 ---
- block:
- name: "install virtual environment"
command: "python3 -m virtualenv {{ app_instance_root }}/venv -p /usr/bin/python3"
args:
creates: "{{ app_instance_root }}/venv"
- name: "pip3 install modules with venv"
pip:
name: "{{ module }}"
virtualenv: "{{ app_instance_root }}/venv"
virtualenv_python: "python3"
loop: "{{ python_module_list }}"
loop_control:
loop_var : module
environment:
PATH: "{{ ansible_env.PATH }}:{{ app_user_home }}/.local/bin"
- name: "requirements needed"
stat:
path: "{{ app_instance_root }}/requirements.txt"
register: requirements_venv
- name: "install requirements with venv"
pip:
requirements: "{{ app_instance_root }}/requirements.txt"
virtualenv: "{{ app_instance_root }}/venv"
virtualenv_python: "python3"
state: present
when: requirements_venv.stat.exists
environment:
PATH: "{{ ansible_env.PATH }}:{{ app_user_home }}/.local/bin"
- name: "bash entry point with venv"
template:
src: "bash_entry_venv.j2"
dest: "{{ app_venv_entry_point }}"
mode: 0700
become_user: "{{ run_user }}"
vars:
ansible_ssh_pipelining: true
ansible-paquerette-dev/roles/_web_app/tasks/retrieve_from_archive.yml 0000664 0000000 0000000 00000002320 14154403771 0026550 0 ustar 00root root 0000000 0000000 ---
- block:
- name: "check local release exists"
local_action: stat path=$PWD/{{ local_release }}
register: local_release_file
- name: "set src to {{ local_release }}"
set_fact:
app_src: "{{ local_release }}"
when: local_release_file.stat.exists
- name: "create temporary download directory for archive"
tempfile:
state: directory
suffix: "_app_new"
register: _tmp_new_app
changed_when: False
- name: "check {{ app_instance_root }} exists"
stat:
path: "{{ app_instance_root }}"
register: instance_root_dir
- name: "retrieving app from {{ app_src }}"
unarchive:
src: "{{ app_src }}"
dest: "{{ _tmp_new_app.path }}"
remote_src: "{{ not(local_release_file.stat.exists) }}"
when: (app_run == 'upgrade') or (app_run in ['install', 'reinstall'] and not(instance_root_dir.stat.exists))
- name: "chown / chgrp {{ app_user }} / {{ app_group }}"
file:
dest: "{{ _tmp_new_app.path }}"
owner: "{{ app_user | default('www-data') }}"
group: "{{ app_group | default('www-data') }}"
recurse: True
when: app_user is defined
when: app_src != ""
ansible-paquerette-dev/roles/_web_app/tasks/uninstall.yml 0000664 0000000 0000000 00000003732 14154403771 0024220 0 ustar 00root root 0000000 0000000 ---
- import_role:
name: _app_log_inventory
vars:
log_type: "uninstall"
- import_role:
name: _app_backup
### reverse proxy configuration and logs
- block:
- name: "disable site for {{ app_domain }}"
file:
state: absent
path: "/etc/{{ rev_proxy }}/sites-enabled/{{ app_instance_id }}.conf"
when: app_domain is defined and app_domain != ""
register: disable_site
- import_role:
name: _app_monit
when: monit_request is defined and monit_request != ''
- name: reload monit
service: name=monit state=reloaded
when: disable_site.changed
- name: "remove {{ rev_proxy }} configuration for {{ app_instance_id }}"
file:
state: absent
path: "/etc/{{ rev_proxy }}/sites-available/{{ app_instance_id }}.conf"
- name: reload {{ rev_proxy }}
service: name={{ rev_proxy }} state=reloaded
when: disable_site.changed
- import_role:
name: _letsencrypt_certificate
- import_role:
name: _app_logrotate
- name: "umount {{ app_instance_id }} logs"
shell: "umount {{ app_instance_root }}/../logs/{{ app_instance_id }}"
failed_when: False
changed_when: false
when: app_user is defined
- name: "Remove mount line in fstab"
lineinfile:
path: "/etc/fstab"
regexp: "^{{ www_log }}/{{ app_instance_id }}"
state: absent
when: app_user is defined
- name: "remove mount point for {{ app_instance_id }} logs "
file:
state: absent
path: "{{ app_instance_root }}/../logs/{{ app_instance_id }}"
when: app_user is defined
when: app_domain != ""
- name: "remove {{ app_instance_root }}"
file:
state: absent
path: "{{ app_instance_root }}"
- import_role:
name: _create_database
- import_role:
name: _user
vars:
user_name: "{{ app_user }}"
user_password: "{{ app_user_password }}"
when: app_user is defined
ansible-paquerette-dev/roles/_web_app/tasks/upgrade.yml 0000664 0000000 0000000 00000002010 14154403771 0023622 0 ustar 00root root 0000000 0000000 ---
- import_role:
name: _app_log_inventory
vars:
log_type: "upgrade"
- import_tasks: retrieve_from_archive.yml
when: app_src is defined and app_src != ""
- name: "disable site for {{ app_domain }}"
file:
state: absent
path: "/etc/{{ rev_proxy }}/sites-enabled/{{ app_instance_id }}.conf"
when: app_domain is defined and app_domain != ""
- name: "reload {{ rev_proxy }}"
service: name={{ rev_proxy }} state=reloaded
when: app_domain is defined and app_domain != ""
- import_role:
name: _app_backup_instance
- import_tasks: upgrade_program.yml
- name: "enable site for {{ app_domain }}"
file:
state: link
path: "/etc/{{ rev_proxy }}/sites-enabled/{{ app_instance_id }}.conf"
src: "/etc/{{ rev_proxy }}/sites-available/{{ app_instance_id }}.conf"
when: app_domain is defined and app_domain != ""
- name: "reload {{ rev_proxy }}"
service: name={{ rev_proxy }} state=reloaded
when: app_domain is defined and app_domain != "" ansible-paquerette-dev/roles/_web_app/tasks/upgrade_program.yml 0000664 0000000 0000000 00000002401 14154403771 0025355 0 ustar 00root root 0000000 0000000 ---
- name: "install specific packages if any"
apt:
name: "{{ package }}"
state: present
loop: "{{ packages_list }}"
loop_control:
loop_var: package
### upgrading app from archive
- block:
- name: "copy {{ _tmp_new_app.path }}/{{ app_src_root_name }}/ to {{ app_instance_root }}"
command: "rsync -ax {{ _tmp_new_app.path }}/{{ app_src_root_name }}/ {{ app_instance_root }}"
when: app_src is defined and app_src != ''
- name: "remove {{ _tmp_new_app.path }}"
file:
path: "{{ _tmp_new_app.path }}"
state: absent
changed_when: False
when: app_src != ""
### upgrading from git
- block:
- name: "retrieving app from git repo {{ app_git_repo }}"
git:
repo: "{{ app_git_repo }}"
dest: "{{ app_instance_root }}"
force: yes
version: "{{ app_version }}"
when: app_git_repo != ""
become_user: "{{ run_user }}"
vars:
ansible_ssh_pipelining: true
### php requirements
- name: "updates php requirements using composer"
composer:
command: "update"
working_dir: "{{ app_instance_root }}"
when: php_composer == "yes"
### python3 requirements
- import_tasks: python3_requirements.yml
when: python3 == "yes"
ansible-paquerette-dev/roles/_web_app/templates/ 0000775 0000000 0000000 00000000000 14154403771 0022330 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/_web_app/templates/apache2_app.j2 0000664 0000000 0000000 00000002045 14154403771 0024731 0 ustar 00root root 0000000 0000000
ServerName {{ app_domain | mandatory }}
RewriteEngine On
RewriteCond %{HTTPS} off
RewriteRule ^(.*)$ https://%{HTTP_HOST}$1 [R=301,L]
ServerName {{ app_domain }}
DocumentRoot {{ app_instance_www_root }}
AssignUserId {{ app_user }} {{ app_group }}
SetEnvIFNoCase User-Agent "Monit" dontlog
CustomLog {{ www_log }}/{{ app_instance_id }}/access.log combined env=!dontlog
ErrorLog {{ www_log }}/{{ app_instance_id }}/error.log
SSLProxyEngine on
Options +FollowSymlinks
AllowOverride All
Require all granted
Header always set Strict-Transport-Security "max-age=15768000; preload"
SSLEngine on
SSLCertificateFile /etc/letsencrypt/live/{{ app_domain }}/fullchain.pem
SSLCertificateKeyFile /etc/letsencrypt/live/{{ app_domain }}/privkey.pem
ansible-paquerette-dev/roles/_web_app/templates/app_apache2_wsgi_venv.j2 0000664 0000000 0000000 00000002507 14154403771 0027023 0 ustar 00root root 0000000 0000000
ServerName {{ app_domain | mandatory }}
RewriteEngine On
RewriteCond %{HTTPS} off
RewriteRule ^(.*)$ https://%{HTTP_HOST}$1 [R=301,L]
ServerName {{ app_domain }}
SetEnvIFNoCase User-Agent "Monit" dontlog
CustomLog {{ www_log }}/{{ app_instance_id }}/access.log combined env=!dontlog
ErrorLog {{ www_log }}/{{ app_instance_id }}/error.log
SSLEngine on
SSLCertificateFile /etc/letsencrypt/live/{{ app_domain }}/fullchain.pem
SSLCertificateKeyFile /etc/letsencrypt/live/{{ app_domain }}/privkey.pem
WSGIProcessGroup {{ app_instance_id }}
WSGIDaemonProcess {{ app_instance_id }} user={{ app_user }} group={{ app_group }} python-home={{ app_instance_root }}/venv display-name=%{GROUP}
WSGIScriptAlias / {{ app_wsgi_entry_point }}
WSGICallableObject {{ app_wsgi_callable }}
WSGIScriptReloading {{ app_wsgi_script_reloading}}
WSGIPassAuthorization {{ app_wsgi_pass_authorization}}
AllowOverride All
Require all granted
# Alias /static {{ app_instance_root }}/static
#
# AllowOverride All
# Require all granted
#
ansible-paquerette-dev/roles/_web_app/templates/bash_entry_venv.j2 0000664 0000000 0000000 00000000464 14154403771 0025765 0 ustar 00root root 0000000 0000000 #! /bin/bash
cd {{ app_instance_root }}
source venv/bin/activate
# virtualenv is now active, which means your PATH has been modified.
# Don't try to run python from /usr/bin/python, just run "python" and
# let the PATH figure out which version to run (based on what your
# virtualenv has configured).
"$@"
ansible-paquerette-dev/roles/_web_app/templates/empty_index.j2 0000664 0000000 0000000 00000000157 14154403771 0025115 0 ustar 00root root 0000000 0000000 {{ app_domain }} is working fine and waiting for code !
Paquerette.eu
ansible-paquerette-dev/roles/_web_app/templates/nginx_app.j2 0000664 0000000 0000000 00000012141 14154403771 0024547 0 ustar 00root root 0000000 0000000 upstream php-handler{{ app_instance_id }} {
server unix:/var/run/php/php{{ php_version }}-fpm-{{ app_user }}.sock;
}
map $http_user_agent $log_ua {
~Monit 0;
default 1;
}
server {
listen 80;
listen [::]:80;
server_name {{ app_domain | mandatory }};
# enforce https
return 301 https://$server_name$request_uri;
}
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name {{ app_domain }};
ssl_certificate /etc/letsencrypt/live/{{ app_domain }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ app_domain }}/privkey.pem;
# Add headers to serve security related headers
# Before enabling Strict-Transport-Security headers please read into this
# topic first.
# add_header Strict-Transport-Security "max-age=15768000;
# includeSubDomains; preload;";
#
# WARNING: Only add the preload option once you read about
# the consequences in https://hstspreload.org/. This option
# will add the domain to a hardcoded list that is shipped
# in all major browsers and getting removed from this list
# could take several months.
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header X-Robots-Tag all; # https://developers.google.com/search/docs/advanced/robots/robots_meta_tag
add_header X-Download-Options noopen;
add_header X-Permitted-Cross-Domain-Policies none;
add_header Strict-Transport-Security "max-age=15768000";
# Path to the root of your installation
root {{ app_instance_www_root }};
access_log {{ www_log }}/{{ app_instance_id }}/access.log combined if=$log_ua;
error_log {{ www_log }}/{{ app_instance_id }}/error.log;
include {{ app_instance_www_root }}/nginx/*.conf;
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
location = /favicon.ico {
log_not_found off;
access_log off;
}
# Deny all attempts to access hidden files such as .htaccess, .htpasswd, .DS_Store (Mac).
# Keep logging the requests to parse later (or to pass to firewall utilities such as fail2ban)
location ~ /\. {
deny all;
}
# Deny access to any files with a .php extension in the uploads directory
# Works in sub-directory installs and also in multisite network
# Keep logging the requests to parse later (or to pass to firewall utilities such as fail2ban)
location ~* /(?:uploads|files)/.*\.php$ {
deny all;
}
# set max upload size
client_max_body_size 512M;
fastcgi_buffers 64 4K;
# Enable gzip but do not remove ETag headers
gzip on;
gzip_vary on;
gzip_comp_level 4;
gzip_min_length 256;
gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
index index.php;
location / {
# This is cool because no php is touched for static content.
# include the "?$args" part so non-default permalinks doesn't break when using query string
try_files $uri $uri/ /index.php?$args;
}
location ~ \.php$ {
#NOTE: You should have "cgi.fix_pathinfo = 0;" in php.ini
#include fastcgi.conf;
#fastcgi_intercept_errors on;
#fastcgi_pass php;
fastcgi_split_path_info ^(.+\.php)(/.*)$;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info;
fastcgi_param HTTPS on;
#Avoid sending the security headers twice
fastcgi_param modHeadersAvailable true;
fastcgi_param front_controller_active true;
fastcgi_pass php-handler{{ app_instance_id }};
fastcgi_intercept_errors on;
fastcgi_request_buffering off;
}
# Adding the cache control header for js and css files
# Make sure it is BELOW the PHP block
location ~* \.(js|css|png|jpg|jpeg|gif|ico|woff|svg)$ {
try_files $uri /index.php$uri$is_args$args;
add_header Cache-Control "public, max-age=15778463";
# Add headers to serve security related headers (It is intended to
# have those duplicated to the ones above)
# Before enabling Strict-Transport-Security headers please read into
# this topic first.
# add_header Strict-Transport-Security "max-age=15768000;
# includeSubDomains; preload;";
#
# WARNING: Only add the preload option once you read about
# the consequences in https://hstspreload.org/. This option
# will add the domain to a hardcoded list that is shipped
# in all major browsers and getting removed from this list
# could take several months.
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
# add_header X-Robots-Tag all;
add_header X-Download-Options noopen;
add_header X-Permitted-Cross-Domain-Policies none;
# Optional: Don't log access to assets
access_log off;
}
}
ansible-paquerette-dev/roles/_web_app/templates/php-fpm-user.j2 0000664 0000000 0000000 00000043145 14154403771 0025117 0 ustar 00root root 0000000 0000000 ;
; ************* Warning ******************
; This template is valid for PHP 7.x
; Should be reviewed for other php releases.
; ref: https://www.vennedey.net/resources/3-Secure-webspaces-with-NGINX-PHP-FPM-chroots-and-Lets-Encrypt#php-fpm1
;
; Start a new pool named '{{ app_user }}'.
; the variable $pool can be used in any directive and will be replaced by the
; pool name ('{{ app_user }}' here)
[{{ app_user }}]
; Per pool prefix
; It only applies on the following directives:
; - 'access.log'
; - 'slowlog'
; - 'listen' (unixsocket)
; - 'chroot'
; - 'chdir'
; - 'php_values'
; - 'php_admin_values'
; When not set, the global prefix (or /usr) applies instead.
; Note: This directive can also be relative to the global prefix.
; Default Value: none
;prefix = /path/to/pools/$pool
; Unix user/group of processes
; Note: The user is mandatory. If the group is not set, the default user's group
; will be used.
user = $pool
group = $pool
; The address on which to accept FastCGI requests.
; Valid syntaxes are:
; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
; a specific port;
; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
; a specific port;
; 'port' - to listen on a TCP socket to all addresses
; (IPv6 and IPv4-mapped) on a specific port;
; '/path/to/unix/socket' - to listen on a unix socket.
; Note: This value is mandatory.
listen = /var/run/php/php{{ php_version }}-fpm-$pool.sock
; Set listen(2) backlog.
; Default Value: 511 (-1 on FreeBSD and OpenBSD)
;listen.backlog = 511
; Set permissions for unix socket, if one is used. In Linux, read/write
; permissions must be set in order to allow connections from a web server. Many
; BSD-derived systems allow connections regardless of permissions. The owner
; and group can be specified either by name or by their numeric IDs.
; Default Values: user and group are set as the running user
; mode is set to 0660
listen.owner = www-data
listen.group = www-data
;listen.mode = 0660
; When POSIX Access Control Lists are supported you can set them using
; these options, value is a comma separated list of user/group names.
; When set, listen.owner and listen.group are ignored
;listen.acl_users =
;listen.acl_groups =
; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
; must be separated by a comma. If this value is left blank, connections will be
; accepted from any ip address.
; Default Value: any
;listen.allowed_clients = 127.0.0.1
; Specify the nice(2) priority to apply to the pool processes (only if set)
; The value can vary from -19 (highest priority) to 20 (lower priority)
; Note: - It will only work if the FPM master process is launched as root
; - The pool processes will inherit the master process priority
; unless it specified otherwise
; Default Value: no set
; process.priority = -19
; Set the process dumpable flag (PR_SET_DUMPABLE prctl) even if the process user
; or group is differrent than the master process user. It allows to create process
; core dump and ptrace the process for the pool user.
; Default Value: no
; process.dumpable = yes
; Choose how the process manager will control the number of child processes.
; Possible Values:
; static - a fixed number (pm.max_children) of child processes;
; dynamic - the number of child processes are set dynamically based on the
; following directives. With this process management, there will be
; always at least 1 children.
; pm.max_children - the maximum number of children that can
; be alive at the same time.
; pm.start_servers - the number of children created on startup.
; pm.min_spare_servers - the minimum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is less than this
; number then some children will be created.
; pm.max_spare_servers - the maximum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is greater than this
; number then some children will be killed.
; ondemand - no children are created at startup. Children will be forked when
; new requests will connect. The following parameter are used:
; pm.max_children - the maximum number of children that
; can be alive at the same time.
; pm.process_idle_timeout - The number of seconds after which
; an idle process will be killed.
; Note: This value is mandatory.
pm = dynamic
; The number of child processes to be created when pm is set to 'static' and the
; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
; This value sets the limit on the number of simultaneous requests that will be
; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
; CGI. The below defaults are based on a server without much resources. Don't
; forget to tweak pm.* to fit your needs.
; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
; Note: This value is mandatory.
pm.max_children = {{ php_pm_max_children|default(6, true) }}
; The number of child processes created on startup.
; Note: Used only when pm is set to 'dynamic'
; Default Value: (min_spare_servers + max_spare_servers) / 2
; pm.start_servers = 3
pm.start_servers = {{ php_pm_start_servers|default(3, true) }}
; The desired minimum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
; pm.min_spare_servers = 2
pm.min_spare_servers = {{ php_pm_min_spare_servers|default(2, true) }}
; The desired maximum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
; pm.max_spare_servers = 4
pm.max_spare_servers = {{ php_pm_max_spare_servers|default(4, true) }}
; The number of seconds after which an idle process will be killed.
; Note: Used only when pm is set to 'ondemand'
; Default Value: 10s
;pm.process_idle_timeout = 10s;
; The number of requests each child process should execute before respawning.
; This can be useful to work around memory leaks in 3rd party libraries. For
; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
; Default Value: 0
;pm.max_requests = 500
; The URI to view the FPM status page. If this value is not set, no URI will be
; recognized as a status page. It shows the following informations:
; pool - the name of the pool;
; process manager - static, dynamic or ondemand;
; start time - the date and time FPM has started;
; start since - number of seconds since FPM has started;
; accepted conn - the number of request accepted by the pool;
; listen queue - the number of request in the queue of pending
; connections (see backlog in listen(2));
; max listen queue - the maximum number of requests in the queue
; of pending connections since FPM has started;
; listen queue len - the size of the socket queue of pending connections;
; idle processes - the number of idle processes;
; active processes - the number of active processes;
; total processes - the number of idle + active processes;
; max active processes - the maximum number of active processes since FPM
; has started;
; max children reached - number of times, the process limit has been reached,
; when pm tries to start more children (works only for
; pm 'dynamic' and 'ondemand');
; Value are updated in real time.
; Example output:
; pool: www
; process manager: static
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 62636
; accepted conn: 190460
; listen queue: 0
; max listen queue: 1
; listen queue len: 42
; idle processes: 4
; active processes: 11
; total processes: 15
; max active processes: 12
; max children reached: 0
;
; By default the status page output is formatted as text/plain. Passing either
; 'html', 'xml' or 'json' in the query string will return the corresponding
; output syntax. Example:
; http://www.foo.bar/status
; http://www.foo.bar/status?json
; http://www.foo.bar/status?html
; http://www.foo.bar/status?xml
;
; By default the status page only outputs short status. Passing 'full' in the
; query string will also return status for each pool process.
; Example:
; http://www.foo.bar/status?full
; http://www.foo.bar/status?json&full
; http://www.foo.bar/status?html&full
; http://www.foo.bar/status?xml&full
; The Full status returns for each process:
; pid - the PID of the process;
; state - the state of the process (Idle, Running, ...);
; start time - the date and time the process has started;
; start since - the number of seconds since the process has started;
; requests - the number of requests the process has served;
; request duration - the duration in µs of the requests;
; request method - the request method (GET, POST, ...);
; request URI - the request URI with the query string;
; content length - the content length of the request (only with POST);
; user - the user (PHP_AUTH_USER) (or '-' if not set);
; script - the main script called (or '-' if not set);
; last request cpu - the %cpu the last request consumed
; it's always 0 if the process is not in Idle state
; because CPU calculation is done when the request
; processing has terminated;
; last request memory - the max amount of memory the last request consumed
; it's always 0 if the process is not in Idle state
; because memory calculation is done when the request
; processing has terminated;
; If the process is in Idle state, then informations are related to the
; last request the process has served. Otherwise informations are related to
; the current request being served.
; Example output:
; ************************
; pid: 31330
; state: Running
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 63087
; requests: 12808
; request duration: 1250261
; request method: GET
; request URI: /test_mem.php?N=10000
; content length: 0
; user: -
; script: /home/fat/web/docs/php/test_mem.php
; last request cpu: 0.00
; last request memory: 0
;
; Note: There is a real-time FPM status monitoring sample web page available
; It's available in: /usr/share/php/7.4/fpm/status.html
;
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
pm.status_path = /status
; The ping URI to call the monitoring page of FPM. If this value is not set, no
; URI will be recognized as a ping page. This could be used to test from outside
; that FPM is alive and responding, or to
; - create a graph of FPM availability (rrd or such);
; - remove a server from a group if it is not responding (load balancing);
; - trigger alerts for the operating team (24/7).
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;ping.path = /ping
; This directive may be used to customize the response of a ping request. The
; response is formatted as text/plain with a 200 response code.
; Default Value: pong
;ping.response = pong
; The access log file
; Default: not set
;access.log = log/$pool.access.log
; The log file for slow requests
; Default Value: not set
; Note: slowlog is mandatory if request_slowlog_timeout is set
;slowlog = log/$pool.log.slow
; The timeout for serving a single request after which a PHP backtrace will be
; dumped to the 'slowlog' file. A value of '0s' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_slowlog_timeout = 0
; Depth of slow log stack trace.
; Default Value: 20
;request_slowlog_trace_depth = 20
; The timeout for serving a single request after which the worker process will
; be killed. This option should be used when the 'max_execution_time' ini option
; does not stop script execution for some reason. A value of '0' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_terminate_timeout = 0
; The timeout set by 'request_terminate_timeout' ini option is not engaged after
; application calls 'fastcgi_finish_request' or when application has finished and
; shutdown functions are being called (registered via register_shutdown_function).
; This option will enable timeout limit to be applied unconditionally
; even in such cases.
; Default Value: no
;request_terminate_timeout_track_finished = no
; Set open file descriptor rlimit.
; Default Value: system defined value
;rlimit_files = 1024
; Set max core size rlimit.
; Possible Values: 'unlimited' or an integer greater or equal to 0
; Default Value: system defined value
;rlimit_core = 0
; Chroot to this directory at the start. This value must be defined as an
; absolute path. When this value is not set, chroot is not used.
; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
; of its subdirectories. If the pool prefix is not set, the global prefix
; will be used instead.
; Note: chrooting is a great security feature and should be used whenever
; possible. However, all PHP paths will be relative to the chroot
; (error_log, sessions.save_path, ...).
; Default Value: not set
;chroot =
; Chdir to this directory at the start.
; Note: relative path can be used.
; Default Value: current directory or / when chroot
;chdir = /var/www
; Redirect worker stdout and stderr into main error log. If not set, stdout and
; stderr will be redirected to /dev/null according to FastCGI specs.
; Note: on highloaded environement, this can cause some delay in the page
; process time (several ms).
; Default Value: no
;catch_workers_output = yes
; Decorate worker output with prefix and suffix containing information about
; the child that writes to the log and if stdout or stderr is used as well as
; log level and time. This options is used only if catch_workers_output is yes.
; Settings to "no" will output data as written to the stdout or stderr.
; Default value: yes
;decorate_workers_output = no
; Clear environment in FPM workers
; Prevents arbitrary environment variables from reaching FPM worker processes
; by clearing the environment in workers before env vars specified in this
; pool configuration are added.
; Setting to "no" will make all environment variables available to PHP code
; via getenv(), $_ENV and $_SERVER.
; Default Value: yes
;clear_env = no
; Limits the extensions of the main script FPM will allow to parse. This can
; prevent configuration mistakes on the web server side. You should only limit
; FPM to .php extensions to prevent malicious users to use other extensions to
; execute php code.
; Note: set an empty value to allow all extensions.
; Default Value: .php
;security.limit_extensions = .php .php3 .php4 .php5 .php7
; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
; the current environment.
; Default Value: clean env
;env[HOSTNAME] = $HOSTNAME
env[HOSTNAME] = $HOSTNAME
;env[PATH] = /usr/local/bin:/usr/bin:/bin
env[PATH] = /usr/local/bin:/usr/bin:/bin
;env[TMP] = /tmp
env[TMP] = /tmp
;env[TMPDIR] = /tmp
env[TMPDIR] = /tmp
;env[TEMP] = /tmp
env[TEMP] = /tmp
; Additional php.ini defines, specific to this pool of workers. These settings
; overwrite the values previously defined in the php.ini. The directives are the
; same as the PHP SAPI:
; php_value/php_flag - you can set classic ini defines which can
; be overwritten from PHP call 'ini_set'.
; php_admin_value/php_admin_flag - these directives won't be overwritten by
; PHP call 'ini_set'
; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
; Defining 'extension' will load the corresponding shared extension from
; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
; overwrite previously defined php.ini values, but will append the new value
; instead.
; Note: path INI options can be relative and will be expanded with the prefix
; (pool, global or /usr)
; Default Value: nothing is defined by default except the values in php.ini and
; specified at startup with the -d argument
;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
;php_flag[display_errors] = off
;php_admin_value[error_log] = /var/log/fpm-php.www.log
;php_admin_flag[log_errors] = on
;php_admin_value[memory_limit] = 32M
ansible-paquerette-dev/roles/adm_instance/ 0000775 0000000 0000000 00000000000 14154403771 0021203 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/adm_instance/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0023012 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/adm_instance/defaults/main.yml 0000664 0000000 0000000 00000001065 14154403771 0024463 0 ustar 00root root 0000000 0000000 ---
#
# mail default parameters
#
mail_protocol: smtp
mail_port: 465
mail_secure: PHPMailer::ENCRYPTION_SMTPS
mail_host: ''
mail_username: ''
mail_password: ''
#
# mercanet
#
mercanet_param_dir: "{{ app_user_home }}/param"
mercanet_bin_dir: "{{ app_user_home }}/bin"
# files to change for params
#
mercanet_request_files: [
'paiement.php',
'paiement_cotisation.php',
'm_call_request.php',
'paiement_service.php'
]
mercanet_response_files: [
'm_call_response.php',
'm_call_autoresponse.php'
]
ansible-paquerette-dev/roles/adm_instance/handlers/ 0000775 0000000 0000000 00000000000 14154403771 0023003 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/adm_instance/handlers/main.yml 0000664 0000000 0000000 00000000117 14154403771 0024451 0 ustar 00root root 0000000 0000000 ---
- name: restart mariadb_mysql_server
service: name=mysql state=restarted ansible-paquerette-dev/roles/adm_instance/readme.md 0000664 0000000 0000000 00000006527 14154403771 0022774 0 ustar 00root root 0000000 0000000 Specific rôle to install and configure pâquerette ADM Application
Inherit of the _web_app rôle
Needed variables :
```yml
php_server: yes
php_version: '5.6'
rev_proxy: nginx
mariadb_mysql_server: true
mariadb_mysql: mysql
mysql_root_password:
app_instances:
- role: adm_instance
app_instance_id: app_php_adm
description: ADM php application
app_domain:
database_password:
app_user: mindfulness
app_user_password: # encrypted password (see the _webapp rôle)
app_version: # Git tag
git_user: # Access to the gitlab project
git_token: # Access to the gitlab project
git_mercanet_token: # Access to the gitlab project
mail_host:
mail_username:
mail_password:
```
Create access token to the code project and the mercanet projet with the same user name **git_user**
Application installation :
If you need to restore user data before running the role you may add tar files in directory :
`/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/`
Restore of site files will be used if archives are present in this directory
* Database sql dump **without database creation (edit the file if needed)** but with drop and recreate table : app_php_adm_db.sql
* Mercanet params and cerfificates : mercanet_params.tar
* Docs, images userfiles and affiliations : var_files.tar.gz
The role will configure database access, mail access, mercanet path etc ...
Put the right tag in variable : app_version
Run the role with :
```
./play.py -y vpstest app_php_adm install
```
If the application is already installed
```
./play.py -y vpstest app_php_adm reinstall
```
You may also run part of the role using tags :
```
./play.py -y vpstest app_php_adm reinstall --apply-tags=setpath,mercanetparams
```
*setpath tag is often needed first to set up application root and path variables*
## Install with a development environment
Requires docker and docker_compose (see role docker_server)
Will install mailhog and phpmyadmin, and default mail to mailhog
phpmyadmin on port 8080 -> server alias mysql, you have to create a mariadb/mysql user with remote access allowed
'user'@'%'
## Deploy on a dev server enrionment
mailhog on port 8025
Run install with intall_type=dev
```bash
./play.py -y vpstest app_php_adm reinstall --apply-tags=setpath,dev -e "install_type=dev"
```
## Known issues and workarounds
### Changing app_user
If app_user is changed you'll have to re-put some files in app :
1. Get the not gitted files from old user folder to restore it in new user folder (with ansible restore task) :
```
# app_php_adm is used in theses examples in place of {{ app_instance_id }}
sudo -i
# create the temp folder that will be used by restore task from ansible
mkdir -p /tmp/restore_app_php_adm/app_php_adm/
# create mercanet param tar
cd /home/old_user/param
tar cf mercanet_param.tar --exclude-backups *
mv mercanet_param.tar /tmp/restore_app_php_adm/app_php_adm/
# create user_files tar gzip
cd /home/old_user/app_php_adm
tar czf var_files.tar.gz
tar czf var_files.tar.gz --exclude-backups affiliations/ docs/ images/ userfiles/
mv var_files.tar.gz /tmp/restore_app_php_adm/app_php_adm/
```
2. play the role with reinstall :
```bash
./play.py -y vpstest app_php_adm reinstall"
```
3. Restart php-fpm to recreate a fpm sock with the new user : `systemctl restart php5.6-fpm`
4. if everything is ok, delete old user and its configs : `deluser --remove-all-files` ansible-paquerette-dev/roles/adm_instance/scripts/ 0000775 0000000 0000000 00000000000 14154403771 0022672 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/adm_instance/scripts/install_composer.sh 0000664 0000000 0000000 00000000717 14154403771 0026610 0 ustar 00root root 0000000 0000000 #!/bin/sh
EXPECTED_SIGNATURE=$(wget -q -O - https://composer.github.io/installer.sig)
php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
ACTUAL_SIGNATURE=$(php -r "echo hash_file('SHA384', 'composer-setup.php');")
if [ "$EXPECTED_SIGNATURE" != "$ACTUAL_SIGNATURE" ]
then
>&2 echo 'ERROR: Invalid installer signature'
rm composer-setup.php
exit 1
fi
php composer-setup.php --quiet
RESULT=$?
rm composer-setup.php
exit $RESULT
ansible-paquerette-dev/roles/adm_instance/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0022330 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/adm_instance/tasks/composer.yml 0000664 0000000 0000000 00000000600 14154403771 0024676 0 ustar 00root root 0000000 0000000 ---
- name: Download Composer
script: scripts/install_composer.sh
tags:
- composer
- name: Move Composer globally
become: true
command: mv composer.phar /usr/local/bin/composer
tags:
- composer
- name: Set permissions on Composer
become: true
file:
path: /usr/local/bin/composer
mode: "a+x"
tags:
- composer
ansible-paquerette-dev/roles/adm_instance/tasks/configure.yml 0000664 0000000 0000000 00000010464 14154403771 0025041 0 ustar 00root root 0000000 0000000 ---
- name: "Update db parameters : adodb"
lineinfile:
path : "{{ app_instance_root }}/include/mod_adodb.php"
regexp: '\$DSN ='
line: "$DSN = \"mysql://{{ database_user }}:{{ database_password }}@localhost/{{ database_name }}\";"
backup: yes
state: present
tags:
- dbparams
- configure
- name: "Update db parameters : bo_admin : adodb"
lineinfile:
path : "{{ app_instance_root }}/bo_adm1/include/mod_adodb.php"
regexp: '\$DSN ='
line: "$DSN = \"mysql://{{ database_user }}:{{ database_password }}@localhost/{{ database_name }}\";"
backup: yes
state: present
tags:
- dbparams
- configure
- name: "Update db parameters : request"
lineinfile:
path : "{{ app_instance_root }}/classes/Request.php"
regexp: '^(.*)protected \$dsn ='
line: '\1protected $dsn = "mysql://{{ database_user }}:{{ database_password }}@localhost/{{ database_name }}";'
state: present
backup: yes
backrefs: yes
tags:
- dbparams
- configure
- name: "Configure mail {{ param_mail.protocol | default() }} for {{ app_instance_id }} using template"
template:
src: "config.env.inc.j2"
dest: "{{ app_instance_root }}/config.env.inc"
backup: yes
tags:
- mailparams
- configure
- name: "Configure userfiles directory"
lineinfile:
path : "{{ app_instance_root }}/bo_adm1/ckfinder/config.php"
regexp: '^\$baseDir = '
line: "$baseDir = '{{ app_instance_root }}/userfiles/';"
state: present
backup: yes
tags:
- configure
- name: "Configure images directory for tcpdf"
lineinfile:
path : "{{ app_instance_root }}/vendor/tecnickcom/tcpdf/config/tcpdf_config.php"
regexp: "^define *\\('K_PATH_IMAGES'"
line: "define ('K_PATH_IMAGES', '{{ app_instance_root }}/images/');"
state: present
backup: yes
tags:
- configure
- name: "Checking if DatabaseFactory exists"
stat:
path: "{{ app_instance_root }}/App/Factories/DatabaseFactory.php"
register: file_db_factory
- name: "Update db parameters : Factory USER"
lineinfile:
path : "{{ app_instance_root }}/App/Factories/DatabaseFactory.php"
regexp: '^const DB_USER ='
line: 'const DB_USER = "{{ database_user }}";'
state: present
backup: yes
backrefs: yes
when: file_db_factory.stat.exists
tags:
- dbparams
- configure
- name: "Update db parameters : Factory PASS"
lineinfile:
path : "{{ app_instance_root }}/App/Factories/DatabaseFactory.php"
regexp: '^const DB_PASS ='
line: 'const DB_PASS = "{{ database_password }}";'
state: present
backup: yes
backrefs: yes
when: file_db_factory.stat.exists
tags:
- dbparams
- configure
- name: "Update db parameters : Factory NAME"
lineinfile:
path : "{{ app_instance_root }}/App/Factories/DatabaseFactory.php"
regexp: '^const DB_NAME ='
line: 'const DB_NAME = "{{ database_name }}";'
state: present
backup: yes
backrefs: yes
when: file_db_factory.stat.exists
tags:
- dbparams
- configure
- name: "Checking if .config.php exists"
stat:
path: "{{ app_instance_root }}/.config.php"
register: file_dot_config
- name: "Update db parameters : .config.php"
lineinfile:
path : "{{ app_instance_root }}/.config.php"
regexp: '^const DB_NAME ='
line: 'const DB_NAME = "{{ database_name }}";'
state: present
backup: yes
backrefs: yes
when: file_dot_config.stat.exists
tags:
- dbparams
- configure
- name: "Update db parameters : .config.php"
lineinfile:
path : "{{ app_instance_root }}/.config.php"
regexp: '^const DB_USER ='
line: 'const DB_USER = "{{ database_user }}";'
state: present
backup: yes
backrefs: yes
when: file_dot_config.stat.exists
tags:
- dbparams
- configure
- name: "Update db parameters : .config.php"
lineinfile:
path : "{{ app_instance_root }}/.config.php"
regexp: '^const DB_PASS ='
line: 'const DB_PASS = "{{ database_password }}";'
state: present
backup: yes
backrefs: yes
when: file_dot_config.stat.exists
tags:
- dbparams
- configure ansible-paquerette-dev/roles/adm_instance/tasks/developt.yml 0000664 0000000 0000000 00000001134 14154403771 0024674 0 ustar 00root root 0000000 0000000 ---
- name: "install docker compose file for dev tools (PHPMyadmin - Mailhog)"
template:
src: "docker-compose.yml.j2"
dest: "{{ app_user_home }}/docker-compose.yml"
backup: yes
tags:
- dev
- name: "Configure mail for mailhog {{ param_mail.protocol | default() }} for {{ app_instance_id }} using template"
template:
src: "config.env.dev.inc.j2"
dest: "{{ app_instance_root }}/config.env.inc"
backup: yes
tags:
- dev
- name: "ufw: Allow port 3306"
ufw:
rule: allow
port: "3306"
proto: tcp
tags:
- dev
ansible-paquerette-dev/roles/adm_instance/tasks/main.yml 0000664 0000000 0000000 00000000416 14154403771 0024000 0 ustar 00root root 0000000 0000000 ---
- import_tasks: composer.yml
- import_role:
name: _web_app
- import_tasks: restore.yml
- import_tasks: postinstall.yml
- import_tasks: configure.yml
- import_tasks: mercanet.yml
- import_tasks: developt.yml
when: install_type == "dev"
ansible-paquerette-dev/roles/adm_instance/tasks/mercanet.yml 0000664 0000000 0000000 00000011632 14154403771 0024654 0 ustar 00root root 0000000 0000000 ---
- name: "test mercanet param tar presence"
stat:
path: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/mercanet_param.tar"
register: mercanet_param
tags:
- mercanet
- name: "create {{ mercanet_param_dir }}"
file:
path: "{{ mercanet_param_dir }}"
state: directory
mode: 0777
owner: "{{ app_user }}"
group: "{{ app_group }}"
when: mercanet_param.stat.exists
tags:
- mercanet
- name: "restoring mercanet param to {{ mercanet_param_dir }}"
unarchive:
src: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/mercanet_param.tar"
dest: "{{ mercanet_param_dir }}"
remote_src: True
owner: "{{ app_user }}"
group: "{{ app_group }}"
when: mercanet_param.stat.exists
tags:
- mercanet
- name: create temporary download directory
tempfile:
state: directory
suffix: "_mercanet_new"
register: _tmp_new_mercanet
changed_when: False
tags:
- mercanet
- name: "retrieving mercanet from git repo {{ mercanet_git_repo }} to {{ _tmp_new_mercanet.path }}"
git:
repo: "{{ mercanet_git_repo }}"
dest: "{{ _tmp_new_mercanet.path }}"
force: yes
tags:
- mercanet
- name: "Install mercanet bin directory in {{ app_user_home }}"
copy:
src: "{{ _tmp_new_mercanet.path }}/bin"
dest: "{{ app_user_home }}"
remote_src: True
owner: "{{ app_user }}"
group: "{{ app_group }}"
tags:
- mercanet
- name: "Install mercanet directory in {{ app_instance_root }}"
copy:
src: "{{ _tmp_new_mercanet.path }}/mercanet"
dest: "{{ app_instance_root }}"
remote_src: True
owner: "{{ app_user }}"
group: "{{ app_group }}"
tags:
- mercanet
- name: "remove {{ _tmp_new_mercanet.path }}"
file:
path: "{{ _tmp_new_mercanet.path }}"
state: absent
tags:
- mercanet
- name: "Update mercanet pathfile parameter for request files"
lineinfile:
path : "{{ app_instance_root }}/{{ param_file }}"
regexp: '^(.*)\$parm="\$parm pathfile='
line: '\1$parm="$parm pathfile={{ mercanet_param_dir }}/pathfile";'
state: present
backup: yes
backrefs: yes
loop: "{{ mercanet_request_files }}"
loop_control:
loop_var: param_file
tags:
- mercanetparams
- name: "Update mercanet request parameter"
lineinfile:
path : "{{ app_instance_root }}/{{ param_file }}"
regexp: '^(.*)\$path_bin ='
line: '\1$path_bin = "{{ mercanet_bin_dir }}/static/request";'
state: present
backup: yes
backrefs: yes
loop: "{{ mercanet_request_files }}"
loop_control:
loop_var: param_file
tags:
- mercanetparams
- name: "Update mercanet pathfile parameter for response files"
lineinfile:
path : "{{ app_instance_root }}/{{ param_file }}"
regexp: '^(.*)\$pathfile='
line: '\1$pathfile="pathfile={{ mercanet_param_dir }}/pathfile";'
state: present
backup: yes
backrefs: yes
loop: "{{ mercanet_response_files }}"
loop_control:
loop_var: param_file
tags:
- mercanetparams
- name: "Update mercanet response parameter"
lineinfile:
path : "{{ app_instance_root }}/{{ param_file }}"
regexp: '^(.*)\$path_bin ='
line: '\1$path_bin = "{{ mercanet_bin_dir }}/static/response";'
state: present
backup: yes
backrefs: yes
loop: "{{ mercanet_response_files }}"
loop_control:
loop_var: param_file
tags:
- mercanetparams
- name: update mercanet logfile
lineinfile:
path : "{{ app_instance_root }}/m_call_autoresponse.php"
regexp: '^(.*)\$logfile='
line: '\1$logfile="{{ app_instance_root }}/mercanet/log/logfile.txt";'
state: present
backup: yes
backrefs: yes
tags:
- mercanetparams
- name: "Update mercanet pathfile F_DEFAULT"
lineinfile:
path : "{{ mercanet_param_dir }}/pathfile"
regexp: '^F_DEFAULT'
line: 'F_DEFAULT!{{ mercanet_param_dir }}/parmcom.mercanet!'
state: present
backup: yes
tags:
- mercanetparams
- name: "Update mercanet pathfile F_PARAM!"
lineinfile:
path : "{{ mercanet_param_dir }}/pathfile"
regexp: '^F_PARAM'
line: 'F_PARAM!{{ mercanet_param_dir }}/parmcom!'
state: present
backup: yes
tags:
- mercanetparams
- name: "Update mercanet pathfile F_CERTIFICATE!"
lineinfile:
path : "{{ mercanet_param_dir }}/pathfile"
regexp: '^F_CERTIFICATE'
line: 'F_CERTIFICATE!{{ mercanet_param_dir }}/certif!'
state: present
backup: yes
tags:
- mercanetparams
- name: "Update mercanet pathfile D_LOGO!"
lineinfile:
path : "{{ mercanet_param_dir }}/pathfile"
regexp: '^D_LOGO'
line: 'D_LOGO!/mercanet/logo/!'
state: present
backup: yes
tags:
- mercanetparams
ansible-paquerette-dev/roles/adm_instance/tasks/postinstall.yml 0000664 0000000 0000000 00000001143 14154403771 0025426 0 ustar 00root root 0000000 0000000 ---
- name: "remove {{ app_instance_root }}/dev directory"
file:
path: "{{ app_instance_root }}/dev"
state: absent
tags:
- postinstall
- name: "installs php requirements using 5.6 composer"
become_user: "{{ run_user }}"
command: /usr/local/bin/composer -n install
args:
chdir: "{{ app_instance_root }}/"
tags:
- postinstall
- name: Disable mysql strict mode
template:
src: "01-adm.j2"
dest: "/etc/mysql/conf.d/01-adm.cnf"
owner: "root"
mode: "0644"
notify: restart mariadb_mysql_server
tags:
- postinstall ansible-paquerette-dev/roles/adm_instance/tasks/restore.yml 0000664 0000000 0000000 00000005544 14154403771 0024546 0 ustar 00root root 0000000 0000000 ---
- name: "test sql dump file presence"
stat:
path: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/{{ database_name }}.sql"
register: dump_file
tags:
- restore
- restoredb
- name: "restore adm mysql/mariadb database"
mysql_db:
name: "{{ database_name }}"
encoding: "utf8"
state: import
target: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/{{ database_name }}.sql"
when: dump_file.stat.exists
tags:
- restore
- restoredb
- name: "test userfiles tar presence"
stat:
path: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/userfiles.tar"
register: userfiles
tags:
- restore
- name: "create {{ app_instance_root }}/userfiles"
file:
path: "{{ app_instance_root }}/userfiles"
state: directory
mode: 0777
owner: "{{ app_user }}"
group: "{{ app_group }}"
when: userfiles.stat.exists
tags:
- restore
- name: "restoring userfiles to {{ app_instance_root }}/userfiles"
unarchive:
src: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/userfiles.tar"
dest: "{{ app_instance_root }}/userfiles"
remote_src: True
owner: "{{ app_user }}"
group: "{{ app_group }}"
when: userfiles.stat.exists
tags:
- restore
- name: "test {{ var_tar_files }} presence"
stat:
path: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/{{ var_tar_files }}"
register: file_var_tar_files
tags:
- restore
- restorevar
- name: create temporary download directory
tempfile:
state: directory
suffix: "_var_tar_files_new"
register: _tmp_var_tar_files
changed_when: False
when: file_var_tar_files.stat.exists
tags:
- restore
- restorevar
- name: "Unarchive {{ var_tar_files }} param to {{ _tmp_var_tar_files.path }}"
unarchive:
src: "/tmp/restore_{{ app_instance_id }}/{{ app_instance_id }}/{{ var_tar_files }}"
dest: "{{ _tmp_var_tar_files.path }}"
remote_src: True
when: file_var_tar_files.stat.exists
tags:
- restore
- restorevar
- name: "Restore {{ var_tar_files }} to {{ app_instance_root }}"
copy:
src: "{{ _tmp_var_tar_files.path }}/{{ directory }}"
dest: "{{ app_instance_root }}"
force: yes
remote_src: True
owner: "{{ app_user }}"
group: "{{ app_group }}"
loop:
- affiliations
- docs
- images
- userfiles
loop_control:
loop_var: directory
when: file_var_tar_files.stat.exists
tags:
- restore
- restorevar
- name: "remove {{ var_tar_files }} {{ _tmp_var_tar_files.path }} temporay location "
file:
path: "{{ _tmp_var_tar_files.path }}"
state: absent
when: _tmp_var_tar_files.path is defined
tags:
- restore
- restorevar ansible-paquerette-dev/roles/adm_instance/templates/ 0000775 0000000 0000000 00000000000 14154403771 0023201 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/adm_instance/templates/01-adm.j2 0000664 0000000 0000000 00000000025 14154403771 0024412 0 ustar 00root root 0000000 0000000 [mysqld]
sql_mode =
ansible-paquerette-dev/roles/adm_instance/templates/config.env.dev.inc.j2 0000664 0000000 0000000 00000000275 14154403771 0027023 0 ustar 00root root 0000000 0000000 isSMTP();
$mail->Host = 'localhost';
$mail->SMTPAuth = true;
$mail->Username = '';
$mail->Password = '';
$mail->Port=1025;
}
ansible-paquerette-dev/roles/adm_instance/templates/config.env.inc.j2 0000664 0000000 0000000 00000001210 14154403771 0026234 0 ustar 00root root 0000000 0000000 Mailer = '{{ mail_protocol | default() }}';
$mail->SMTPSecure = {{ mail_secure | default() }};
$mail->Host = '{{ mail_host | default() }}';
$mail->SMTPAuth = true;
$mail->Port = {{ mail_port | default() }};
$mail->Username = '{{ mail_username | default() }}';
$mail->Password = '{{ mail_password | default() }}';
$mail->addCustomHeader('X-Mailer', 'ADM Mailer');
}
ansible-paquerette-dev/roles/adm_instance/templates/docker-compose.yml.j2 0000664 0000000 0000000 00000000543 14154403771 0027152 0 ustar 00root root 0000000 0000000 version: '3'
services:
phpmyadmin:
image: phpmyadmin
ports:
- 8080:80
environment:
- PMA_ARBITRARY=1
extra_hosts:
- "mysql:172.18.0.1"
mailhog:
image: mailhog/mailhog
logging:
driver: 'none' # disable saving logs
ports:
- 1025:1025 # smtp server
- 8025:8025 # web ui
restart: "always"
ansible-paquerette-dev/roles/adm_instance/vars/ 0000775 0000000 0000000 00000000000 14154403771 0022156 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/adm_instance/vars/main.yml 0000664 0000000 0000000 00000000535 14154403771 0023630 0 ustar 00root root 0000000 0000000 ---
app_program: "Adm"
app_git_repo: "https://{{ git_user }}:{{ git_token }}@git.paquerette.eu/adm/adm.dev.paquerette.eu.git"
database_type: "mysql"
app_user_chrooted: no
mercanet_git_repo: "https://{{ git_user }}:{{ git_mercanet_token }}@git.paquerette.eu/adm/mercanet.git"
var_tar_files: var_files.tar.gz
install_type: "production"
ansible-paquerette-dev/roles/apache2_server/ 0000775 0000000 0000000 00000000000 14154403771 0021447 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/apache2_server/handlers/ 0000775 0000000 0000000 00000000000 14154403771 0023247 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/apache2_server/handlers/main.yml 0000664 0000000 0000000 00000000200 14154403771 0024706 0 ustar 00root root 0000000 0000000 ---
- name: restart apache2
service: name=apache2 state=restarted
- name: reload monit
service: name=monit state=reloaded
ansible-paquerette-dev/roles/apache2_server/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0022574 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/apache2_server/tasks/main.yml 0000664 0000000 0000000 00000006012 14154403771 0024242 0 ustar 00root root 0000000 0000000 ---
- import_role:
name: _app_log_inventory
vars:
log_type: "server"
server_name: "apache2"
- name: "install apache2 packages for {{ ansible_distribution_release }}"
apt:
name: "{{ apache2_package }}"
state: present
loop: "{{ apache_package_list }}"
loop_control:
loop_var: apache2_package
- name: " ServerName {{ apache_default_domain }}"
lineinfile:
line: "ServerName {{ apache_default_domain }}"
path: "/etc/apache2/apache2.conf"
state: present
backup: yes
notify: restart apache2
register: ServerName
- name: restart apache2
service: name=apache2 state=restarted
when: ServerName.changed
- name: "apache2 module for {{ ansible_distribution }}"
apache2_module:
name: "{{ item }}"
state: present
with_items: "{{ apache_module_list_debian }}"
notify: restart apache2
when: ansible_distribution == 'Debian'
- name: "apache2 module for {{ ansible_distribution_release }}"
apache2_module:
name: "{{ item }}"
state: present
with_items: "{{ apache_module_list_xenial }}"
notify: restart apache2
when: ansible_distribution_release == 'xenial'
- name: "apache2 module for {{ ansible_distribution_release }}"
apache2_module:
name: "{{ item }}"
state: present
with_items: "{{ apache_module_list_bionic }}"
notify: restart apache2
when: ansible_distribution_release == 'bionic'
- name: "ufw: Allow port 80"
ufw:
rule: allow
port: "80"
proto: tcp
- name: "ufw: Allow port 443"
ufw:
rule: allow
port: "443"
proto: tcp
- name: "Move /var/www {{ www_root }}"
command: "/bin/mv /var/www {{ www_root }}"
args:
creates: "{{ www_root }}"
- name: "link /var/www to {{ www_root }}"
file:
state: link
src: "{{ www_root }}"
path: "/var/www"
- name: "log dest {{ www_log }}"
file:
state: directory
path: "{{ www_log }}"
- name: "cron stop apache for backup"
cron:
name: "stop apache"
hour: "{{ backup_web_stop_hour }}"
minute: "{{ backup_web_stop_minute }}"
job: "/bin/systemctl stop apache2.service"
- name: "cron start apache for backup"
cron:
name: "start apache"
hour: "{{ backup_web_start_hour | mandatory }}"
minute: "{{ backup_web_start_minute | mandatory }}"
job: "/bin/systemctl start apache2.service"
- name: "template for backup"
template:
src: backupninja.apache.j2
dest: "{{ backup_item_dir }}/10-apache2.sh"
mode: 0640
- name: "monit.apache.j2"
template:
src: "monit.apache.j2"
dest: "/etc/monit/conf.d/apache.conf"
notify: reload monit
- name: "disable site 000-default.conf"
file:
state: absent
path: "/etc/apache2/sites-enabled/000-default.conf"
notify: restart apache2
- name: "disable site 000-default-le-ssl.conf"
file:
state: absent
path: "/etc/apache2/sites-enabled/000-default-le-ssl.conf"
notify: restart apache2 ansible-paquerette-dev/roles/apache2_server/templates/ 0000775 0000000 0000000 00000000000 14154403771 0023445 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/apache2_server/templates/backupninja.apache.j2 0000664 0000000 0000000 00000000401 14154403771 0027402 0 ustar 00root root 0000000 0000000 # Note: the spaces around the equal sign ('=') are optional.
when = everyday at {{ backup_base_service_conf_time | mandatory }}
rsync -aAx --del /etc/apache2 {{ backup_prod_dir | mandatory}}
rc=$?; if [[ $rc != 0 ]]; then error "apache configuration" ; fi
ansible-paquerette-dev/roles/apache2_server/templates/monit.apache.j2 0000664 0000000 0000000 00000000347 14154403771 0026254 0 ustar 00root root 0000000 0000000 check process apache2 with pidfile /var/run/apache2/apache2.pid
start program = "/bin/systemctl start apache2.service"
stop program = "/bin/systemctl stop apache2.service"
if failed host 127.0.0.1 port 443 then restart ansible-paquerette-dev/roles/apache2_server/vars/ 0000775 0000000 0000000 00000000000 14154403771 0022422 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/apache2_server/vars/main.yml 0000664 0000000 0000000 00000001555 14154403771 0024077 0 ustar 00root root 0000000 0000000 ---
apache_default_domain: "{{ ansible_fqdn }}"
apache_package_list: [
"apache2",
"libapache2-mod-wsgi-py3",
"libapache2-mpm-itk",]
apache_module_list_xenial: [
"proxy",
"proxy_http",
"proxy_connect",
"proxy_wstunnel",
"ssl",
"dir",
"env",
"headers",
"mime",
"mpm_itk",
"proxy_fcgi",
"rewrite",
"setenvif",
"wsgi"
]
apache_module_list_bionic: [
"proxy",
"proxy_http",
"proxy_connect",
"proxy_wstunnel",
"ssl",
"dir",
"env",
"headers",
"mime",
"mpm_itk",
"proxy_fcgi",
"rewrite",
"setenvif",
"wsgi"
]
apache_module_list_debian: [
"proxy",
"proxy_http",
"proxy_connect",
"proxy_wstunnel",
"ssl",
"dir",
"env",
"headers",
"mime",
"proxy_fcgi",
"rewrite",
"setenvif",
"wsgi"
]
ansible-paquerette-dev/roles/base_platform/ 0000775 0000000 0000000 00000000000 14154403771 0021374 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/base_platform/README.md 0000664 0000000 0000000 00000000551 14154403771 0022654 0 ustar 00root root 0000000 0000000 # Configuration of platform :
- runs server components roles according to host vars inventory
- available components :
- apache2_server
- nginx_server
- mongodb_server
- postgres_server
- mariadb_mysql_server
- nodejs
- php7_fpm
requirements :
- role : base_server
- inventory : host vars
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/base_platform/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0022521 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/base_platform/tasks/main.yml 0000664 0000000 0000000 00000002002 14154403771 0024162 0 ustar 00root root 0000000 0000000 ---
- name: "role nginx_server"
import_role:
name: "nginx_server"
when: (rev_proxy is defined) and (rev_proxy == "nginx")
- name: "role apache2_server"
import_role:
name: "apache2_server"
when: (rev_proxy is defined) and (rev_proxy == "apache2")
- name: "role postgres_server"
import_role:
name: "postgres_server"
when: (postgres_server is defined) and (postgres_server)
- name: "role mariadb_mysql_server"
import_role:
name: "mariadb_mysql_server"
when: (mariadb_mysql_server is defined) and (mariadb_mysql_server)
tags:
- mariadb
- name: "role mongodb_server"
import_role:
name: "mongodb_server"
when: (mongodb_server is defined) and (mongodb_server)
- name: "role nodejs"
import_role:
name: "nodejs"
when: (nodejs is defined) and (nodejs)
- name: "role php7_fpm"
import_role:
name: "php7_fpm"
when: (php_server is defined) and (rev_proxy is defined) and (php_version is defined) and (php_server)
ansible-paquerette-dev/roles/base_secure_ssh/ 0000775 0000000 0000000 00000000000 14154403771 0021713 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/base_secure_ssh/README.md 0000664 0000000 0000000 00000002002 14154403771 0023164 0 ustar 00root root 0000000 0000000 Secure ssh for a server
* disable root login
* add a sudoer admin account and group
* disable interactive login, only users with an installed key can log into the system.
Prerequisite :
* host created with the tower ssh key for user root
* host defined in the host file with the root account
This role has to be run after host provisionned with the tower key.
This role can also be launched even after a server is created and already in use.
```bash
# example
./play.py -ry vpstest.paquerette.eu base_secure_ssh
```
When role is runned, you have to update the ansible hosts inventory file with the admin account.
Check the server is correctly configured :
```bash
ansible vpstest -m ping
vpstest | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python3"
},
"changed": false,
"ping": "pong"
}
```
***WARNING*** : Do not set ssh option **UsePAM** to No, as it will not be possible to log into the system any more. See http://arlimus.github.io/articles/usepam/
ansible-paquerette-dev/roles/base_secure_ssh/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0023522 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/base_secure_ssh/defaults/main.yml 0000664 0000000 0000000 00000000131 14154403771 0025164 0 ustar 00root root 0000000 0000000 admin_group: admin
admin_user: admin
admin_shell: /bin/bash
admin_key: "{{ admin_key }}"
ansible-paquerette-dev/roles/base_secure_ssh/handlers/ 0000775 0000000 0000000 00000000000 14154403771 0023513 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/base_secure_ssh/handlers/main.yml 0000664 0000000 0000000 00000000135 14154403771 0025161 0 ustar 00root root 0000000 0000000 ---
- name: restart ssh daemon base_server
service:
name: "sshd"
state: restarted
ansible-paquerette-dev/roles/base_secure_ssh/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0023040 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/base_secure_ssh/tasks/allow_sudo.yml 0000664 0000000 0000000 00000000322 14154403771 0025730 0 ustar 00root root 0000000 0000000 ---
- name: "Add admin group to sudoers {{ admin_group }}"
lineinfile: dest=/etc/sudoers regexp="^%{{ admin_group }}" line="%{{ admin_group }} ALL=(ALL) NOPASSWD:ALL"
notify: restart ssh daemon base_server ansible-paquerette-dev/roles/base_secure_ssh/tasks/base_admin.yml 0000664 0000000 0000000 00000000621 14154403771 0025644 0 ustar 00root root 0000000 0000000 ---
- name: Create admin group {{ admin_group }}
group:
name: "{{ admin_group }}"
state: present
- name: Create admin user {{ admin_user }}
user:
name: "{{ admin_user }}"
group: "{{ admin_group }}"
shell: "{{ admin_shell }}"
state: present
- name: Add SSH-keys to admin user {{ admin_user }}
authorized_key:
user: "{{ admin_user }}"
key: "{{ admin_key }}"
ansible-paquerette-dev/roles/base_secure_ssh/tasks/disable_root_ssh.yml 0000664 0000000 0000000 00000001726 14154403771 0027114 0 ustar 00root root 0000000 0000000 ---
- name: Disable root login over SSH
lineinfile: dest=/etc/ssh/sshd_config regexp="^PermitRootLogin" line="PermitRootLogin no" state=present
notify:
- restart ssh daemon base_server
- name: Enable login using a public key
lineinfile: dest=/etc/ssh/sshd_config regexp="^PubkeyAuthentication" line="PubkeyAuthentication yes" state=present
notify:
- restart ssh daemon base_server
- name: Disable password login
lineinfile: dest=/etc/ssh/sshd_config regexp="^PasswordAuthentication" line="PasswordAuthentication no" state=present
notify:
- restart ssh daemon base_server
- name: Disable challenge authentication
lineinfile: dest=/etc/ssh/sshd_config regexp="^ChallengeResponseAuthentication" line="ChallengeResponseAuthentication no" state=present
notify:
- restart ssh daemon base_server
- name: Enable PAM
lineinfile: dest=/etc/ssh/sshd_config regexp="^UsePAM" line="UsePAM yes" state=present
notify:
- restart ssh daemon base_server
ansible-paquerette-dev/roles/base_secure_ssh/tasks/main.yml 0000664 0000000 0000000 00000000151 14154403771 0024504 0 ustar 00root root 0000000 0000000 ---
- import_tasks: base_admin.yml
- import_tasks: allow_sudo.yml
- import_tasks: disable_root_ssh.yml ansible-paquerette-dev/roles/base_server/ 0000775 0000000 0000000 00000000000 14154403771 0021056 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/base_server/README.md 0000664 0000000 0000000 00000001064 14154403771 0022336 0 ustar 00root root 0000000 0000000 # Basic configuration of server :
- file system organization
- locales
- firewall
- mailing configuration
- monitoring
- backup strategy
- user group for sftp only in chroot : {{ sftp_users_chroot }}
requirements :
- role : None
- inventory : group base_server ( and secrets ... )
Tags are defined for each tasks
- backup
- filesys
- letsencrypt
- mail
- monit
- apt
- ufw
- chroot
So you may just run the tasks you need to prepare your server
i.e.
```bash
ansible-playbook play.book.yml --tags=monit,backup
```
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/base_server/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0022665 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/base_server/defaults/main.yml 0000664 0000000 0000000 00000000350 14154403771 0024332 0 ustar 00root root 0000000 0000000 ---
ssh_sftp_only_chroot: |
Match Group {{ sftp_users_chroot }}
ChrootDirectory {{ base_chroot_jail }}/%u
AllowTCPForwarding no
X11Forwarding no
ForceCommand internal-sftp
Match Group *
_monit_ssl_option: "tls" ansible-paquerette-dev/roles/base_server/handlers/ 0000775 0000000 0000000 00000000000 14154403771 0022656 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/base_server/handlers/main.yml 0000664 0000000 0000000 00000001253 14154403771 0024326 0 ustar 00root root 0000000 0000000 ---
- name: restart cron base_server
service: name=cron state=restarted
#- name: postmap /etc/postfix/canonical
# command: /usr/sbin/postmap /etc/postfix/canonical
# notify: restart postfix base_server
#
- name: postmap /etc/postfix/sasl_passwd base_server
command: /usr/sbin/postmap /etc/postfix/sasl_passwd
notify: restart postfix base_server
- name: newaliases base_server
command: newaliases
notify: restart postfix base_server
- name: restart postfix base_server
service: name=postfix state=restarted
- name: reload monit base server
service: name=monit state=reloaded
- name: restart ssh daemon base_server
service:
name: "ssh"
state: restarted
ansible-paquerette-dev/roles/base_server/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0022203 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/base_server/tasks/base_backup.yml 0000664 0000000 0000000 00000002652 14154403771 0025172 0 ustar 00root root 0000000 0000000 ---
- name: "user for master backup"
user:
name: "{{ backup_master_user }}"
tags:
- backup
- name: "public key in authorized keys for master backup"
authorized_key:
user: "{{ backup_master_user }}"
state: present
key: "{{ lookup('file', lookup('env','HOME') + '/{{ master_backup_key_file }}' + '.pub') }}"
tags:
- backup
- name: 'set reportsuccess to no'
lineinfile:
path: "/etc/backupninja.conf"
regexp: '^reportsuccess'
line: 'reportsuccess = no'
state: present
tags:
- backup
- name: 'set reportinfo to yes'
lineinfile:
path: "/etc/backupninja.conf"
regexp: '^reportinfo'
line: 'reportinfo = yes'
state: present
tags:
- backup
- name: "backup production directory {{ backup_prod_dir }}"
file:
state: directory
path: "{{ backup_prod_dir }}"
tags:
- backup
- name: "backup template day"
template:
src: backup.day.j2
dest: "{{ backup_item_dir }}/50-day.sh"
mode: 0640
tags:
- backup
- name: "backup template month"
template:
src: backup.month.j2
dest: "{{ backup_item_dir }}/50-month.sh"
mode: 0640
tags:
- backup
- name: "backup template {{ base_prod_options }}"
template:
src: opt_backup_day.j2
dest: "{{ backup_item_dir }}/20-prod-opt.sh"
mode: 0640
tags:
- backup
ansible-paquerette-dev/roles/base_server/tasks/base_filesystem.yml 0000664 0000000 0000000 00000002433 14154403771 0026106 0 ustar 00root root 0000000 0000000 ---
- name: "directory and permissions on {{ base_prod_path }}"
file:
state: directory
path: "{{ base_prod_path }}"
mode: 0755
tags:
- filesys
- name: "{{ base_prod_ansible_log }}"
file:
state: directory
path: "{{ base_prod_ansible_log }}"
tags:
- filesys
- name: "{{ base_prod_options }}"
file:
state: directory
path: "{{ base_prod_options }}"
tags:
- filesys
- name: "Move /opt to {{ base_root_opt }}"
command: "/bin/mv /opt {{ base_root_opt }}"
args:
creates: "{{ base_root_opt }}"
tags:
- filesys
- name: "link /opt to {{ base_root_opt }}"
file:
state: link
src: "{{ base_root_opt }}"
path: "/opt"
tags:
- filesys
- name: "retention for tmp files"
cron:
name: "delete old tmp files"
weekday: "1"
hour: "5"
minute: "30"
job: "find /tmp -depth -mindepth 1 -mtime +{{ tmp_file_retention }} -delete >/dev/null 2>&1"
tags:
- filesys
- name: "retention for log files"
cron:
name: "delete old log files"
weekday: "1"
hour: "5"
minute: "30"
job: "find /var/log -depth -mindepth 1 -mtime +{{ log_file_retention }} -delete >/dev/null 2>&1"
tags:
- filesys
ansible-paquerette-dev/roles/base_server/tasks/base_letsencrypt.yml 0000664 0000000 0000000 00000002423 14154403771 0026275 0 ustar 00root root 0000000 0000000 ---
- name: "install certbot"
apt:
name: "certbot"
state: latest
update_cache: yes
tags:
- letsencrypt
- name: "letsencrypt_new install directory"
file:
state: directory
mode: 0777
path: "{{ base_prod_options }}/letsencrypt_new"
tags:
- letsencrypt
- name: "letsencrypt new certificates facility {{ base_prod_options }}/letsencrypt_new/letsencrypt_new.py"
template:
src: "letsencrypt_new.py.j2"
dest: "{{ base_prod_options }}/letsencrypt_new/letsencrypt_new.py"
mode: "755"
tags:
- letsencrypt
- name: "cron for automatic letsencrypt new certificates"
cron:
name: "letsencrypt certbot automatic new certificates"
hour: "{{ new_cert_standalone_hour | mandatory}}"
minute: "{{ new_cert_standalone_minute | mandatory }}"
job: '{{ base_prod_options }}/letsencrypt_new/letsencrypt_new.py'
tags:
- letsencrypt
- name: "cron for automatic letsencrypt renew"
cron:
name: "letsencrypt certbot automatic renew"
weekday: "{{ renew_cert_days }}"
hour: "{{ renew_cert_standalone_hour | mandatory}}"
minute: "{{ renew_cert_standalone_minute | mandatory }}"
job: '/usr/bin/certbot renew --quiet'
tags:
- letsencrypt
ansible-paquerette-dev/roles/base_server/tasks/base_mail.yml 0000664 0000000 0000000 00000006372 14154403771 0024652 0 ustar 00root root 0000000 0000000 ---
- name: 'set hostname to {{ ansible_hostname }}'
lineinfile:
path: "/etc/postfix/main.cf"
regexp: '^myhostname'
line: "myhostname = {{ ansible_fqdn }}"
state: present
backup: yes
notify: restart postfix base_server
tags:
- mail
- name: 'set myorigin to $myhostname'
lineinfile:
path: "/etc/postfix/main.cf"
regexp: '^myorigin'
line: "myorigin = $myhostname"
state: present
notify: restart postfix base_server
tags:
- mail
- name: 'set relayhost to {{ smtp_host}}'
lineinfile:
path: "/etc/postfix/main.cf"
regexp: '^relayhost'
line: "relayhost = [{{ smtp_host}}]:{{ smtp_port }}"
state: present
notify: restart postfix base_server
tags:
- mail
- name: 'bcc root mail copy'
lineinfile:
path: "/etc/postfix/main.cf"
line: "{{comment_for_copy_mail | mandatory}}always_bcc = {{ mail_in_copy }}"
state: present
notify: restart postfix base_server
tags:
- mail
- name: 'smtp_sasl_auth_enable = yes'
lineinfile:
path: "/etc/postfix/main.cf"
line: "smtp_sasl_auth_enable = yes"
state: present
notify: restart postfix base_server
tags:
- mail
- name: 'smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd'
lineinfile:
path: "/etc/postfix/main.cf"
line: "smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd"
state: present
notify: restart postfix base_server
tags:
- mail
- name: 'smtp_sasl_security_options = noanonymous'
lineinfile:
path: "/etc/postfix/main.cf"
line: "smtp_sasl_security_options = noanonymous"
state: present
notify: restart postfix base_server
tags:
- mail
- name: '/etc/postfix/sasl_passwd'
lineinfile:
path: "/etc/postfix/sasl_passwd"
line: "[{{ smtp_host }}]:{{ smtp_port }} {{ smtp_user }}:{{ smtp_password }}"
state: present
create: yes
mode: 0600
notify: postmap /etc/postfix/sasl_passwd base_server
tags:
- mail
# - name: 'canonical_maps = hash:/etc/postfix/canonical'
# lineinfile:
# path: "/etc/postfix/main.cf"
# line: "canonical_maps = hash:/etc/postfix/canonical"
# state: present
# notify: restart postfix base_server
#
# - name: '/etc/postfix/canonical'
# lineinfile:
# path: "/etc/postfix/canonical"
# line: root@{{ansible_hostname}} {{ base_postmaster }}
# state: present
# create: yes
# mode: 0600
# notify: postmap /etc/postfix/canonical
#
# - name: '/etc/postfix/canonical'
# lineinfile:
# path: "/etc/postfix/canonical"
# line: root@localhost.localdomain {{ base_postmaster }}
# state: present
# create: yes
# mode: 0600
# notify: postmap /etc/postfix/canonical
# - name: '/etc/postfix/canonical'
# lineinfile:
# path: "/etc/postfix/canonical"
# line: "\\.* {{ base_postmaster }}"
# state: present
# create: yes
# mode: 0600
# notify: postmap /etc/postfix/canonical
#
#
- name: 'alias for root {{ base_postmaster }}'
lineinfile:
path: "/etc/aliases"
regexp: "^root"
line: "root: {{ base_postmaster }}"
state: present
notify: newaliases base_server
tags:
- mail
ansible-paquerette-dev/roles/base_server/tasks/base_monitoring.yml 0000664 0000000 0000000 00000003117 14154403771 0026107 0 ustar 00root root 0000000 0000000 ---
- name: 'set monit cycle to {{ monit_cycle_duration }} seconds'
lineinfile:
path: "/etc/monit/monitrc"
regexp: "^[\t ]*set daemon"
line: "set daemon {{ monit_cycle_duration }} start delay {{ monit_start_delay }}"
state: present
notify: reload monit base server
tags:
- monit
- name: "base configuration for system monitoring - single partition"
template:
src: "monit.conf.single.j2"
dest: "/etc/monit/conf.d/base.conf"
when: not data_partition
notify: reload monit base server
tags:
- monit
- name: "base configuration for system monitoring - dual partition"
template:
src: "monit.conf.dual.j2"
dest: "/etc/monit/conf.d/base.conf"
when: data_partition
notify: reload monit base server
tags:
- monit
- name: "cron stop monit for backup"
cron:
name: "stop monit"
hour: "{{ backup_monit_stop_hour }}"
minute: "{{ backup_monit_stop_minute }}"
job: "/bin/systemctl stop monit.service"
tags:
- monit
- name: "cron start monit after backup"
cron:
name: "start monit"
hour: "{{ backup_monit_start_hour | mandatory }}"
minute: "{{ backup_monit_start_minute | mandatory }}"
job: "/bin/systemctl start monit.service"
tags:
- monit
- name: "cron start monit anyway"
cron:
name: "start monit anyway"
hour: "{{ monit_start_anyway_hour | mandatory }}"
minute: "{{ monit_start_anyway_minute | mandatory }}"
job: "/bin/systemctl start monit.service >/dev/null 2>&1"
tags:
- monit
ansible-paquerette-dev/roles/base_server/tasks/base_packages.yml 0000664 0000000 0000000 00000001523 14154403771 0025477 0 ustar 00root root 0000000 0000000 ---
- name: "add backports repository"
apt_repository:
repo: "deb {{debian_backports_uri}} {{debian_backports_components}}"
state: "present"
update_cache: "True"
when: ansible_distribution == "Debian"
tags:
- apt
- name: "Apt update"
apt:
update_cache: "True"
tags:
- apt
- name: "base packages"
apt:
name: "{{ base_package }}"
state: present
loop: "{{ base_packages_list }}"
loop_control:
loop_var: base_package
tags:
- apt
- name: "Upgrade system safe"
apt:
update_cache: "True"
upgrade: "True"
tags:
- apt
- name: "Ufw limit for ssh"
ufw:
rule: limit
port: ssh
proto: tcp
tags:
- ufw
- name: "Allow OpenSSH"
ufw:
rule: allow
name: OpenSSH
tags:
- ufw
- name: "Start firewall ufw"
ufw:
state: enabled
tags:
- ufw
ansible-paquerette-dev/roles/base_server/tasks/base_users.yml 0000664 0000000 0000000 00000001405 14154403771 0025061 0 ustar 00root root 0000000 0000000 ---
- name: "Ensure {{ sftp_users_chroot }} group exists"
group:
name: "{{ sftp_users_chroot }}"
state: present
tags:
- chroot
- name: "Ensure SSHD config uses internal-sftp"
lineinfile:
path: /etc/ssh/sshd_config
regexp: "^Subsystem[ \\t]+sftp"
line: "Subsystem sftp internal-sftp"
state: present
notify: restart ssh daemon base_server
tags:
- chroot
- name: "Ensure SSHD config contains sftp only directive"
blockinfile:
path: /etc/ssh/sshd_config
block: "{{ ssh_sftp_only_chroot }}"
insertafter: EOF
notify: restart ssh daemon base_server
tags:
- chroot
- name: "Ensure base chroot exists {{ base_chroot_jail }}"
file:
state: directory
path: "{{ base_chroot_jail }}"
tags:
- chroot
ansible-paquerette-dev/roles/base_server/tasks/local_settings.yml 0000664 0000000 0000000 00000000332 14154403771 0025736 0 ustar 00root root 0000000 0000000 ---
- locale_gen:
name: fr_FR.UTF-8
state: present
- locale_gen:
name: en_US.UTF-8
state: present
- name: set timezone to Paris
timezone:
name: "Europe/Paris"
notify: restart cron base_server ansible-paquerette-dev/roles/base_server/tasks/main.yml 0000664 0000000 0000000 00000000652 14154403771 0023655 0 ustar 00root root 0000000 0000000 ---
- import_tasks: base_packages.yml
- import_tasks: base_filesystem.yml
- import_tasks: local_settings.yml
- import_tasks: base_backup.yml
- import_tasks: base_monitoring.yml
- import_tasks: base_mail.yml
- import_tasks: base_letsencrypt.yml
- import_tasks: base_users.yml
- import_role:
name: _python3
- import_role:
name: _master_backup_server
when: backup_slaves is defined and backup_slaves != [] ansible-paquerette-dev/roles/base_server/templates/ 0000775 0000000 0000000 00000000000 14154403771 0023054 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/base_server/templates/backup.day.j2 0000664 0000000 0000000 00000000617 14154403771 0025336 0 ustar 00root root 0000000 0000000 # Note: the spaces around the equal sign ('=') are optional.
when = everyday at {{ backup_day_conf_time }}
export LANG=en_US.utf8
day_name=$(date "+%A")
tar -czf {{ backup_base_dir }}/prod.$day_name.tar.gz {{ backup_prod_dir }}
chown {{ backup_master_user }}:{{ backup_master_user }} {{ backup_base_dir }}/prod.$day_name.tar.gz
rc=$?; if [[ $rc != 0 ]]; then error "$day_name prod archive" ; fi
ansible-paquerette-dev/roles/base_server/templates/backup.month.j2 0000664 0000000 0000000 00000000732 14154403771 0025704 0 ustar 00root root 0000000 0000000 # Note: the spaces around the equal sign ('=') are optional.
when = 01 at {{ backup_month_conf_time }}
# rsync -aAx --del {{ backup_prod_dir }}/ {{ backup_base_dir }}/Month
# rc=$?; if [[ $rc != 0 ]]; then error "mensuelle prod rsync" ; fi
tar -czf {{ backup_base_dir }}/prod.Month.tar.gz {{ backup_prod_dir }}
chown {{ backup_master_user }}:{{ backup_master_user }} {{ backup_base_dir }}/prod.Month.tar.gz
rc=$?; if [[ $rc != 0 ]]; then error "Month prod archive" ; fi
ansible-paquerette-dev/roles/base_server/templates/letsencrypt_new.py.j2 0000775 0000000 0000000 00000002627 14154403771 0027177 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
import subprocess
import os
import argparse
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.description = \
"""
Send request to letsencrypt for new certificates in standalone mode,
requests are stored in ./newcertificates file.
File format : : e.g.
www.mydomain.com:postmaster@mydomain.com
"""
args = arg_parser.parse_args()
current_dir = os.path.dirname(os.path.realpath(__file__))
in_file = current_dir + "/new_certificates"
if not os.path.exists(in_file):
exit(0)
with open(in_file, 'r') as requests:
for request in requests.readlines():
domain, mail = request.rstrip().split(':')
try:
cmd = "certbot certonly --standalone --agree-tos -n -m {1} -d {0} ".format(domain, mail).split()
rc = subprocess.run(cmd, check=True)
os.system("echo '{0}:{1} ok' >> {2}/new_certificates.log".format(domain, mail, current_dir))
except subprocess.CalledProcessError:
os.system("echo '{0}:{1} nok' >> {2}/new_certificates.log".format(domain, mail, current_dir))
except Exception as e:
os.system("echo '{0}:{1} error {2}' >> {3}/new_certificates.log".format(domain, mail, e, current_dir))
raise e
os.remove(in_file)
ansible-paquerette-dev/roles/base_server/templates/monit.conf.dual.j2 0000664 0000000 0000000 00000002706 14154403771 0026314 0 ustar 00root root 0000000 0000000 #set mailserver localhost
set mailserver {{ smtp_host }} port {{ smtp_port }}
USERNAME {{ smtp_user }} PASSWORD {{ smtp_password }}
using {{ _monit_ssl_option }} with timeout 30 seconds
set alert {{ base_postmaster | mandatory }} not on { instance, action }
{{ comment_for_copy_mail | mandatory }}set alert {{ mail_in_copy }} not on { instance, action }
set mail-format {
from: {{ base_postmaster }}
subject: [Monit - {{ alert_group }} - {{ ansible_hostname }}] $SERVICE $EVENT at $DATE
message: Monit $ACTION $SERVICE at $DATE on $HOST: $DESCRIPTION.
Yours sincerely,
monit
}
set httpd port 2812 and
use address localhost
allow localhost
check system $HOST
if loadavg (5min) > 3 for 20 cycles then alert
if cpu usage > {{ monit_system_cpu_usage }}% for 20 cycles then alert
if memory usage > {{ monit_system_memory_usage }}% for 20 cycles then alert
if swap usage > {{ monit_system_swap_usage }}% for 20 cycles then alert
check filesystem rootfs with path /
if space usage > {{ monit_system_root_partition_usage }}% then alert
check filesystem datafs with path {{ base_prod_path }}
start program = "/bin/mount {{ base_prod_path }}"
stop program = "/bin/umount {{ base_prod_path }}"
if space usage > {{ monit_system_data_partition_usage }}% for 5 times within 15 cycles then alert
if space usage > 99% then stop
if inode usage > 99% then stop
if changed fsflags then alert
ansible-paquerette-dev/roles/base_server/templates/monit.conf.single.j2 0000664 0000000 0000000 00000002173 14154403771 0026646 0 ustar 00root root 0000000 0000000 #set mailserver localhost
set mailserver {{ smtp_host }} port {{ smtp_port }}
USERNAME {{ smtp_user }} PASSWORD {{ smtp_password }}
using {{ _monit_ssl_option }}
with timeout 30 seconds
set alert {{ base_postmaster | mandatory }} not on { instance, action }
{{ comment_for_copy_mail | mandatory }}set alert {{ mail_in_copy }} not on { instance, action }
set mail-format {
from: {{ base_postmaster }}
subject: [Monit - {{ alert_group }} - {{ ansible_hostname }}] $SERVICE $EVENT at $DATE
message: Monit $ACTION $SERVICE at $DATE on $HOST: $DESCRIPTION.
Yours sincerely,
monit
}
set httpd port 2812 and
use address localhost
allow localhost
check system $HOST
if loadavg (1min) > 2 for 20 cycles then alert
if loadavg (5min) > 1.5 for 10 cycles then alert
if cpu usage > {{ monit_system_cpu_usage }}% for 5 cycles then alert
if memory usage > {{ monit_system_memory_usage }}% for 5 cycles then alert
if swap usage > {{ monit_system_swap_usage }}% then alert
check filesystem rootfs with path /
if space usage > {{ monit_system_root_partition_usage }}% then alert
ansible-paquerette-dev/roles/base_server/templates/opt_backup_day.j2 0000664 0000000 0000000 00000000724 14154403771 0026300 0 ustar 00root root 0000000 0000000
# Note: the spaces around the equal sign ('=') are optional.
when = everyday at {{ backup_app_service_conf_time | mandatory }}
rsync -aAx --del {{ base_prod_options }}/ {{ backup_opt_dir | mandatory}}
rc=$?; if [[ $rc != 0 ]]; then error "paquerette server prod/opt" ; fi
# too big when collabora is installed
# rsync -aAx --del {{ base_root_opt }}/ {{ backup_root_opt_dir | mandatory}}
# rc=$?; if [[ $rc != 0 ]]; then error "paquerette server prod/opt-root" ; fi
ansible-paquerette-dev/roles/bbb_instance/ 0000775 0000000 0000000 00000000000 14154403771 0021167 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/bbb_instance/README.md 0000664 0000000 0000000 00000001160 14154403771 0022444 0 ustar 00root root 0000000 0000000 # Big Blue Button Application
This role only installs a script that performs the [greenlight](https://docs.bigbluebutton.org/greenlight/gl-overview.html) database backup.
vars :
- **postgres_docker_server**
- **postgres_docker_image**
- **app_instance_id**
- **database_password**
- **database_docker_name**
- **database_docker_network**
Server has to be prepared using roles
- base_secure_ssh
- base_server
Big Blue Button, is installed using the provided [installation script](https://github.com/bigbluebutton/bbb-install/blob/master/bbb-install.sh)
See [Big Blue Button documentation](https://docs.bigbluebutton.org/) ansible-paquerette-dev/roles/bbb_instance/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0022776 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/bbb_instance/defaults/main.yml 0000664 0000000 0000000 00000000040 14154403771 0024437 0 ustar 00root root 0000000 0000000 database_type: "postgres_docker" ansible-paquerette-dev/roles/bbb_instance/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0022314 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/bbb_instance/tasks/install.yml 0000664 0000000 0000000 00000001004 14154403771 0024500 0 ustar 00root root 0000000 0000000 ---
# Install the greenlight database backup script shared by all app roles.
- import_role:
    name: _app_backup
# Brand the default BigBlueButton landing page with the paquerette version.
- name: Replace landing page
  get_url:
    url: https://git.paquerette.eu/paquerette/infrastructure/bbb_paquerette/-/raw/master/index.html
    dest: /var/www/bigbluebutton-default/index.html
    mode: '0644'
- name: Add paquerette logo
  get_url:
    url: https://git.paquerette.eu/paquerette/infrastructure/bbb_paquerette/-/raw/master/images/logo-paquerette.png
    dest: /var/www/bigbluebutton-default/images/logo-paquerette.png
    mode: '0644'
ansible-paquerette-dev/roles/bbb_instance/tasks/main.yml 0000664 0000000 0000000 00000000115 14154403771 0023760 0 ustar 00root root 0000000 0000000 ---
- import_tasks: install.yml
when: app_run in ['install', 'reinstall']
ansible-paquerette-dev/roles/collabora_online_instance/ 0000775 0000000 0000000 00000000000 14154403771 0023744 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/collabora_online_instance/README.md 0000664 0000000 0000000 00000001152 14154403771 0025222 0 ustar 00root root 0000000 0000000 # Collabora Online Instance :
- for Collabora partners with partner key
- based on collabora packages so only one instance is allowed on a single server
- vars :
- collabora_version: **4**
- collabora_distro_repo: **ubuntu1804** / ubuntu1604...
- **collabora_domain**
- **collabora_client_domains** : list of client domains
- collabora_port: **9980**
- platform roles :
- apache2 / nginx
To run the install:
./play.py -r myhost collabora_online_instance
To run a reinstall:
./play.py -r myhost collabora_online_instance -e 'app_run=reinstall'
[paquerette.eu](http://paquerette.eu) ansible-paquerette-dev/roles/collabora_online_instance/defaults/ 0000775 0000000 0000000 00000000000 14154403771 0025553 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/collabora_online_instance/defaults/main.yml 0000664 0000000 0000000 00000001472 14154403771 0027226 0 ustar 00root root 0000000 0000000 ---
app_program: "Collabora"
collabora_version: '4'
collabora_port: 9980
app_domain: "{{ collabora_domain }}"
app_instance_id: "{{ app_domain }}"
# ubuntu1604, ubuntu1804, ...
collabora_distro_repo: "ubuntu1604"
app_run: "install"
log_type: "install"
app_version: "{{ collabora_version }}"
app_port: "{{ collabora_port }}"
app_instance_root: "{{ base_root_opt }}"
loolwsd_log_dest: "{{ base_prod_path }}/log/loolwsd"
# fatal, critical, error, warning, notice, information, debug, trace
collabora_log_level: "error"
#collabora_admin_user: "admin"
#collabora_admin_password: "admin_password"
hunspell_module_list: [
"hunspell-fr",
"hunspell-en-us",
"hunspell-en-gb",
]
monit_request: "https://{{ collabora_domain }}/hosting/discovery"
monit_expect: "discovery"
ansible-paquerette-dev/roles/collabora_online_instance/handlers/ 0000775 0000000 0000000 00000000000 14154403771 0025544 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/collabora_online_instance/handlers/main.yml 0000664 0000000 0000000 00000000272 14154403771 0027214 0 ustar 00root root 0000000 0000000 ---
# Handlers notified by the collabora_online_instance role after its
# reverse-proxy vhost files change.
- name: reload nginx collabora_online_instance
  service:
    name: nginx
    state: reloaded

- name: reload apache2 collabora_online_instance
  service:
    name: apache2
    state: reloaded
ansible-paquerette-dev/roles/collabora_online_instance/tasks/ 0000775 0000000 0000000 00000000000 14154403771 0025071 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/collabora_online_instance/tasks/main.yml 0000664 0000000 0000000 00000007310 14154403771 0026541 0 ustar 00root root 0000000 0000000 ---
# Install and configure a Collabora Online (loolwsd) instance and publish it
# through the host's reverse proxy (nginx or apache2, selected by rev_proxy).
- import_role:
    name: _app_log_inventory
- import_role:
    name: _letsencrypt_certificate
# loolwsd runs as its own user; a wide-open mode lets it create log files here.
- name: "log dest {{ loolwsd_log_dest }}"
  file:
    state: directory
    path: "{{ loolwsd_log_dest }}"
    mode: "0777"
- name: "apt key for collabora - paquerette"
  apt_key: keyserver="keyserver.ubuntu.com" id="0C54D189F4BA284D"
# Partner repository: collabora_secret is the customer key for the partner repo.
- name: "apt_repository for collabora - paquerette"
  apt_repository:
    repo: "deb https://collaboraoffice.com/repos/CollaboraOnline/{{ collabora_version }}/customer-{{ collabora_distro_repo }}-{{ collabora_secret }} ./"
    state: present
# Spellcheck dictionaries used by the office documents (list in defaults).
- name: "install hunspell"
  apt:
    name: "{{ hunspell_module_list }}"
    state: present
    update_cache: yes
- name: "install loolwsd"
  apt:
    name: loolwsd
    state: latest
    update_cache: yes
- name: "install collabora-online-brand"
  apt:
    name: collabora-online-brand
    state: latest
    update_cache: yes
# loolwsd reads its TLS material from /etc/loolwsd; copy the letsencrypt files
# there (they are refreshed after each renew by the cron script installed below).
- name: "copy letsencrypt cert.pem"
  copy:
    src: "/etc/letsencrypt/live/{{ collabora_domain }}/cert.pem"
    dest: "/etc/loolwsd/cert.pem"
    mode: "0744"
    remote_src: yes
- name: "copy letsencrypt privkey.pem"
  copy:
    src: "/etc/letsencrypt/live/{{ collabora_domain }}/privkey.pem"
    dest: "/etc/loolwsd/key.pem"
    mode: "0744"
    remote_src: yes
- name: "copy letsencrypt chain.pem"
  copy:
    src: "/etc/letsencrypt/live/{{ collabora_domain }}/chain.pem"
    dest: "/etc/loolwsd/chain.pem"
    mode: "0744"
    remote_src: yes
- name: "copy letsencrypt fullchain.pem"
  copy:
    src: "/etc/letsencrypt/live/{{ collabora_domain }}/fullchain.pem"
    dest: "/etc/loolwsd/ca-chain.cert.pem"
    mode: "0744"
    remote_src: yes
- name: "ensure presence of {{ base_prod_options }}/collabora/"
  file:
    path: "{{ base_prod_options }}/collabora/"
    state: directory
- name: "script for copy certificates after renew"
  template:
    src: letsencrypt_cert_collabora.j2
    dest: "{{ base_prod_options }}/collabora/collabora-renew-cert.sh"
    # quoted so YAML does not reinterpret the octal literal
    mode: '0700'
    backup: yes
# Weekly cron, scheduled just after the standalone certbot renew window.
- name: "cron for copy certificates after renew"
  cron:
    name: "collabora : copy certificates after renew"
    weekday: "1"
    hour: "{{ renew_cert_standalone_hour }}"
    minute: "{{ renew_cert_copy_minute }}"
    job: "{{ base_prod_options }}/collabora/collabora-renew-cert.sh"
- name: "template for loolwsd.xml"
  template:
    src: loolwsd_xml.j2
    dest: "/etc/loolwsd/loolwsd.xml"
    backup: yes
# Point the unit file's /var/log path at the role's log destination.
# Fix: previously replaced with the hard-coded '/mnt/vdb/log', which silently
# ignored loolwsd_log_dest on hosts with a different data partition.
- name: Replace lool log file destination in service.
  replace:
    path: /lib/systemd/system/loolwsd.service
    regexp: '\/var\/log.*$'
    replace: '{{ loolwsd_log_dest }}'
# Restart (not notify) so the new config/certs are live before the vhost goes up.
- name: "reload and restart loolwsd service"
  systemd:
    state: restarted
    daemon_reload: yes
    name: loolwsd
- name: "template nginx_collabora.j2 {{ collabora_domain }}"
  template:
    src: nginx_collabora.j2
    dest: "/etc/nginx/sites-available/{{ collabora_domain }}.conf"
  notify: reload nginx collabora_online_instance
  when: rev_proxy == "nginx"
- name: "template apache2_collabora.j2 {{ collabora_domain }}"
  template:
    src: apache2_collabora.j2
    dest: "/etc/apache2/sites-available/{{ collabora_domain }}.conf"
  notify: reload apache2 collabora_online_instance
  when: rev_proxy == "apache2"
- name: "enable site for {{ collabora_domain }}"
  file:
    state: link
    path: "/etc/{{ rev_proxy }}/sites-enabled/{{ collabora_domain }}.conf"
    src: "/etc/{{ rev_proxy }}/sites-available/{{ collabora_domain }}.conf"
  notify: reload {{ rev_proxy }} collabora_online_instance
- import_role:
    name: _app_logrotate
- import_role:
    name: _app_monit
ansible-paquerette-dev/roles/collabora_online_instance/templates/ 0000775 0000000 0000000 00000000000 14154403771 0025742 5 ustar 00root root 0000000 0000000 ansible-paquerette-dev/roles/collabora_online_instance/templates/apache2_collabora.j2 0000664 0000000 0000000 00000005070 14154403771 0031522 0 ustar 00root root 0000000 0000000
ServerName {{ collabora_domain | mandatory }}:443
SetEnvIFNoCase User-Agent "Monit" dontlog
CustomLog {{ www_log | mandatory }}/{{ collabora_domain }}/access.log combined env=!dontlog
ErrorLog {{ www_log | mandatory }}/{{ collabora_domain }}/error.log
# SSL configuration, you may want to take the easy route instead and use Lets Encrypt!
SSLEngine on
SSLCertificateFile /etc/letsencrypt/live/{{ collabora_domain }}/cert.pem
SSLCertificateChainFile /etc/letsencrypt/live/{{ collabora_domain }}/fullchain.pem
SSLCertificateKeyFile /etc/letsencrypt/live/{{ collabora_domain }}/privkey.pem
SSLProtocol all -SSLv2 -SSLv3
SSLCipherSuite ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS
SSLHonorCipherOrder on
# Encoded slashes need to be allowed
AllowEncodedSlashes NoDecode
# Container uses a unique non-signed certificate
SSLProxyEngine On
SSLProxyVerify None
SSLProxyCheckPeerCN Off
SSLProxyCheckPeerName Off
# keep the host
ProxyPreserveHost On
# static html, js, images, etc. served from loolwsd
# loleaflet is the client part of LibreOffice Online
ProxyPass /loleaflet https://127.0.0.1:{{ collabora_port | mandatory }}/loleaflet retry=0
ProxyPassReverse /loleaflet https://127.0.0.1:{{ collabora_port }}/loleaflet
# WOPI discovery URL
ProxyPass /hosting/discovery https://127.0.0.1:{{ collabora_port }}/hosting/discovery retry=0
ProxyPassReverse /hosting/discovery https://127.0.0.1:{{ collabora_port }}/hosting/discovery
# Main websocket
ProxyPassMatch "/lool/(.*)/ws$" wss://127.0.0.1:{{ collabora_port }}/lool/$1/ws nocanon
# Admin Console websocket
ProxyPass /lool/adminws wss://127.0.0.1:{{ collabora_port }}/lool/adminws
# Download as, Fullscreen presentation and Image upload operations
ProxyPass /lool https://127.0.0.1:{{ collabora_port }}/lool
ProxyPassReverse /lool https://127.0.0.1:{{ collabora_port }}/lool
ansible-paquerette-dev/roles/collabora_online_instance/templates/collabora_logrotate_apache2.j2 0000664 0000000 0000000 00000001130 14154403771 0033573 0 ustar 00root root 0000000 0000000 {{ www_log | mandatory }}/{{ collabora_domain }}/*.log {
weekly
missingok
rotate 54
compress
delaycompress
notifempty
create 0640 www-data adm
sharedscripts
prerotate
if [ -d /etc/logrotate.d/httpd-prerotate ]; then \
run-parts /etc/logrotate.d/httpd-prerotate; \
fi \
endscript
postrotate
if /etc/init.d/apache2 status > /dev/null ; then \
/etc/init.d/apache2 reload > /dev/null; \
fi;
endscript
}
ansible-paquerette-dev/roles/collabora_online_instance/templates/collabora_logrotate_nginx.j2 0000664 0000000 0000000 00000000774 14154403771 0033430 0 ustar 00root root 0000000 0000000 {{ www_log | mandatory }}/{{ collabora_domain }}/*.log {
weekly
missingok
rotate 54
compress
delaycompress
notifempty
create 0640 www-data adm
sharedscripts
prerotate
if [ -d /etc/logrotate.d/httpd-prerotate ]; then \
run-parts /etc/logrotate.d/httpd-prerotate; \
fi \
endscript
postrotate
invoke-rc.d nginx rotate >/dev/null 2>&1
endscript
}
ansible-paquerette-dev/roles/collabora_online_instance/templates/letsencrypt_cert_collabora.j2 0000664 0000000 0000000 00000001145 14154403771 0033607 0 ustar 00root root 0000000 0000000 #!/bin/bash
# Copy certificate files from letsencrypt to /etc/loolwsd after renew of {{ collabora_domain }}
# Run weekly from cron (installed by the role) so loolwsd keeps serving the
# renewed certificate; loolwsd itself is not restarted here.
cp "/etc/letsencrypt/live/{{ collabora_domain }}/cert.pem" "/etc/loolwsd/cert.pem"
chmod 0744 "/etc/loolwsd/cert.pem"
# NOTE(review): 0744 leaves the private key world-readable; 0640 owned by the
# lool user would be safer — confirm what loolwsd requires (the role's copy
# tasks use the same mode, so change both together).
cp "/etc/letsencrypt/live/{{ collabora_domain }}/privkey.pem" "/etc/loolwsd/key.pem"
chmod 0744 "/etc/loolwsd/key.pem"
cp "/etc/letsencrypt/live/{{ collabora_domain }}/chain.pem" "/etc/loolwsd/chain.pem"
chmod 0744 "/etc/loolwsd/chain.pem"
cp "/etc/letsencrypt/live/{{ collabora_domain }}/fullchain.pem" "/etc/loolwsd/ca-chain.cert.pem"
chmod 0744 "/etc/loolwsd/ca-chain.cert.pem"
ansible-paquerette-dev/roles/collabora_online_instance/templates/loolwsd_xml.j2 0000664 0000000 0000000 00000030707 14154403771 0030551 0 ustar 00root root 0000000 0000000
de_DE en_GB en_US es_ES fr_FR it nl pt_BR pt_PT ru
{{ collabora_domain }}
1
4
true
3600
30
300
0
0
8000
0
0
100
60
900
loleaflet.html
true
{{ collabora_log_level }}
{{ loolwsd_log_dest }}/loolwsd.log
never
timestamp
true
10 days
10
true
false
false
false
false
false
all
any
192\.168\.[0-9]{1,3}\.[0-9]{1,3}
::ffff:192\.168\.[0-9]{1,3}\.[0-9]{1,3}
127\.0\.0\.1
::ffff:127\.0\.0\.1
::1
true
false
/etc/loolwsd/cert.pem
/etc/loolwsd/key.pem
/etc/loolwsd/ca-chain.cert.pem
1000
true
true
localhost
{% for item in collabora_client_domains %}
{{ item }}
{% endfor %}
10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}
172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3}
172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3}
172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3}
192\.168\.[0-9]{1,3}\.[0-9]{1,3}
192\.168\.1\.1
0
localhost
true
true
false