Commit 5d47d267 authored by Maiken

Uploading also the elasticluster playbooks I am using for reference, but this should be gotten from the elasticluster repo
parent a3084dda
Pipeline #4977 failed with stages
---
#
# This playbook is for site-local customization to ElastiCluster's
# playbooks. It runs *after* any other playbook distributed with
# ElastiCluster has gotten its chance to run.
#
# An empty playbook is checked into the Git repository. If you make
# any local modifications, please run `git update-index
# --assume-unchanged after.yml` to avoid committing them accidentally
# into ElastiCluster's main branch.
#
- name: Apply local customizations (after)
tags:
- after
- local
hosts: all
# by default these are no-op (empty task list)
roles: []
tasks: []
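# A purely hypothetical example of a site-local customization (the package
# name is illustrative only): replace the empty lists above with e.g.
#
#   tasks:
#     - name: Install extra site-specific packages
#       package:
#         name: htop
#         state: present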
---
#
# This playbook is for site-local customization to ElastiCluster's
# playbooks. It runs *after* any other playbook distributed with
# ElastiCluster has gotten its chance to run.
#
# An empty playbook is checked into the Git repository. If you make
# any local modifications, please run `git update-index
# --assume-unchanged after.yml` to avoid committing them accidentally
# into ElastiCluster's main branch.
# the nfs-server coincides with slurm-master
# the nfs-client coincides with slurm-worker
- hosts: slurm_master
tags:
- after
vars:
NFS_EXPORTS:
- path: '/wlcg/session'
clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
options: 'rw,no_root_squash'
- path: '/wlcg/runtime'
clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
options: 'rw,no_root_squash'
- path: '/wlcg/cache'
clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
options: 'rw,no_root_squash'
tasks:
- name: After - Ensure shared dirs exist on nfs server
file:
path: '{{ item.path }}'
state: directory
with_items: '{{ NFS_EXPORTS }}'
- name: After - roles for nfs-server
include_role:
name: 'nfs-server'
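# For reference only: with the NFS_EXPORTS defined above, the `nfs-server`
# role is expected to produce /etc/exports entries roughly like the following
# (hostnames are illustrative; the real ones come from the slurm_worker and
# slurm_submit inventory groups):
#
#   /wlcg/session  worker001(rw,no_root_squash) worker002(rw,no_root_squash)
#   /wlcg/runtime  worker001(rw,no_root_squash) worker002(rw,no_root_squash)
#   /wlcg/cache    worker001(rw,no_root_squash) worker002(rw,no_root_squash)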
- hosts: slurm_worker
tags:
- after
vars:
NFS_MOUNTS:
- fs: '{{groups.slurm_master[0]}}:/wlcg/session'
mountpoint: '/wlcg/session'
options: 'rw,async'
state: 'present'
- fs: '{{groups.slurm_master[0]}}:/wlcg/runtime'
mountpoint: '/wlcg/runtime'
options: 'rw,async'
state: 'present'
- fs: '{{groups.slurm_master[0]}}:/wlcg/cache'
mountpoint: '/wlcg/cache'
options: 'rw,async'
state: 'present'
tasks:
- name: 'After - Ensure {{ item.mountpoint }} directory exists and is owned by root'
  file:
    path: '{{ item.mountpoint }}'
    state: directory
    owner: root
    group: root
  with_items: '{{ NFS_MOUNTS }}'
- name: After - mount nfs shares
  mount:
    name: '{{ item.mountpoint }}'
    src: '{{ item.fs }}'
    fstype: nfs
    opts: '{{ item.options | default("rw,async") }}'
    state: mounted
  with_items: '{{ NFS_MOUNTS }}'
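# For reference only: each entry mounted above translates into an /etc/fstab
# line roughly like the following (the master hostname is illustrative):
#
#   master001:/wlcg/session  /wlcg/session  nfs  rw,async  0 0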
- name: After - nfs-client - add to fstab
include_role:
name: 'nfs-client'
- name: After - Restart slurmd after all config is done (Debian 8+ / Ubuntu 15.10+)
  service:
    name: slurmd
    state: restarted
  when: is_debian_compatible and (is_debian_8_or_later or is_ubuntu_15_10_or_later)
- name: After - Restart slurm-llnl after all config is done (older Debian/Ubuntu)
  service:
    name: slurm-llnl
    state: restarted
  when: is_debian_compatible and not (is_debian_8_or_later or is_ubuntu_15_10_or_later)
- name: After - Restart slurmd after all config is done (RHEL 7 compatible)
  service:
    name: slurmd
    state: restarted
  when: is_rhel7_compatible
- name: After - Restart slurm after all config is done (RHEL 6 compatible)
  service:
    name: slurm
    state: restarted
  when: is_rhel6_compatible
#### For grid jobs the VSizeFactor entry must be commented out in the SLURM config
# not tested!
- hosts: slurm_master
  tasks:
    - name: Comment out the VSizeFactor for grid jobs
      lineinfile:
        path: /etc/slurm/slurm.conf
        regexp: '^(VSizeFactor.*)$'
        line: '#\1'
        backrefs: yes
        backup: yes
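# For reference only: the intended effect is to turn a slurm.conf line such as
#   VSizeFactor=101
# into
#   #VSizeFactor=101
# i.e. to disable SLURM's virtual memory size enforcement, which the comment
# above states is required for grid jobs. (The value 101 is illustrative.)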
###########
## not working as of now
# - name: After - Install pip in order to install shade needed to use os_server_volume ansible module
# yum:
# name: python-pip
# - name: After - Install shade needed to use os_server_volume ansible module
# pip:
# name: shade
# - name: After - attach wlcg volume to frontend host
# os_server_volume:
# state: present
# server: '{{ inventory_hostname }}'
# volume: wlcg
# - name: After - Install pip in order to install shade needed to use os_server_volume ansible module
# yum:
# name: python-pip
# - name: After - Install shade needed to use os_server_volume ansible module
# pip:
# name: shade
# - name: After - attach cvmfs-cache volume to compute node(s)
# os_server_volume:
# state: present
# server: '{{ ansible_hostname }}'
# volume: cvmfs-cache
---
#
# This playbook is for site-local customization to ElastiCluster's
# playbooks. It runs *after* any other playbook distributed with
# ElastiCluster has gotten its chance to run.
#
# An empty playbook is checked into the Git repository. If you make
# any local modifications, please run `git update-index
# --assume-unchanged after.yml` to avoid committing them accidentally
# into ElastiCluster's main branch.
# the nfs-server coincides with slurm-master
# the nfs-client coincides with slurm-worker
- hosts: slurm_master
tags:
- after
- local
vars:
NFS_EXPORTS:
- path: '/wlcg/session'
clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
options: 'rw,no_root_squash'
- path: '/wlcg/runtime'
clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
options: 'rw,no_root_squash'
- path: '/wlcg/cache'
clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
options: 'rw,no_root_squash'
tasks:
- name: After - Ensure shared dirs exist on nfs server
file:
path: '{{ item.path }}'
state: directory
with_items: '{{ NFS_EXPORTS }}'
- name: After - roles for nfs-server
include_role:
name: 'nfs-server'
- hosts: slurm_worker
tags:
- after
- local
vars:
NFS_MOUNTS:
- fs: '{{groups.slurm_master[0]}}:/wlcg/session'
mountpoint: '/wlcg/session'
options: 'rw,async,user,exec'
state: 'present'
- fs: '{{groups.slurm_master[0]}}:/wlcg/runtime'
mountpoint: '/wlcg/runtime'
options: 'rw,async,user,exec'
state: 'present'
- fs: '{{groups.slurm_master[0]}}:/wlcg/cache'
mountpoint: '/wlcg/cache'
options: 'rw,async,user,exec'
state: 'present'
tasks:
- name: 'After - Ensure {{ item.mountpoint }} directory exists and is owned by the centos user'
  file:
    path: '{{ item.mountpoint }}'
    state: directory
    owner: centos
    group: centos
  with_items: '{{ NFS_MOUNTS }}'
- name: After - mount nfs shares
  mount:
    name: '{{ item.mountpoint }}'
    src: '{{ item.fs }}'
    fstype: nfs
    opts: '{{ item.options | default("rw,async") }}'
    state: mounted
  with_items: '{{ NFS_MOUNTS }}'
- name: After - nfs-client - add to fstab
include_role:
name: 'nfs-client'
- name: After - Restart slurmd after all config is done (Debian 8+ / Ubuntu 15.10+)
  service:
    name: slurmd
    state: restarted
  when: is_debian_compatible and (is_debian_8_or_later or is_ubuntu_15_10_or_later)
- name: After - Restart slurm-llnl after all config is done (older Debian/Ubuntu)
  service:
    name: slurm-llnl
    state: restarted
  when: is_debian_compatible and not (is_debian_8_or_later or is_ubuntu_15_10_or_later)
- name: After - Restart slurmd after all config is done (RHEL 7 compatible)
  service:
    name: slurmd
    state: restarted
  when: is_rhel7_compatible
- name: After - Restart slurm after all config is done (RHEL 6 compatible)
  service:
    name: slurm
    state: restarted
  when: is_rhel6_compatible
#### For grid jobs the VSizeFactor entry must be commented out in the SLURM config
# not tested!
- hosts: slurm_master
  tasks:
    - name: Comment out the VSizeFactor for grid jobs
      lineinfile:
        path: /etc/slurm/slurm.conf
        regexp: '^(VSizeFactor.*)$'
        line: '#\1'
        backrefs: yes
        backup: yes
## not working as of now
# - name: After - Install pip in order to install shade needed to use os_server_volume ansible module
# yum:
# name: python-pip
# - name: After - Install shade needed to use os_server_volume ansible module
# pip:
# name: shade
# - name: After - attach wlcg volume to frontend host
# os_server_volume:
# state: present
# server: '{{ inventory_hostname }}'
# volume: wlcg
# - name: After - Install pip in order to install shade needed to use os_server_volume ansible module
# yum:
# name: python-pip
# - name: After - Install shade needed to use os_server_volume ansible module
# pip:
# name: shade
# - name: After - attach cvmfs-cache volume to compute node(s)
# os_server_volume:
# state: present
# server: '{{ ansible_hostname }}'
# volume: cvmfs-cache
ElastiCluster Ansible playbooks
===============================
This repository contains the modules and playbooks used by ElastiCluster
to configure the VMs. They can, however, be used independently of ElastiCluster.
The structure of the repository follows this schema::
| # Group variables
+-- group_vars
| +-- all # variables set on all hosts where playbooks run;
| # currently mainly used to provide conditionals
| # about OS version and features
|
| # Collection of playbooks divided by *role*
+-- roles
| - role-foo.yml # playbook for role `role-foo`
| - role-foo # directory containing stuff used by `role-foo`
| - files # files to be copied on the managed machine.
| - handlers # handlers used by the role
| - tasks # collection of tasks executed by the playbook
| - templates # templates used by the playbook
|
+-- site.yml
| # This is the main playbook. It includes all the playbooks created
| # in the `roles` directory. Each role is supposed to be applied only
| # to a specific group of nodes. For instance, the `ganglia` role
| # will configure only hosts in the `ganglia_monitor` or
| # `ganglia_master` groups.
|
+-- after.yml
| # Playbook executed by `site.yml` after all the other tasks have
| # successfully run. Can be used to add local customizations.
|
+-- modules
| # This directory contains extra Ansible modules
|
+-- examples
| # directory containing examples and code snippets.
|
+-- README.rst
The playbooks distributed in the ``roles/`` directory are documented in section
`"Playbooks distributed with ElastiCluster"
<http://elasticluster.readthedocs.io/en/latest/playbooks.html>`_ of the
`ElastiCluster manual <http://elasticluster.readthedocs.io/>`_. Some of the
roles are also accompanied by a small "README" file that states purpose and
customization variables.
Extra modules are defined in the ``modules`` directory. In order to
use them you need to either run ``ansible-playbook`` with option ``-M
modules``, **or** edit your Ansible configuration file and update the
``library`` option, **or** set the environment variable
``ANSIBLE_LIBRARY``. The latter is what the ElastiCluster main code does.
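
For example, assuming the commands below are run from the top of this
repository (the relative path ``modules`` is only illustrative), either of
the following makes the extra modules visible to Ansible::

    # pass the module path on the command line
    ansible-playbook -M modules site.yml

    # or export the environment variable (this is what ElastiCluster does)
    ANSIBLE_LIBRARY=modules ansible-playbook site.yml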
---
#
# This playbook is for site-local customization to ElastiCluster's
# playbooks. It runs *after* any other playbook distributed with
# ElastiCluster has gotten its chance to run.
#
# An empty playbook is checked into the Git repository. If you make
# any local modifications, please run `git update-index
# --assume-unchanged after.yml` to avoid committing them accidentally
# into ElastiCluster's main branch.
#
- name: Apply local customizations (after)
tags:
- after
- local
hosts: all
# by default these are no-op (empty task list)
roles: []
tasks: []
---
#
# This playbook is for site-local customization to ElastiCluster's
# playbooks. It runs *after* any other playbook distributed with
# ElastiCluster has gotten its chance to run.
#
# An empty playbook is checked into the Git repository. If you make
# any local modifications, please run `git update-index
# --assume-unchanged after.yml` to avoid committing them accidentally
# into ElastiCluster's main branch.
# the nfs-server coincides with slurm-master
# the nfs-client coincides with slurm-worker
- hosts: slurm_master
tags:
- after
vars:
NFS_EXPORTS:
- path: '/wlcg/session'
clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
options: 'rw,no_root_squash'
- path: '/wlcg/runtime'
clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
options: 'rw,no_root_squash'
- path: '/wlcg/cache'
clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
options: 'rw,no_root_squash'
tasks:
- name: After - Ensure shared dirs exist on nfs server
file:
path: '{{ item.path }}'
state: directory
with_items: '{{ NFS_EXPORTS }}'
- name: After - roles for nfs-server
include_role:
name: 'nfs-server'
- hosts: slurm_worker
tags:
- after
vars:
NFS_MOUNTS:
- fs: '{{groups.slurm_master[0]}}:/wlcg/session'
mountpoint: '/wlcg/session'
options: 'rw,async'
state: 'present'
- fs: '{{groups.slurm_master[0]}}:/wlcg/runtime'
mountpoint: '/wlcg/runtime'
options: 'rw,async'
state: 'present'
- fs: '{{groups.slurm_master[0]}}:/wlcg/cache'
mountpoint: '/wlcg/cache'
options: 'rw,async'
state: 'present'
tasks:
- name: 'After - Ensure {{ item.mountpoint }} directory exists and is owned by root'
  file:
    path: '{{ item.mountpoint }}'
    state: directory
    owner: root
    group: root
  with_items: '{{ NFS_MOUNTS }}'
- name: After - mount nfs shares
  mount:
    name: '{{ item.mountpoint }}'
    src: '{{ item.fs }}'
    fstype: nfs
    opts: '{{ item.options | default("rw,async") }}'
    state: mounted
  with_items: '{{ NFS_MOUNTS }}'
- name: After - nfs-client - add to fstab
include_role:
name: 'nfs-client'
- name: After - Restart slurmd after all config is done (Debian 8+ / Ubuntu 15.10+)
  service:
    name: slurmd
    state: restarted
  when: is_debian_compatible and (is_debian_8_or_later or is_ubuntu_15_10_or_later)
- name: After - Restart slurm-llnl after all config is done (older Debian/Ubuntu)
  service:
    name: slurm-llnl
    state: restarted
  when: is_debian_compatible and not (is_debian_8_or_later or is_ubuntu_15_10_or_later)
- name: After - Restart slurmd after all config is done (RHEL 7 compatible)
  service:
    name: slurmd
    state: restarted
  when: is_rhel7_compatible
- name: After - Restart slurm after all config is done (RHEL 6 compatible)
  service:
    name: slurm
    state: restarted
  when: is_rhel6_compatible
#### For grid jobs the VSizeFactor entry must be commented out in the SLURM config
# not tested!
- name: Comment out the VSizeFactor for grid jobs
  replace:
    path: /etc/slurm/slurm.conf
    regexp: '^VSizeFactor'
    replace: '#VSizeFactor'
    backup: yes
###########
## not working as of now
# - name: After - Install pip in order to install shade needed to use os_server_volume ansible module
# yum:
# name: python-pip
# - name: After - Install shade needed to use os_server_volume ansible module
# pip:
# name: shade
# - name: After - attach wlcg volume to frontend host
# os_server_volume:
# state: present
# server: '{{ inventory_hostname }}'
# volume: wlcg
# - name: After - Install pip in order to install shade needed to use os_server_volume ansible module
# yum:
# name: python-pip
# - name: After - Install shade needed to use os_server_volume ansible module
# pip:
# name: shade
# - name: After - attach cvmfs-cache volume to compute node(s)
# os_server_volume:
# state: present
# server: '{{ ansible_hostname }}'
# volume: cvmfs-cache
---
#
# This playbook is for site-local customization to ElastiCluster's
# playbooks. It runs *after* any other playbook distributed with
# ElastiCluster has gotten its chance to run.
#
# An empty playbook is checked into the Git repository. If you make
# any local modifications, please run `git update-index
# --assume-unchanged after.yml` to avoid committing them accidentally
# into ElastiCluster's main branch.
# the nfs-server coincides with slurm-master
# the nfs-client coincides with slurm-worker
- hosts: slurm_master
tags:
- after
- local
vars:
NFS_EXPORTS:
- path: '/wlcg/session'
clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
options: 'rw,no_root_squash'
- path: '/wlcg/runtime'
clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
options: 'rw,no_root_squash'
- path: '/wlcg/cache'
clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
options: 'rw,no_root_squash'
tasks:
- name: After - Ensure shared dirs exist on nfs server
file:
path: '{{ item.path }}'
state: directory
with_items: '{{ NFS_EXPORTS }}'
- name: After - roles for nfs-server
include_role:
name: 'nfs-server'
- hosts: slurm_worker
tags:
- after
- local
vars:
NFS_MOUNTS:
- fs: '{{groups.slurm_master[0]}}:/wlcg/session'
mountpoint: '/wlcg/session'
options: 'rw,async,user,exec'
state: 'present'
- fs: '{{groups.slurm_master[0]}}:/wlcg/runtime'
mountpoint: '/wlcg/runtime'
options: 'rw,async,user,exec'
state: 'present'
- fs: '{{groups.slurm_master[0]}}:/wlcg/cache'
mountpoint: '/wlcg/cache'
options: 'rw,async,user,exec'
state: 'present'
tasks:
- name: 'After - Ensure {{ item.mountpoint }} directory exists and is owned by the centos user'
  file:
    path: '{{ item.mountpoint }}'
    state: directory
    owner: centos
    group: centos
  with_items: '{{ NFS_MOUNTS }}'
- name: After - mount nfs shares
  mount:
    name: '{{ item.mountpoint }}'
    src: '{{ item.fs }}'
    fstype: nfs
    opts: '{{ item.options | default("rw,async") }}'
    state: mounted
  with_items: '{{ NFS_MOUNTS }}'
- name: After - nfs-client - add to fstab
include_role:
name: 'nfs-client'
- name: After - Restart slurmd after all config is done (Debian 8+ / Ubuntu 15.10+)
  service:
    name: slurmd
    state: restarted
  when: is_debian_compatible and (is_debian_8_or_later or is_ubuntu_15_10_or_later)
- name: After - Restart slurm-llnl after all config is done (older Debian/Ubuntu)
  service:
    name: slurm-llnl
    state: restarted
  when: is_debian_compatible and not (is_debian_8_or_later or is_ubuntu_15_10_or_later)
- name: After - Restart slurmd after all config is done (RHEL 7 compatible)
  service:
    name: slurmd
    state: restarted
  when: is_rhel7_compatible
- name: After - Restart slurm after all config is done (RHEL 6 compatible)
  service:
    name: slurm
    state: restarted
  when: is_rhel6_compatible
#### For grid jobs the VSizeFactor entry must be commented out in the SLURM config
# not tested!
- name: Comment out the VSizeFactor for grid jobs
  replace:
    path: /etc/slurm/slurm.conf
    regexp: '^VSizeFactor'
    replace: '#VSizeFactor'
    backup: yes
## not working as of now
# - name: After - Install pip in order to install shade needed to use os_server_volume ansible module
# yum:
# name: python-pip
# - name: After - Install shade needed to use os_server_volume ansible module
# pip:
# name: shade
# - name: After - attach wlcg volume to frontend host
# os_server_volume:
# state: present
# server: '{{ inventory_hostname }}'