Diffstat (limited to 'ansible/roles')
-rw-r--r--  ansible/roles/application-install/defaults/main.yml                  |  1
-rw-r--r--  ansible/roles/application-install/tasks/main.yml                     | 22
-rw-r--r--  ansible/roles/application-override/tasks/main.yml                    | 16
-rw-r--r--  ansible/roles/application/defaults/main.yml                          | 11
-rw-r--r--  ansible/roles/application/tasks/custom_role.yml (renamed from ansible/roles/application-install/tasks/custom_role.yml) |  2
-rw-r--r--  ansible/roles/application/tasks/install.yml (renamed from ansible/roles/application-install/tasks/install.yml) | 30
-rw-r--r--  ansible/roles/application/tasks/main.yml                             | 24
-rw-r--r--  ansible/roles/application/tasks/post-install.yml (renamed from ansible/roles/application-install/tasks/post-install.yml) |  2
-rw-r--r--  ansible/roles/application/tasks/pre-install.yml                      |  5
-rw-r--r--  ansible/roles/application/tasks/transfer-helm-charts.yml (renamed from ansible/roles/application-install/tasks/pre-install.yml) |  9
-rw-r--r--  ansible/roles/chrony/defaults/main.yml                               | 16
-rw-r--r--  ansible/roles/chrony/handlers/main.yml                               |  5
-rw-r--r--  ansible/roles/chrony/tasks/main.yml                                  | 26
-rw-r--r--  ansible/roles/chrony/templates/chrony.conf.j2                        | 22
-rw-r--r--  ansible/roles/firewall/tasks/firewall-disable.yml                    |  1
-rw-r--r--  ansible/roles/kubectl/defaults/main.yml                              |  5
-rw-r--r--  ansible/roles/kubectl/tasks/main.yml                                 | 10
-rw-r--r--  ansible/roles/kubectl/templates/kube_config.j2 (renamed from ansible/roles/rancher/templates/kube_config.j2) |  2
-rw-r--r--  ansible/roles/nginx/templates/nginx.conf.j2                          |  1
-rw-r--r--  ansible/roles/package-repository-check/tasks/RedHat.yml              | 20
-rw-r--r--  ansible/roles/package-repository-check/tasks/main.yml                | 12
-rw-r--r--  ansible/roles/rancher/defaults/main.yml                              |  4
-rw-r--r--  ansible/roles/rancher/tasks/main.yml                                 |  2
-rw-r--r--  ansible/roles/rancher/tasks/rancher_agent.yml                        |  4
-rw-r--r--  ansible/roles/rancher/tasks/rancher_health.yml                       |  8
-rw-r--r--  ansible/roles/rancher/tasks/rancher_server.yml                       | 14
-rw-r--r--  ansible/roles/resource-data/tasks/main.yml                           | 27
27 files changed, 225 insertions(+), 76 deletions(-)
diff --git a/ansible/roles/application-install/defaults/main.yml b/ansible/roles/application-install/defaults/main.yml
deleted file mode 100644
index 473fbb80..00000000
--- a/ansible/roles/application-install/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
-phase: install
diff --git a/ansible/roles/application-install/tasks/main.yml b/ansible/roles/application-install/tasks/main.yml
deleted file mode 100644
index ba522792..00000000
--- a/ansible/roles/application-install/tasks/main.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- debug:
- msg: "phase is {{ phase }}"
-
-- name: Check if install needed
- block:
- - name: "Does {{ app_helm_charts_install_directory }} exist and contain Helm Charts"
- find:
- paths: "{{ app_helm_charts_install_directory }}"
- recurse: yes
- delegate_to: localhost
- register: charts_files
- - name: Set install active fact
- set_fact:
- install_needed: "{{ true if charts_files.matched | int > 0 else false }}"
- when: phase == "pre-install"
-
-- include_tasks: "{{ phase }}.yml"
- when: install_needed
-
-- debug:
- msg: "Install needed {{ install_needed }}"
diff --git a/ansible/roles/application-override/tasks/main.yml b/ansible/roles/application-override/tasks/main.yml
new file mode 100644
index 00000000..1ecf7c79
--- /dev/null
+++ b/ansible/roles/application-override/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+# Role for generating Helm override.yml file
+- name: Register root certificate
+ slurp:
+ src: "{{ playbook_dir }}/certs/rootCA.crt"
+ register: root_cert
+ delegate_to: localhost
+
+- name: "Set root ca certificate"
+ set_fact:
+ merged_overrides: "{{ overrides | default({}) | combine({'global': {'cacert': root_cert.content | b64decode}}, recursive=True) }}"
+
+- name: "Create {{ app_helm_override_file }}"
+ copy:
+ dest: "{{ app_helm_override_file }}"
+ content: "{{ merged_overrides | to_nice_yaml }}"
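For reference, with no pre-existing 'overrides' variable the role above would write an override file shaped roughly like the sketch below; the certificate body is illustrative only:

    global:
      cacert: |
        -----BEGIN CERTIFICATE-----
        (decoded contents of certs/rootCA.crt)
        -----END CERTIFICATE-----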
diff --git a/ansible/roles/application/defaults/main.yml b/ansible/roles/application/defaults/main.yml
new file mode 100644
index 00000000..dec17601
--- /dev/null
+++ b/ansible/roles/application/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+helm_repository_name: local
+helm_repository_url: http://127.0.0.1:8879
+# Override file generation for the Helm application can be customized by any role
+# provided by the user and found by Ansible on roles_path.
+# By default the override file is generated by the 'application-override' role,
+# which is specific to the offline installer (for ONAP) as it generates the server
+# certificate needed by the offline installer to simulate the internet.
+app_skip_helm_override: false
+app_helm_override_role: application-override
+app_helm_override_file: "{{ app_data_path }}/override.yaml"
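A minimal sketch of tuning these defaults from group_vars, assuming a user-provided role named 'my-helm-override' exists on roles_path (the role name and file name are illustrative):

    # group_vars/all.yml (illustrative)
    app_helm_override_role: my-helm-override
    app_helm_override_file: "{{ app_data_path }}/my-override.yaml"
    # or skip override file generation entirely:
    # app_skip_helm_override: true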
diff --git a/ansible/roles/application-install/tasks/custom_role.yml b/ansible/roles/application/tasks/custom_role.yml
index b6f6f351..f0b9a84f 100644
--- a/ansible/roles/application-install/tasks/custom_role.yml
+++ b/ansible/roles/application/tasks/custom_role.yml
@@ -1,6 +1,6 @@
---
# Caller fills application_custom_role variable with actual role name.
-- name: "Execute custom role {{ application_custom_role }} {{ phase }} Helm install."
+- name: "Execute custom role {{ application_custom_role }} for Helm install."
include_role:
name: "{{ application_custom_role }}"
when:
diff --git a/ansible/roles/application-install/tasks/install.yml b/ansible/roles/application/tasks/install.yml
index 96f66b90..103ecc8b 100644
--- a/ansible/roles/application-install/tasks/install.yml
+++ b/ansible/roles/application/tasks/install.yml
@@ -6,8 +6,8 @@
--skip-refresh
changed_when: true # init is always changed type of action
-#A correct way to implement this would be using --wait option in helm init invocation.
-#However, it does not work due to https://github.com/helm/helm/issues/4031 (fixed in newer helm release)
+# A correct way to implement this would be using --wait option in helm init invocation.
+# However, it does not work due to https://github.com/helm/helm/issues/4031 (fixed in newer helm release)
- name: "Wait for helm upgrade to finish"
command: "{{ helm_bin_dir }}/helm version --tiller-connection-timeout 10"
register: result
@@ -42,7 +42,7 @@
- "'Error: no repositories to show' not in helm_repo_list.stderr"
- name: Helm Add Repo
- command: "{{ helm_bin_dir }}/helm repo add {{ helm_repository_name }} {{ helm_repository_url }}"
+ command: "{{ helm_bin_dir }}/helm repo add {{ helm_repository_name | mandatory }} {{ helm_repository_url | mandatory }}"
when: "'local' not in helm_repo_list.stdout"
changed_when: true # when executed its a changed type of action
@@ -50,24 +50,14 @@
make:
chdir: "{{ app_helm_charts_infra_directory }}"
target: "{{ item }}"
- with_items: "{{ app_helm_build_targets }}"
+ loop: "{{ app_helm_build_targets }}"
environment:
PATH: "{{ helm_bin_dir }}:{{ ansible_env.PATH }}"
-- name: Register root certificate
- slurp:
- src: "{{ playbook_dir }}/certs/rootCA.crt"
- register: root_cert
- delegate_to: localhost
-
-# WA: this is required because deploy plugin dont process params properly
-- name: Create override file with global.cacert
- copy:
- dest: "{{ app_data_path }}/override.yaml"
- content: |
- global:
- cacert: |
- {{ root_cert['content'] | b64decode | indent( width=4, indentfirst=False) }}
+- name: Generate Helm application override file with custom role
+ include_role:
+ name: "{{ app_helm_override_role }}"
+ when: not app_skip_helm_override
- name: Check for deploy plugin presence
stat:
@@ -81,5 +71,7 @@
{{ app_helm_release_name }}
{{ helm_repository_name }}/{{ app_helm_chart_name }}
--namespace {{ app_kubernetes_namespace }}
- -f {{ app_data_path }}/override.yaml
+ {{ '' if app_skip_helm_override else '-f ' + app_helm_override_file }}
changed_when: true # when executed its a changed type of action
+ register: helm_install
+ failed_when: helm_install.stderr
diff --git a/ansible/roles/application/tasks/main.yml b/ansible/roles/application/tasks/main.yml
new file mode 100644
index 00000000..3018e95f
--- /dev/null
+++ b/ansible/roles/application/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Check if application Helm charts exist and install is even needed
+ block:
+ - name: "Does {{ app_helm_charts_install_directory }} directory exist and contain Helm Charts"
+ find:
+ paths: "{{ app_helm_charts_install_directory }}"
+ recurse: true
+ delegate_to: localhost
+ register: charts_files
+ - name: Set install active fact
+ set_fact:
+ install_needed: "{{ true if charts_files.matched | int > 0 else false }}"
+
+- name: Install app with Helm charts
+ block:
+ - include_tasks: transfer-helm-charts.yml
+ - include_tasks: pre-install.yml
+ - include_tasks: install.yml
+ - include_tasks: post-install.yml
+ when: install_needed
+
+- debug:
+ msg: "NOTE, nothing done as application Helm charts does not exist!"
+ when: not install_needed
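A minimal sketch of a play wiring in the consolidated 'application' role; the host group and chart directory are assumptions for illustration only:

    - name: Install application with Helm charts (illustrative)
      hosts: infrastructure
      roles:
        - role: application
      vars:
        app_helm_charts_install_directory: /opt/installer/helm_charts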
diff --git a/ansible/roles/application-install/tasks/post-install.yml b/ansible/roles/application/tasks/post-install.yml
index 10594233..5464cb46 100644
--- a/ansible/roles/application-install/tasks/post-install.yml
+++ b/ansible/roles/application/tasks/post-install.yml
@@ -2,4 +2,4 @@
- name: "Execute custome role {{ application_post_install_role }} if defined."
include_tasks: custom_role.yml
vars:
- application_custom_role: "{{ application_post_install_role }}"
+ application_custom_role: "{{ application_post_install_role | default('') }}"
diff --git a/ansible/roles/application/tasks/pre-install.yml b/ansible/roles/application/tasks/pre-install.yml
new file mode 100644
index 00000000..74f1548f
--- /dev/null
+++ b/ansible/roles/application/tasks/pre-install.yml
@@ -0,0 +1,5 @@
+---
+- name: "Execute custom role {{ application_pre_install_role }} if defined."
+ include_tasks: custom_role.yml
+ vars:
+ application_custom_role: "{{ application_pre_install_role | default('') }}"
diff --git a/ansible/roles/application-install/tasks/pre-install.yml b/ansible/roles/application/tasks/transfer-helm-charts.yml
index bf6619b0..0cd7c02f 100644
--- a/ansible/roles/application-install/tasks/pre-install.yml
+++ b/ansible/roles/application/tasks/transfer-helm-charts.yml
@@ -38,14 +38,7 @@
copy:
src: "{{ item.path }}"
dest: "{{ helm_home_dir.stdout }}/plugins"
- directory_mode: yes
+ directory_mode: true
mode: 0755
with_items: "{{ list_of_plugins.files }}"
- become: true
when: app_helm_plugins_directory is defined and app_helm_plugins_directory is not none
-
-- name: "Execute custom role {{ application_pre_install_role }} if defined."
- include_tasks: custom_role.yml
- vars:
- application_custom_role: "{{ application_pre_install_role }}"
-
diff --git a/ansible/roles/chrony/defaults/main.yml b/ansible/roles/chrony/defaults/main.yml
new file mode 100644
index 00000000..af433dac
--- /dev/null
+++ b/ansible/roles/chrony/defaults/main.yml
@@ -0,0 +1,16 @@
+---
+timesync: {}
+chrony:
+ servers: "{{ timesync.servers | default([hostvars[groups.infrastructure[0]].cluster_ip]) }}" # chronyd's NTP servers
+ slewclock: "{{ timesync.slewclock | default(false) }}" # chronyd's makestep property
+ timezone: "{{ timesync.timezone | default('Universal') }}" # Timezone name according to tz database
+ makestep: '1 -1'
+ maxjitter: 10 # Max allowed jitter when using infra as the time source, as it may be unstable due to pretending to be a stratum 1 time source
+ initstepslew: 30
+ conf:
+ RedHat:
+ config_file: /etc/chrony.conf
+ driftfile: /var/lib/chrony/drift
+ Debian:
+ config_file: /etc/chrony/chrony.conf
+ driftfile: /var/lib/chrony/chrony.drift
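A minimal sketch of a user-supplied timesync override (values are illustrative); when timesync.servers is set, the node is configured as a chrony client of those servers instead of the infrastructure host:

    timesync:
      servers:
        - 0.pool.ntp.org
        - 1.pool.ntp.org
      slewclock: true
      timezone: Europe/Prague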
diff --git a/ansible/roles/chrony/handlers/main.yml b/ansible/roles/chrony/handlers/main.yml
new file mode 100644
index 00000000..80ab9fa9
--- /dev/null
+++ b/ansible/roles/chrony/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Restart chronyd
+ systemd:
+ name: chronyd
+ state: restarted
diff --git a/ansible/roles/chrony/tasks/main.yml b/ansible/roles/chrony/tasks/main.yml
new file mode 100644
index 00000000..69a11587
--- /dev/null
+++ b/ansible/roles/chrony/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+- name: Check if server mode
+ set_fact:
+ chrony_mode: 'server'
+ when: "'infrastructure' in group_names and timesync.servers is not defined"
+
+- name: Check if client mode
+ set_fact:
+ chrony_mode: 'client'
+ when: "timesync.servers is defined or 'infrastructure' not in group_names"
+
+- name: "Upload chronyd {{ chrony_mode }} configuration"
+ template:
+ src: "chrony.conf.j2"
+ dest: "{{ chrony['conf'][ansible_os_family]['config_file'] }}"
+ notify: Restart chronyd
+
+- name: Ensure chronyd is enabled/running
+ systemd:
+ name: chronyd
+ state: started
+ enabled: true
+
+- name: Setup timezone
+ timezone:
+ name: "{{ chrony.timezone }}"
diff --git a/ansible/roles/chrony/templates/chrony.conf.j2 b/ansible/roles/chrony/templates/chrony.conf.j2
new file mode 100644
index 00000000..3bfb4e40
--- /dev/null
+++ b/ansible/roles/chrony/templates/chrony.conf.j2
@@ -0,0 +1,22 @@
+{% if chrony_mode == 'server' %}
+local stratum 1
+allow
+{% elif chrony_mode == 'client' %}
+{% for tserver in chrony.servers %}
+server {{ tserver }} iburst
+{% endfor %}
+{% if chrony.slewclock == false %}
+{# Step the time by default #}
+makestep {{ chrony.makestep }}
+{% else %}
+{# Slew the clock but step at boot time if time error larger than 30 seconds #}
+initstepslew {{ chrony.initstepslew }}{% for tserver in chrony.servers %} {{ tserver }}{% endfor %}
+
+{% endif %}
+{% if timesync.servers is not defined %}
+maxjitter {{ chrony.maxjitter }}
+{% endif %}
+{% endif %}
+driftfile {{ chrony['conf'][ansible_os_family]['driftfile'] }}
+rtcsync
+logdir /var/log/chrony
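For a RedHat node rendered in client mode with the defaults above (slewclock false, no user-defined timesync.servers, infra address illustrative), the template would produce roughly:

    server 10.8.8.4 iburst
    makestep 1 -1
    maxjitter 10
    driftfile /var/lib/chrony/drift
    rtcsync
    logdir /var/log/chrony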
diff --git a/ansible/roles/firewall/tasks/firewall-disable.yml b/ansible/roles/firewall/tasks/firewall-disable.yml
index f406d943..5f1ab537 100644
--- a/ansible/roles/firewall/tasks/firewall-disable.yml
+++ b/ansible/roles/firewall/tasks/firewall-disable.yml
@@ -13,3 +13,4 @@
- name: Flush iptables
iptables:
flush: true
+ changed_when: false # for idempotence
diff --git a/ansible/roles/kubectl/defaults/main.yml b/ansible/roles/kubectl/defaults/main.yml
new file mode 100644
index 00000000..78c15c75
--- /dev/null
+++ b/ansible/roles/kubectl/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+kubectl_bin_dir: /usr/local/bin
+kube_directory: ~/.kube
+# Defaulting to rancher setup
+kube_server: "https://{{ ansible_host }}:8080/r/projects/{{ k8s_env_id | mandatory }}/kubernetes:6443"
\ No newline at end of file
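The default kube_server targets a Rancher-managed cluster; a minimal sketch of pointing kubectl at a plain Kubernetes API server instead (address and directory are illustrative):

    kube_server: "https://10.8.8.4:6443"
    kube_directory: /root/.kube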
diff --git a/ansible/roles/kubectl/tasks/main.yml b/ansible/roles/kubectl/tasks/main.yml
index 7c77c3c5..9ecb5c44 100644
--- a/ansible/roles/kubectl/tasks/main.yml
+++ b/ansible/roles/kubectl/tasks/main.yml
@@ -5,3 +5,13 @@
dest: "{{ kubectl_bin_dir }}/kubectl"
remote_src: true
mode: 0755
+
+- name: Ensure .kube directory exists
+ file:
+ path: "{{ kube_directory }}"
+ state: directory
+
+- name: Create kube config
+ template:
+ src: kube_config.j2
+ dest: "{{ kube_directory }}/config"
diff --git a/ansible/roles/rancher/templates/kube_config.j2 b/ansible/roles/kubectl/templates/kube_config.j2
index 87f332e6..586c59d4 100644
--- a/ansible/roles/rancher/templates/kube_config.j2
+++ b/ansible/roles/kubectl/templates/kube_config.j2
@@ -4,7 +4,7 @@ clusters:
- cluster:
api-version: v1
insecure-skip-tls-verify: true
- server: "https://{{ ansible_host }}:8080/r/projects/{{ k8s_env_id }}/kubernetes:6443"
+ server: "{{ kube_server }}"
name: "{{ app_name }}"
contexts:
- context:
diff --git a/ansible/roles/nginx/templates/nginx.conf.j2 b/ansible/roles/nginx/templates/nginx.conf.j2
index ff9d2a9c..9860a168 100644
--- a/ansible/roles/nginx/templates/nginx.conf.j2
+++ b/ansible/roles/nginx/templates/nginx.conf.j2
@@ -5,6 +5,7 @@ events {
}
http {
+ server_names_hash_bucket_size 64;
error_log /var/log/nginx/error.log debug;
access_log /var/log/nginx/access.log;
diff --git a/ansible/roles/package-repository-check/tasks/RedHat.yml b/ansible/roles/package-repository-check/tasks/RedHat.yml
new file mode 100644
index 00000000..ed496f99
--- /dev/null
+++ b/ansible/roles/package-repository-check/tasks/RedHat.yml
@@ -0,0 +1,20 @@
+---
+- name: verify
+ block:
+ # Clean cache prior to refreshing
+ - name: Clean yum cache
+ command: yum clean all
+ args:
+ warn: false
+ # Refresh cache to ensure repo is reachable
+ - name: Update yum cache
+ yum:
+ update_cache: yes
+ state: latest
+ tags:
+ - skip_ansible_lint # Prevent '[403] Package installs should not use latest' ansible lint task rule
+ rescue:
+ - name: Fail if yum cache updating failed
+ fail:
+ msg: "Couldn't refresh yum cache, repositories not configured properly. Check ansible logs for details."
+ become: true
diff --git a/ansible/roles/package-repository-check/tasks/main.yml b/ansible/roles/package-repository-check/tasks/main.yml
new file mode 100644
index 00000000..ac63eceb
--- /dev/null
+++ b/ansible/roles/package-repository-check/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+# Purpose of this role is to check whether configured repositories are working.
+#
+# Successful prior invocation of the 'package-repository' role on 'infrastructure' hosts
+# is a prerequisite for playing this one on the 'infrastructure' group.
+#
+# Successful prior invocation of the 'package-repository' and 'nginx' roles on infrastructure hosts
+# is a prerequisite for playing this one on the 'kubernetes' group.
+
+# Set of tasks designed to fail fast if configured repos are not functioning properly
+- include_tasks: "{{ ansible_os_family }}.yml"
+ when: ansible_os_family == 'RedHat'
diff --git a/ansible/roles/rancher/defaults/main.yml b/ansible/roles/rancher/defaults/main.yml
new file mode 100644
index 00000000..8edc5180
--- /dev/null
+++ b/ansible/roles/rancher/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+rancher_server_url: "http://{{ hostvars[groups.infrastructure.0].ansible_host }}:8080"
+rancher_remove_other_env: true
+rancher_redeploy_k8s_env: true
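A minimal sketch of overriding these new defaults to keep any pre-existing Rancher environments instead of removing or redeploying them:

    rancher_remove_other_env: false
    rancher_redeploy_k8s_env: false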
diff --git a/ansible/roles/rancher/tasks/main.yml b/ansible/roles/rancher/tasks/main.yml
index 1370a39f..045363d0 100644
--- a/ansible/roles/rancher/tasks/main.yml
+++ b/ansible/roles/rancher/tasks/main.yml
@@ -1,2 +1,2 @@
---
-- include_tasks: "rancher_{{ rancher_role }}.yml"
+- include_tasks: "rancher_{{ mode }}.yml"
diff --git a/ansible/roles/rancher/tasks/rancher_agent.yml b/ansible/roles/rancher/tasks/rancher_agent.yml
index 4c9cb8dd..091503c7 100644
--- a/ansible/roles/rancher/tasks/rancher_agent.yml
+++ b/ansible/roles/rancher/tasks/rancher_agent.yml
@@ -7,7 +7,7 @@
volumes:
- "/var/run/docker.sock:/var/run/docker.sock"
- "/var/lib/rancher:/var/lib/rancher"
- auto_remove: yes
- privileged: yes
+ auto_remove: true
+ privileged: true
vars:
server_hostvars: "{{ hostvars[groups.infrastructure.0] }}"
diff --git a/ansible/roles/rancher/tasks/rancher_health.yml b/ansible/roles/rancher/tasks/rancher_health.yml
new file mode 100644
index 00000000..b0323739
--- /dev/null
+++ b/ansible/roles/rancher/tasks/rancher_health.yml
@@ -0,0 +1,8 @@
+---
+- name: Check cluster health
+ uri:
+ url: "{{ rancher_server_url }}/v2-beta/projects/{{ k8s_env_id }}"
+ register: env_info
+ retries: 30
+ delay: 15
+ until: "env_info.json.healthState == 'healthy'"
diff --git a/ansible/roles/rancher/tasks/rancher_server.yml b/ansible/roles/rancher/tasks/rancher_server.yml
index 64b35e4c..ebee6cc7 100644
--- a/ansible/roles/rancher/tasks/rancher_server.yml
+++ b/ansible/roles/rancher/tasks/rancher_server.yml
@@ -2,7 +2,7 @@
# DO NOT ADD SPACE AROUND ';'
- name: Start rancher/server:{{ rancher_server_version }}
docker_container:
- name: rancher_server
+ name: rancher-server
image: rancher/server:{{ rancher_server_version }}
command: ["sh", "-c", "/usr/sbin/update-ca-certificates;/usr/bin/entry /usr/bin/s6-svscan /service"]
ports: 8080:8080
@@ -19,7 +19,7 @@
delay: 30
until: not response.failed
-- name: Create kubernetes environment
+- name: Create rancher kubernetes environment
rancher_k8s_environment:
name: "{{ app_name }}"
descr: "Kubernetes environment for {{ app_name }}"
@@ -39,13 +39,3 @@
key_private: "{{ env.data.apikey.private }}"
rancher_agent_image: "{{ env.data.registration_tokens.image }}"
rancher_agent_reg_url: "{{ env.data.registration_tokens.reg_url }}"
-
-- name: Ensure .kube directory exists
- file:
- path: "{{ kube_directory }}"
- state: directory
-
-- name: Create kube config
- template:
- src: kube_config.j2
- dest: "{{ kube_directory }}/config"
diff --git a/ansible/roles/resource-data/tasks/main.yml b/ansible/roles/resource-data/tasks/main.yml
index 41046d81..023a160e 100644
--- a/ansible/roles/resource-data/tasks/main.yml
+++ b/ansible/roles/resource-data/tasks/main.yml
@@ -3,23 +3,34 @@
block:
- name: Check if source dir and files are present
stat:
- path: "{{ item.source }}"
+ path: "{{ item.source_dir }}/{{ item.source_filename }}"
get_checksum: false
loop:
- - { source: "{{ resources_dir }}/{{ resources_filename | default('thisdoesnotexists', true) }}",
- target: "{{ app_data_path }}/{{ resources_filename | default('thisdoesnotexists', true) }}" }
- - { source: "{{ resources_dir }}/{{ aux_resources_filename | default('thisdoesnotexists', true) }}",
- target: "{{ aux_data_path }}/{{ aux_resources_filename | default('thisdoesnotexists', true) }}" }
+ - { source_dir: "{{ resources_dir | default('', true) }}",
+ source_filename: "{{ resources_filename | default('', true) }}",
+ target_dir: "{{ app_data_path | default('', true) }}",
+ target_filename: "{{ resources_filename | default('', true) }}" }
+
+ - { source_dir: "{{ resources_dir | default('', true) }}",
+ source_filename: "{{ aux_resources_filename | default('', true) }}",
+ target_dir: "{{ aux_data_path | default('', true) }}",
+ target_filename: "{{ aux_resources_filename | default('', true) }}" }
register: source_path
+ when:
+ - item.source_dir | length > 0
+ - item.source_filename | length > 0
+ - item.target_dir | length > 0
+ - item.target_filename | length > 0
- name: Create initial resources list of dicts
set_fact:
to_be_uploaded_resources_list: "{{
to_be_uploaded_resources_list | default([]) + [
- {'file': item.item.target | basename,
- 'destination_dir': item.item.target | dirname } ] }}"
+ {'file': item.item.target_filename,
+ 'destination_dir': item.item.target_dir } ] }}"
loop: "{{ source_path.results }}"
- when: item.stat.exists
+ when: item.stat is defined
+ failed_when: not item.stat.exists
when: inventory_hostname in groups.resources
- name: "Upload resource files {{ hostvars[groups.resources.0].to_be_uploaded_resources_list }} to infrastructure"