Diffstat (limited to 'ansible/roles')
-rw-r--r--  ansible/roles/application-install/defaults/main.yml  |   1
-rw-r--r--  ansible/roles/application-install/tasks/install.yml  |  39
-rw-r--r--  ansible/roles/application-install/tasks/main.yml     |   5
-rw-r--r--  ansible/roles/nexus/defaults/main.yml                |   2
-rw-r--r--  ansible/roles/nexus/files/configure.groovy           |  39
-rw-r--r--  ansible/roles/nexus/tasks/configure.yml              |  36
-rw-r--r--  ansible/roles/nexus/tasks/insert-images.yml          |  21
-rw-r--r--  ansible/roles/nexus/tasks/install.yml                |  30
-rw-r--r--  ansible/roles/nexus/tasks/main.yml                   |   2
-rw-r--r--  ansible/roles/nexus/tasks/runtime-populate.yml       |  12
-rw-r--r--  ansible/roles/nexus/vars/main.yml                    |   1
-rw-r--r--  ansible/roles/nginx/tasks/main.yml                   |  37
-rw-r--r--  ansible/roles/nginx/templates/nginx.conf.j2          | 106
-rw-r--r--  ansible/roles/rancher/tasks/main.yml                 |   2
-rw-r--r--  ansible/roles/rancher/tasks/rancher_agent.yml        |  15
-rw-r--r--  ansible/roles/rancher/tasks/rancher_server.yml       |  53
-rw-r--r--  ansible/roles/rancher/templates/kube_config.j2       |  21
-rw-r--r--  ansible/roles/resource-data/tasks/main.yml           |   2
-rw-r--r--  ansible/roles/resource-data/tasks/nfs-upload.yml     |  54
-rw-r--r--  ansible/roles/resource-data/tasks/ssh-upload.yml     |  61
-rw-r--r--  ansible/roles/vncserver/tasks/main.yml               |  19
21 files changed, 558 insertions, 0 deletions
diff --git a/ansible/roles/application-install/defaults/main.yml b/ansible/roles/application-install/defaults/main.yml
new file mode 100644
index 00000000..473fbb80
--- /dev/null
+++ b/ansible/roles/application-install/defaults/main.yml
@@ -0,0 +1 @@
+phase: install
diff --git a/ansible/roles/application-install/tasks/install.yml b/ansible/roles/application-install/tasks/install.yml
new file mode 100644
index 00000000..54b64439
--- /dev/null
+++ b/ansible/roles/application-install/tasks/install.yml
@@ -0,0 +1,39 @@
+---
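+# 'helm init --upgrade' installs Tiller or upgrades an existing deployment;
+# --skip-refresh skips downloading the chart repository index, which is
+# assumed to be unreachable in an offline installation.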
+- name: Helm init and upgrade
+  command: |
+    {{ helm_bin_dir }}/helm init
+    --upgrade
+    --skip-refresh
+
+- name: Wait for helm
+  wait_for: timeout=10
+  delegate_to: localhost
+
+- name: Get all helm repos
+  command: "{{ helm_bin_dir }}/helm repo list"
+  register: repos
+
+- name: Remove stable repo
+  command: "{{ helm_bin_dir }}/helm repo remove stable"
+  when: "'stable' in repos.stdout"
+
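+# 'helm serve' blocks, so it is started in the background; async with poll=0
+# detaches it and the play continues while the local chart repo keeps running.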
+- name: Helm Serve
+  shell: "{{ helm_bin_dir }}/helm serve &"
+  async: 45
+  poll: 0
+
+- name: Helm Add Repo
+  command: "{{ helm_bin_dir }}/helm repo add {{ helm_repository_name }} {{ helm_repository_url }}"
+
+- name: Helm Make All
+  make:
+    chdir: "{{ app_helm_charts_directory }}"
+    target: all
+
+- name: Helm Install application {{ app_name }}
+ command: "helm install {{ helm_repository_name }}/{{ app_helm_chart_name }} --name {{ app_helm_release_name }} --namespace {{ app_kubernetes_namespace }}"
diff --git a/ansible/roles/application-install/tasks/main.yml b/ansible/roles/application-install/tasks/main.yml
new file mode 100644
index 00000000..3306d9e4
--- /dev/null
+++ b/ansible/roles/application-install/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- debug:
+    msg: "phase is {{ phase }}"
+
+- include_tasks: "{{ phase }}.yml"
diff --git a/ansible/roles/nexus/defaults/main.yml b/ansible/roles/nexus/defaults/main.yml
new file mode 100644
index 00000000..57a79f95
--- /dev/null
+++ b/ansible/roles/nexus/defaults/main.yml
@@ -0,0 +1,2 @@
+# Defaults to install; can be set to configure.
+phase: install
diff --git a/ansible/roles/nexus/files/configure.groovy b/ansible/roles/nexus/files/configure.groovy
new file mode 100644
index 00000000..5691fe64
--- /dev/null
+++ b/ansible/roles/nexus/files/configure.groovy
@@ -0,0 +1,39 @@
+import org.sonatype.nexus.security.realm.RealmManager
+import org.sonatype.nexus.repository.attributes.AttributesFacet
+import org.sonatype.nexus.security.user.UserManager
+import org.sonatype.nexus.repository.manager.RepositoryManager
+import org.sonatype.nexus.security.user.UserNotFoundException
+
+/* Use the container to look up some services. */
+realmManager = container.lookup(RealmManager.class)
+userManager = container.lookup(UserManager.class, "default") //default user manager
+repositoryManager = container.lookup(RepositoryManager.class)
+
+/* Managers are used where the scripting API is not sufficient; note that the scripting API mostly only creates entities, and its creation methods return the created objects. */
+/* Clean up by removing all repos and users. Realms do not need to be re-disabled; the admin and anonymous users are kept. */
+userManager.listUserIds().each({ id ->
+    if (id != "anonymous" && id != "admin")
+        userManager.deleteUser(id)
+})
+
+repositoryManager.browse().each {
+    repositoryManager.delete(it.getName())
+}
+
+/* Add bearer token realms at the end of realm lists... */
+realmManager.enableRealm("NpmToken")
+realmManager.enableRealm("DockerToken")
+
+/* Create the docker user. */
+security.addUser("docker", "docker", "docker", "docker@example.com", true, "docker", ["nx-anonymous"])
+
+/* Create npm and docker repositories. Their default configuration meets our requirements, except for the docker registry, which needs the tweak below. */
+repository.createNpmHosted("npm-private")
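+/* Assumed signature: createDockerHosted(name, httpPort, httpsPort); the HTTPS
+   connector is left unused here since TLS terminates at the nginx proxy. */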
+def r = repository.createDockerHosted("docker", 8082, 0)
+
+/* Basic authentication is forced by default; it must be set to false for the docker repo. */
+conf = r.getConfiguration()
+conf.attributes("docker").set("forceBasicAuth", false)
+repositoryManager.update(conf)
diff --git a/ansible/roles/nexus/tasks/configure.yml b/ansible/roles/nexus/tasks/configure.yml
new file mode 100644
index 00000000..66712d8f
--- /dev/null
+++ b/ansible/roles/nexus/tasks/configure.yml
@@ -0,0 +1,36 @@
+---
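+# The script resource answers 200 if 'configure' was already uploaded and 404
+# otherwise; the upload-and-run block below only fires in the 404 case.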
+- name: "check if the configuration script is uploaded"
+ uri:
+ url: "{{ nexus_url }}/service/rest/v1/script/configure"
+ method: GET
+ force_basic_auth: yes
+ user: admin
+ password: admin123
+ status_code: [200, 404]
+ register: script
+- block:
+ - name: "upload the configuration script"
+ uri:
+ url: "{{ nexus_url }}/service/rest/v1/script"
+ method: POST
+ force_basic_auth: yes
+ user: admin
+ password: admin123
+ body_format: json
+ body:
+ name: configure
+ type: groovy
+ content: "{{ lookup('file', 'files/configure.groovy') }}"
+ status_code: [204]
+ - name: "execute configuration script"
+ uri:
+ url: "{{ nexus_url }}/service/rest/v1/script/configure/run"
+ method: POST
+ force_basic_auth: yes
+ user: admin
+ password: admin123
+ body_format: raw
+ headers: { "Content-Type": "text/plain" }
+ when: script.status == 404
diff --git a/ansible/roles/nexus/tasks/insert-images.yml b/ansible/roles/nexus/tasks/insert-images.yml
new file mode 100644
index 00000000..2e2a45c3
--- /dev/null
+++ b/ansible/roles/nexus/tasks/insert-images.yml
@@ -0,0 +1,21 @@
+---
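+# Assumes runtime_images keys match the tar basenames, e.g. foo.tar is
+# described by runtime_images.foo (registry, path and tag).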
+- name: Load docker images and push into registry
+  block:
+    - set_fact:
+        component: "{{ (item.path | basename | splitext)[0] }}"
+
+    - name: Docker login
+      docker_login:
+        registry: "{{ runtime_images[component].registry }}"
+        username: admin
+        password: admin123
+
+    - name: Load and push component {{ component }}
+      docker_image:
+        name: "{{ runtime_images[component].registry }}{{ runtime_images[component].path }}"
+        tag: "{{ runtime_images[component].tag }}"
+        push: yes
+        load_path: "{{ item.path }}"
+
diff --git a/ansible/roles/nexus/tasks/install.yml b/ansible/roles/nexus/tasks/install.yml
new file mode 100644
index 00000000..6dc82fe6
--- /dev/null
+++ b/ansible/roles/nexus/tasks/install.yml
@@ -0,0 +1,30 @@
+---
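+# UID/GID 200 is the nexus user inside the sonatype/nexus3 image.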
+- name: Change ownership of nexus_data
+  file:
+    path: "{{ app_data_path }}/nexus_data"
+    owner: 200
+    group: 200
+    recurse: yes
+
+- name: Load nexus image
+  docker_image:
+    name: sonatype/nexus3
+    load_path: "{{ app_data_path }}/offline_data/docker_images_infra/sonatype_nexus3_latest.tar"
+    state: present
+
+- name: Create nexus network
+  docker_network:
+    name: nexus_network
+    state: present
+
+- name: Run nexus container
+  docker_container:
+    name: nexus
+    image: sonatype/nexus3
+    networks:
+      - name: nexus_network
+    volumes:
+      - "{{ app_data_path }}/nexus_data:/nexus-data:rw"
+    state: started
+    restart_policy: unless-stopped
diff --git a/ansible/roles/nexus/tasks/main.yml b/ansible/roles/nexus/tasks/main.yml
new file mode 100644
index 00000000..c5905b13
--- /dev/null
+++ b/ansible/roles/nexus/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "{{ phase }}.yml"
diff --git a/ansible/roles/nexus/tasks/runtime-populate.yml b/ansible/roles/nexus/tasks/runtime-populate.yml
new file mode 100644
index 00000000..e22b650e
--- /dev/null
+++ b/ansible/roles/nexus/tasks/runtime-populate.yml
@@ -0,0 +1,12 @@
+---
+- name: Find images to be inserted into nexus in runtime
+  find:
+    paths: "{{ aux_data_path }}"
+    patterns: '*.tar'
+  register: tar_images
+
+# Workaround: a block of tasks cannot be executed in a loop, so we iterate
+# by including the task file once per tar archive.
+- include: "insert-images.yml"
+  with_items: "{{ tar_images.files }}"
+
diff --git a/ansible/roles/nexus/vars/main.yml b/ansible/roles/nexus/vars/main.yml
new file mode 100644
index 00000000..63944161
--- /dev/null
+++ b/ansible/roles/nexus/vars/main.yml
@@ -0,0 +1 @@
+nexus_url: "https://nexus.{{ hostvars[groups.infrastructure[0]].ansible_nodename }}"
diff --git a/ansible/roles/nginx/tasks/main.yml b/ansible/roles/nginx/tasks/main.yml
new file mode 100644
index 00000000..5c010848
--- /dev/null
+++ b/ansible/roles/nginx/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+- name: Create configuration directory
+  file:
+    path: "{{ app_data_path }}/cfg"
+    state: directory
+
+- name: Upload configuration to server
+  template:
+    src: nginx.conf.j2
+    dest: "{{ app_data_path }}/cfg/nginx.conf"
+
+- name: Load nginx image
+  docker_image:
+    name: own_nginx
+    load_path: "{{ app_data_path }}/offline_data/docker_images_infra/own_nginx_latest.tar"
+    state: present
+    timeout: 120
+
+- name: Start nginx
+  docker_container:
+    name: own_nginx
+    image: own_nginx
+    networks:
+      - name: nexus_network
+    ports:
+      - "80:80"
+      - "443:443"
+      - "10001:443"
+    volumes:
+      - "{{ app_data_path }}/cfg/nginx.conf:/etc/nginx/nginx.conf:ro"
+      - "{{ app_data_path }}/certs:/etc/nginx/certs:ro"
+      - "{{ app_data_path }}/git-repo:/srv/git:rw"
+      - "{{ app_data_path }}/http:/srv/http:rw"
+      - "{{ app_data_path }}/pkg/rhel:/srv/http/repo.infra-server:rw"
+      - /var/log/nginx:/var/log/nginx:rw
+    state: started
+    restart_policy: unless-stopped
diff --git a/ansible/roles/nginx/templates/nginx.conf.j2 b/ansible/roles/nginx/templates/nginx.conf.j2
new file mode 100644
index 00000000..fb48565f
--- /dev/null
+++ b/ansible/roles/nginx/templates/nginx.conf.j2
@@ -0,0 +1,106 @@
+worker_processes 2;
+
+events {
+    worker_connections 1024;
+}
+
+http {
+    error_log /var/log/nginx/error.log debug;
+    access_log /var/log/nginx/access.log;
+
+    proxy_intercept_errors on;
+    proxy_send_timeout 120;
+    proxy_read_timeout 300;
+
+    upstream nexus {
+        server nexus:8081;
+    }
+
+    upstream registry {
+        server nexus:8082;
+    }
+
+# http simulations
+    server {
+        listen 80;
+        listen 443 ssl;
+        server_name _;
+        ssl_certificate /etc/nginx/certs/nexus_server.crt;
+        ssl_certificate_key /etc/nginx/certs/nexus_server.key;
+
+        keepalive_timeout 5 5;
+
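+        # One docroot per simulated hostname under /srv/http/<hostname>.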
+        location / {
+            root /srv/http/$host;
+            index index.html;
+        }
+    }
+
+# nexus simulations
+    server {
+        listen 80;
+        listen 443 ssl;
+        server_name {% for host in simulated_hosts.nexus -%}
+                    {{ host + " " }}
+                    {%- endfor %};
+        ssl_certificate /etc/nginx/certs/nexus_server.crt;
+        ssl_certificate_key /etc/nginx/certs/nexus_server.key;
+
+        keepalive_timeout 5 5;
+        proxy_buffering off;
+
+        # allow large uploads
+        client_max_body_size 3G;
+
+        location / {
+            # redirect to docker registry
+            if ($http_user_agent ~ docker) {
+                proxy_pass http://registry;
+            }
+            proxy_pass http://nexus;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        }
+    }
+
+# git simulations
+    server {
+        listen 80;
+        listen 443 ssl;
+        server_name {% for host in simulated_hosts.git -%}
+                    {{ host + " " }}
+                    {%- endfor %};
+        ssl_certificate /etc/nginx/certs/nexus_server.crt;
+        ssl_certificate_key /etc/nginx/certs/nexus_server.key;
+
+        keepalive_timeout 5 5;
+        proxy_buffering off;
+
+        location / {
+            try_files $uri $uri/ @git;
+        }
+
+        location @git {
+
+            # Set chunks to unlimited, as the bodies can be huge
+            client_max_body_size 0;
+
+            fastcgi_param SCRIPT_FILENAME /usr/libexec/git-core/git-http-backend;
+            fastcgi_param QUERY_STRING $args;
+            fastcgi_param HTTP_HOST $server_name;
+            fastcgi_param PATH_INFO $uri;
+
+            include fastcgi_params;
+
+            fastcgi_param GIT_HTTP_EXPORT_ALL "";
+            fastcgi_param GIT_PROJECT_ROOT /srv/git/$host/;
+
+            # Forward REMOTE_USER as we want to know when we are authenticated
+            fastcgi_param REMOTE_USER $remote_user;
+
+            fastcgi_pass unix:/var/run/fcgiwrap.socket;
+        }
+    }
+}
diff --git a/ansible/roles/rancher/tasks/main.yml b/ansible/roles/rancher/tasks/main.yml
new file mode 100644
index 00000000..1370a39f
--- /dev/null
+++ b/ansible/roles/rancher/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "rancher_{{ rancher_role }}.yml"
diff --git a/ansible/roles/rancher/tasks/rancher_agent.yml b/ansible/roles/rancher/tasks/rancher_agent.yml
new file mode 100644
index 00000000..4c9cb8dd
--- /dev/null
+++ b/ansible/roles/rancher/tasks/rancher_agent.yml
@@ -0,0 +1,15 @@
+---
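+# server_hostvars picks up the facts set by the rancher_server role on the
+# first infrastructure host (agent image and registration url).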
+- name: Add Rancher Agent
+  docker_container:
+    name: rancher_agent
+    image: "{{ server_hostvars.rancher_agent_image }}"
+    command: "{{ server_hostvars.rancher_agent_reg_url }}"
+    volumes:
+      - "/var/run/docker.sock:/var/run/docker.sock"
+      - "/var/lib/rancher:/var/lib/rancher"
+    auto_remove: yes
+    privileged: yes
+  vars:
+    server_hostvars: "{{ hostvars[groups.infrastructure.0] }}"
diff --git a/ansible/roles/rancher/tasks/rancher_server.yml b/ansible/roles/rancher/tasks/rancher_server.yml
new file mode 100644
index 00000000..9abf986b
--- /dev/null
+++ b/ansible/roles/rancher/tasks/rancher_server.yml
@@ -0,0 +1,53 @@
+---
+# DO NOT ADD SPACE AROUND ';'
+- name: Start rancher/server:v1.6.14
+  docker_container:
+    name: rancher_server
+    image: rancher/server:v1.6.14
+    command: ["sh", "-c", "/usr/sbin/update-ca-certificates;/usr/bin/entry /usr/bin/s6-svscan /service"]
+    ports: 8080:8080
+    state: started
+    restart_policy: unless-stopped
+    volumes:
+      - "{{ app_data_path }}/certs:/usr/local/share/ca-certificates/extra:ro"
+
+- name: Wait for rancher server to be ready
+  uri:
+    url: "{{ rancher_server_url }}/v2-beta"
+  register: response
+  retries: 10
+  delay: 30
+  until: not response.failed
+
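+# rancher_k8s_environment is a custom module (not part of stock Ansible); as
+# used below it returns the environment id, API keys and agent registration data.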
+- name: Create kubernetes environment
+  rancher_k8s_environment:
+    name: "{{ app_name }}"
+    descr: "Kubernetes environment for {{ app_name }}"
+    server: "{{ rancher_server_url }}"
+    delete_other_k8s: "{{ rancher_remove_other_env }}"
+    force: "{{ rancher_redeploy_k8s_env }}"
+    host_os: "{{ ansible_os_family }}"
+  register: env
+  retries: 10
+  delay: 5
+  until: env.data is defined
+
+- name: Set apikey values
+  set_fact:
+    k8s_env_id: "{{ env.data.environment.id }}"
+    key_public: "{{ env.data.apikey.public }}"
+    key_private: "{{ env.data.apikey.private }}"
+    rancher_agent_image: "{{ env.data.registration_tokens.image }}"
+    rancher_agent_reg_url: "{{ env.data.registration_tokens.reg_url }}"
+
+- name: Ensure .kube directory exists
+  file:
+    path: "{{ kube_directory }}"
+    state: directory
+
+- name: Create kube config
+  template:
+    src: kube_config.j2
+    dest: "{{ kube_directory }}/config"
diff --git a/ansible/roles/rancher/templates/kube_config.j2 b/ansible/roles/rancher/templates/kube_config.j2
new file mode 100644
index 00000000..87f332e6
--- /dev/null
+++ b/ansible/roles/rancher/templates/kube_config.j2
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+    api-version: v1
+    insecure-skip-tls-verify: true
+    server: "https://{{ ansible_host }}:8080/r/projects/{{ k8s_env_id }}/kubernetes:6443"
+  name: "{{ app_name }}"
+contexts:
+- context:
+    cluster: "{{ app_name }}"
+    user: "{{ app_name }}"
+  name: "{{ app_name }}"
+current-context: "{{ app_name }}"
+users:
+- name: "{{ app_name }}"
+  user:
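+    # Rancher API keys are sent as HTTP basic auth; the kubeconfig token below
+    # is the full "Basic <base64(public:private)>" value, base64-encoded again.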
+    token: "{{ (['Basic', [key_public, key_private] | join(':') | b64encode] | join(' ')) | b64encode }}"
+
diff --git a/ansible/roles/resource-data/tasks/main.yml b/ansible/roles/resource-data/tasks/main.yml
new file mode 100644
index 00000000..51127226
--- /dev/null
+++ b/ansible/roles/resource-data/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "{{ transport }}-upload.yml"
diff --git a/ansible/roles/resource-data/tasks/nfs-upload.yml b/ansible/roles/resource-data/tasks/nfs-upload.yml
new file mode 100644
index 00000000..825486b6
--- /dev/null
+++ b/ansible/roles/resource-data/tasks/nfs-upload.yml
@@ -0,0 +1,54 @@
+---
+- name: Upload resources to infrastructure servers over nfs
+  block:
+    - name: Mount resources
+      mount:
+        path: /tmp/resource_data
+        src: "{{ hostvars[groups.resources.0].ansible_host }}:{{ hostvars[groups.resources.0].resources_dir }}"
+        fstype: nfs
+        state: mounted
+
+    - name: Unarchive resources
+      unarchive:
+        src: "/tmp/resource_data/{{ hostvars[groups.resources.0].resources_filename }}"
+        remote_src: yes
+        dest: "{{ app_data_path }}"
+      when: not resources_data_check.stat.exists
+
+    - name: Unarchive auxiliary resources
+      unarchive:
+        src: "/tmp/resource_data/{{ hostvars[groups.resources.0].aux_resources_filename }}"
+        remote_src: yes
+        dest: "{{ aux_data_path }}"
+      when: >
+        hostvars[groups.resources.0].aux_resources_filename is defined
+        and aux_data_path is defined and aux_data_path is not none
+        and hostvars[groups.resources.0].aux_file_presence.stat.exists
+        and not aux_resources_data_check.stat.exists
+
+  rescue:
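+    # /bin/false fails on purpose: the play is marked failed, while 'register'
+    # leaves upload_failed defined for the cleanup tasks in the always section.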
+    - name: Mark the upload as failed so the data gets removed and the next run can try again
+      command: /bin/false
+      register: upload_failed
+
+  always:
+    - name: Unmount resource dir
+      mount:
+        path: /tmp/resource_data
+        src: "{{ hostvars[groups.resources.0].ansible_host }}:{{ hostvars[groups.resources.0].resources_dir }}"
+        fstype: nfs
+        state: absent
+
+    - name: Remove the resource data on error
+      file:
+        path: "{{ app_data_path }}"
+        state: absent
+      when: upload_failed is defined
+
+    - name: Remove the auxiliary resource data on error
+      file:
+        path: "{{ aux_data_path }}"
+        state: absent
+      when: upload_failed is defined
diff --git a/ansible/roles/resource-data/tasks/ssh-upload.yml b/ansible/roles/resource-data/tasks/ssh-upload.yml
new file mode 100644
index 00000000..8e04d5c0
--- /dev/null
+++ b/ansible/roles/resource-data/tasks/ssh-upload.yml
@@ -0,0 +1,61 @@
+---
+- name: Upload resources to infrastructure servers over ssh
+  block:
+    - name: Upload ssh private key
+      copy:
+        src: "{{ ansible_ssh_private_key_file }}"
+        dest: /root/.ssh/infra_to_resource.privkey
+        mode: 0600
+        owner: root
+        group: root
+        remote_src: no
+
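+    # The archive is streamed over ssh and unpacked on the fly, so no extra
+    # space for the tarball itself is needed on the infrastructure server.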
+    - name: Unarchive resources
+      shell: >
+        ssh -o StrictHostKeyChecking=no -o BatchMode=yes
+        -i /root/.ssh/infra_to_resource.privkey
+        {{ hostvars[groups.resources.0].ansible_host }}
+        'cat "{{ hostvars[groups.resources.0].resources_dir }}/{{ hostvars[groups.resources.0].resources_filename }}"'
+        | tar -C "{{ app_data_path }}" -xf -
+      args:
+        warn: False
+      when: not resources_data_check.stat.exists
+
+    - name: Unarchive auxiliary resources
+      shell: >
+        ssh -o StrictHostKeyChecking=no -o BatchMode=yes -i /root/.ssh/infra_to_resource.privkey
+        {{ hostvars[groups.resources.0].ansible_host }}
+        'cat "{{ hostvars[groups.resources.0].resources_dir }}/{{ hostvars[groups.resources.0].aux_resources_filename }}"'
+        | tar -C "{{ aux_data_path }}" -xf -
+      args:
+        warn: False
+      when: >
+        hostvars[groups.resources.0].aux_resources_filename is defined
+        and aux_data_path is defined and aux_data_path is not none
+        and hostvars[groups.resources.0].aux_file_presence.stat.exists
+        and not aux_resources_data_check.stat.exists
+
+  rescue:
+    - name: Mark the upload as failed so the data gets removed and the next run can try again
+      command: /bin/false
+      register: upload_failed
+
+  always:
+    - name: Remove the ssh private key
+      file:
+        path: /root/.ssh/infra_to_resource.privkey
+        state: absent
+
+    - name: Remove the resource data on error
+      file:
+        path: "{{ app_data_path }}"
+        state: absent
+      when: upload_failed is defined
+
+    - name: Remove the auxiliary resource data on error
+      file:
+        path: "{{ aux_data_path }}"
+        state: absent
+      when: upload_failed is defined
diff --git a/ansible/roles/vncserver/tasks/main.yml b/ansible/roles/vncserver/tasks/main.yml
new file mode 100644
index 00000000..56ae707b
--- /dev/null
+++ b/ansible/roles/vncserver/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+- name: Load VNC server image
+  docker_image:
+    name: consol/centos-icewm-vnc:latest
+    load_path: '{{ app_data_path }}/offline_data/docker_images_infra/consol_centos_icewm_vnc_latest.tar'
+    state: present
+    timeout: 120
+
+- name: Run VNC server
+  docker_container:
+    name: vnc_server
+    image: consol/centos-icewm-vnc
+    state: started
+    restart_policy: unless-stopped
+    ports:
+      - "5901:5901"
+      - "6901:6901"
+    env:
+      VNC_PW: "{{ vnc_passwd }}"