diff --git a/00-template/mysql/automated_deploy-mysql-cluster-master/README.md b/00-template/mysql/automated_deploy-mysql-cluster-master/README.md
new file mode 100755
index 0000000..af9c99b
--- /dev/null
+++ b/00-template/mysql/automated_deploy-mysql-cluster-master/README.md
@@ -0,0 +1,93 @@
+## Ansible playbook for deploying a MySQL 5.7 Galera cluster with HAProxy as load balancer
+### Features of MySQL 5.7 Galera Cluster
+* True multi-master, active-active cluster: read from and write to any node at any time.
+* Synchronous replication: no slave lag, no data lost on node crash.
+* Tightly coupled: all nodes hold the same state; no diverged data between nodes is allowed.
+* Multi-threaded slave, for better performance with any workload.
+* No master-slave failover operations and no need for a VIP.
+* Hot standby: no downtime during failover (since there is no failover).
+* Automatic node provisioning: no need to manually back up the database and copy it to a new node.
+* Supports InnoDB.
+* Transparent to applications: requires no (or minimal) changes to the application.
+* No read/write splitting needed.
+* Easy to use and deploy.
+
+### Requirements
+
+This playbook requires a minimum of 3 Ubuntu 16.04 nodes (you can add more nodes as needed) for the MySQL 5.7 Galera cluster and 1 Ubuntu 16.04 node for HAProxy. The cluster nodes and the proxy node must be defined in the included `inventory.yml` file.
+
+![mysql galera cluster](https://i.imgur.com/YSR0Pnul.png)
+
+### Running Playbook
+
+Add your hosts to `inventory.yml`.
+
+Note: Do not modify the host group names `[mysql_cluster]` and `[load_balancer]` when adding hosts to `inventory.yml`, since the playbook depends on them.
+
+```
+[mysql_cluster]
+mysql_node_1 ansible_host=192.168.10.2 ansible_port=22 ansible_user=ubuntu
+mysql_node_2 ansible_host=192.168.10.3 ansible_port=22 ansible_user=ubuntu
+mysql_node_3 ansible_host=192.168.10.4 ansible_port=22 ansible_user=ubuntu
+[load_balancer]
+haproxy_load_balancer ansible_host=192.168.10.5 ansible_port=22 ansible_user=ubuntu
+```
+Before running the playbook, configure the variables below inside the playbook `deploy-mysql-cluster.yml`:
+
+`mysql_cluster_name: demo_mysql_cluster` <= name of your MySQL cluster, without spaces
+
+`mysql_root_password: your_mysql_root_password` <= a strong password for the MySQL root user
+
+#### To run the playbook
+
+`ansible-playbook --forks=1 deploy-mysql-cluster.yml -i inventory.yml`
+
+##### Note: The Ansible option `--forks=1` disables parallel execution of tasks, which preserves data consistency when stopping/bootstrapping the cluster.
+
+After the playbook completes successfully, you can connect to the cluster with `mysql -h load_balancer_IP -P 3306 -u root -p` (you will be prompted for your_mysql_root_password).
+
+### Verifying cluster status
+
+Check the last part of the playbook output:
+
+```
+ok: [haproxy_load_balancer] => {
+    "msg": [
+        " Test connection successful",
+        " Total number of active mysql nodes in cluster: '3'",
+        " Setup Completed!"
+    ]
+}
+```
+
+You can also verify the status of the MySQL cluster by manually running the query below:
+~~~~sql
+mysql -h load_balancer_IP -P 3306 -u root -p
+
+mysql> SHOW STATUS LIKE 'wsrep_cluster_size';
++--------------------+-------+
+| Variable_name      | Value |
++--------------------+-------+
+| wsrep_cluster_size | 3     |
++--------------------+-------+
+1 row in set (0.00 sec)
+~~~~
+This means the cluster has 3 active nodes.
+
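+In addition to `wsrep_cluster_size`, a couple of standard Galera status variables (generic Galera checks, not specific to this playbook) can confirm that a node is fully synced:
+
+~~~~sql
+mysql> SHOW STATUS LIKE 'wsrep_local_state_comment'; -- expected value: 'Synced'
+mysql> SHOW STATUS LIKE 'wsrep_ready';               -- expected value: 'ON'
+~~~~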
+
+### Stop MySQL Cluster
+
+You can use the `--tags` option "stop_cluster" to safely stop the MySQL cluster:
+
+`ansible-playbook --forks=1 deploy-mysql-cluster.yml -i inventory.yml --tags "stop_cluster"`
+
+### Start MySQL Cluster
+
+You can use the `--tags` option "start_cluster" to safely start/bootstrap the MySQL cluster:
+
+`ansible-playbook --forks=1 deploy-mysql-cluster.yml -i inventory.yml --tags "start_cluster"`
\ No newline at end of file
diff --git a/00-template/mysql/automated_deploy-mysql-cluster-master/deploy-mysql-cluster.yml b/00-template/mysql/automated_deploy-mysql-cluster-master/deploy-mysql-cluster.yml
new file mode 100755
index 0000000..2e18f4f
--- /dev/null
+++ b/00-template/mysql/automated_deploy-mysql-cluster-master/deploy-mysql-cluster.yml
@@ -0,0 +1,177 @@
+---
+- hosts: all
+  become: true
+  become_method: sudo
+  vars:
+    mysql_cluster_name: demo_mysql_cluster
+    mysql_root_password: "root"
+
+  tasks:
+
+# Set up MySQL server 5.7 Galera Cluster repositories
+
+  - name: View node IPs
+    debug:
+      msg: "{{ ansible_default_ipv4 }}"
+    when: "'mysql_cluster' in group_names"
+
+  - name: Import repository key
+    apt_key: keyserver=hkp://keyserver.ubuntu.com:80 id=BC19DDBA
+    when: "'mysql_cluster' in group_names"
+
+  - name: Add apt repositories for mysql-wsrep-5.7 and Galera Cluster
+    apt_repository:
+      repo: "{{ item }}"
+      state: present
+      update_cache: yes
+    with_items:
+      - "deb http://releases.galeracluster.com/mysql-wsrep-5.7/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} main"
+      - "deb http://releases.galeracluster.com/galera-3/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} main"
+    when: "'mysql_cluster' in group_names"
+
+  - name: Create a preference file for the Galera repository
+    blockinfile:
+      path: /etc/apt/preferences.d/mysql-galera-cluster.pref
+      create: yes
+      block: |
+        Package: *
+        Pin: origin releases.galeracluster.com
+        Pin-Priority: 1001
+    when: "'mysql_cluster' in group_names"
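+# A Pin-Priority above 1000 makes apt prefer the mysql-wsrep packages from
+# releases.galeracluster.com over Ubuntu's stock mysql packages. To verify the
+# pin on a cluster node (a manual check, not part of this playbook) you can run
+# `apt-cache policy mysql-wsrep-5.7` and confirm that the
+# releases.galeracluster.com origin is listed at priority 1001.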
+
+# Install MySQL and Galera Cluster packages
+
+  - name: Disable the mysql AppArmor profile
+    file:
+      src: /etc/apparmor.d/usr.sbin.mysqld
+      dest: /etc/apparmor.d/disable/usr.sbin.mysqld
+      state: link
+      force: yes
+    when: "'mysql_cluster' in group_names"
+
+  - name: Restart AppArmor
+    systemd: state=restarted name=apparmor
+    when: "'mysql_cluster' in group_names"
+
+  - name: Install MySQL server 5.7 and Galera Cluster packages
+    apt:
+      name: ['galera-3', 'galera-arbitrator-3', 'mysql-wsrep-5.7', 'rsync', 'python-mysqldb']
+      update_cache: yes
+    when: "'mysql_cluster' in group_names"
+
+  - name: Disable the mysql systemd unit to prevent the cluster from starting in the wrong order
+    systemd: name=mysql enabled=no
+    when: "'mysql_cluster' in group_names"
+
+# Create the MySQL Galera Cluster configuration file
+
+  - name: Create the MySQL Galera Cluster configuration file
+    template:
+      src: mysql-cluster-config.j2
+      dest: /etc/mysql/conf.d/mysql_galera_cluster.cnf
+      owner: mysql
+      group: mysql
+    when: "'mysql_cluster' in group_names"
+
+  - name: Stop slave mysql nodes
+    systemd: state=stopped name=mysql
+    when: "('load_balancer' not in group_names) and (inventory_hostname != groups['mysql_cluster'][0])"
+    tags:
+      - stop_cluster
+
+  - name: Wait 20 seconds to safely shut down all slave mysql nodes
+    pause:
+      seconds: 20
+    tags:
+      - stop_cluster
+
+  - name: Stop the primary mysql node
+    systemd: state=stopped name=mysql
+    when: inventory_hostname == groups['mysql_cluster'][0]
+    tags:
+      - stop_cluster
+
+  - name: Wait 10 seconds to safely shut down the primary mysql node
+    pause:
+      seconds: 10
+    tags:
+      - stop_cluster
+
+  - name: Bootstrap the primary mysql node
+    shell: /usr/bin/mysqld_bootstrap
+    when: inventory_hostname == groups['mysql_cluster'][0]
+    any_errors_fatal: true
+    tags:
+      - start_cluster
+
+  - name: Wait 10 seconds after bootstrapping the primary mysql node
+    pause:
+      seconds: 10
+    tags:
+      - start_cluster
+
+  - name: Start slave mysql nodes
+    systemd: state=started name=mysql
+    when: "('load_balancer' not in group_names) and (inventory_hostname != groups['mysql_cluster'][0])"
+    any_errors_fatal: true
+    tags:
+      - start_cluster
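+# Note (general Galera behaviour, not handled by this playbook): if the whole
+# cluster was shut down uncleanly, Galera 3.19+ records which node is safe to
+# bootstrap in /var/lib/mysql/grastate.dat ("safe_to_bootstrap: 1"). If
+# bootstrapping fails, inspect that file on each node and bootstrap from the
+# node with the most recent state (highest seqno).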
+
+  - name: Install haproxy
+    apt:
+      name: ['haproxy', 'python-mysqldb']
+      update_cache: yes
+    when: "'load_balancer' in group_names"
+
+  - name: Add mysql nodes to the haproxy configuration file
+    template:
+      src: haproxy-config.j2
+      dest: /etc/haproxy/haproxy.cfg
+      validate: 'haproxy -c -f %s'
+    when: "'load_balancer' in group_names"
+
+# Set the mysql root password and only allow access to root from the load balancer IP and localhost
+
+  - name: Set the mysql root password and only allow access to root from the load balancer IP and localhost
+    mysql_user:
+      name: root
+      host: '{{ item }}'
+      check_implicit_admin: yes
+      login_user: root
+      login_password: '{{ mysql_root_password }}'
+      password: '{{ mysql_root_password }}'
+      priv: '*.*:ALL'
+      state: present
+      update_password: always
+    when: inventory_hostname == groups['mysql_cluster'][0]
+    with_items:
+      - "{{ hostvars[groups['load_balancer'][0]]['ansible_default_ipv4']['address'] }}"
+      - 127.0.0.1
+      - "localhost"
+
+  - name: Create the haproxy mysql user used for basic health checks
+    mysql_user:
+      name: haproxy
+      host: "{{ hostvars[groups['load_balancer'][0]]['ansible_default_ipv4']['address'] }}"
+      state: present
+      check_implicit_admin: yes
+      login_user: root
+      login_password: '{{ mysql_root_password }}'
+    when: inventory_hostname == groups['mysql_cluster'][0]
+
+  - name: Restart haproxy
+    systemd: state=restarted name=haproxy
+    when: "'load_balancer' in group_names"
+
+  - name: Test cluster status by connecting through the load balancer
+    shell: mysql -h {{ hostvars[groups['load_balancer'][0]]['ansible_default_ipv4']['address'] }} -u root -p{{ mysql_root_password }} -e "SHOW STATUS LIKE 'wsrep_cluster_size'" | grep 'wsrep_cluster_size' | awk '{print $2}'
+    when: inventory_hostname == groups['load_balancer'][0]
+    run_once: true
+    register: cluster_status
+
+  - name: Report test status
+    debug:
+      msg:
+        - " Test connection successful"
+        - " Total number of active mysql nodes in cluster: '{{ cluster_status.stdout }}'"
+        - " Setup Completed!"
+    when: inventory_hostname == groups['load_balancer'][0]
+    run_once: true
diff --git a/00-template/mysql/automated_deploy-mysql-cluster-master/haproxy-config.j2 b/00-template/mysql/automated_deploy-mysql-cluster-master/haproxy-config.j2
new file mode 100755
index 0000000..c35ab2c
--- /dev/null
+++ b/00-template/mysql/automated_deploy-mysql-cluster-master/haproxy-config.j2
@@ -0,0 +1,46 @@
+global
+    log /dev/log local0
+    log /dev/log local1 notice
+    chroot /var/lib/haproxy
+    stats socket /run/haproxy/admin.sock mode 660 level admin
+    stats timeout 30s
+    user haproxy
+    group haproxy
+    daemon
+
+    # Default SSL material locations
+    ca-base /etc/ssl/certs
+    crt-base /etc/ssl/private
+
+    # Default ciphers to use on SSL-enabled listening sockets.
+    # For more information, see ciphers(1SSL). This list is from:
+    # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+    ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS
+    ssl-default-bind-options no-sslv3
+
+defaults
+    log     global
+    option  dontlognull
+    timeout connect 5000
+    timeout client  50000
+    timeout server  50000
+    errorfile 400 /etc/haproxy/errors/400.http
+    errorfile 403 /etc/haproxy/errors/403.http
+    errorfile 408 /etc/haproxy/errors/408.http
+    errorfile 500 /etc/haproxy/errors/500.http
+    errorfile 502 /etc/haproxy/errors/502.http
+    errorfile 503 /etc/haproxy/errors/503.http
+    errorfile 504 /etc/haproxy/errors/504.http
+
+# Load balancing for the MySQL Galera cluster
+listen mysql_galera_cluster
+    bind {{ ansible_default_ipv4.address }}:3306
+    balance roundrobin
+    mode tcp
+    option tcpka
+    option mysql-check user haproxy
+{% for host in groups['mysql_cluster'] %}
+    server {{ hostvars[host]['inventory_hostname'] }} {{ hostvars[host]['ansible_default_ipv4']['address'] }}:3306 check weight 1
+{% endfor %}
diff --git a/00-template/mysql/automated_deploy-mysql-cluster-master/inventory.yml b/00-template/mysql/automated_deploy-mysql-cluster-master/inventory.yml
new file mode 100755
index 0000000..cd77e00
--- /dev/null
+++ b/00-template/mysql/automated_deploy-mysql-cluster-master/inventory.yml
@@ -0,0 +1,17 @@
+[mysql_cluster]
+mysql1 ansible_host=172.17.0.214 ansible_port=22
+mysql2 ansible_host=172.17.0.215 ansible_port=22
+mysql3 ansible_host=172.17.0.216 ansible_port=22
+[load_balancer]
+haproxy ansible_host=172.17.0.210 ansible_port=22
+
+[cluster:children]
+mysql_cluster
+load_balancer
+
+[cluster:vars]
+ansible_connection=ssh
+ansible_user=vagrant
+ansible_ssh_pass=vagrant
+; ansible_ssh_private_key_file= ~/.vagrant/machines/default/virtualbox/private_key
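+# Note: the playbook treats the first host in [mysql_cluster] (mysql1 here) as
+# the primary/bootstrap node, so list the node you want to bootstrap from first.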
diff --git a/00-template/mysql/automated_deploy-mysql-cluster-master/mysql-cluster-config.j2 b/00-template/mysql/automated_deploy-mysql-cluster-master/mysql-cluster-config.j2
new file mode 100755
index 0000000..9e0cc85
--- /dev/null
+++ b/00-template/mysql/automated_deploy-mysql-cluster-master/mysql-cluster-config.j2
@@ -0,0 +1,13 @@
+[mysqld]
+# ROW binlog format and autoinc lock mode 2 are required by Galera
+binlog_format=ROW
+default-storage-engine=innodb
+innodb_autoinc_lock_mode=2
+bind-address=0.0.0.0
+# Galera Cluster Config
+wsrep_on=ON
+wsrep_provider=/usr/lib/galera/libgalera_smm.so
+wsrep_cluster_name="{{ mysql_cluster_name }}"
+wsrep_cluster_address="gcomm://{{ groups['mysql_cluster'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | join(',') }}"
+wsrep_sst_method=rsync
+wsrep_node_address="{{ ansible_default_ipv4.address }}"
+wsrep_node_name="{{ inventory_hostname }}"
diff --git a/00-template/mysql/automated_deploy-mysql-cluster-master/poc-mysql_cluster_db.pdf b/00-template/mysql/automated_deploy-mysql-cluster-master/poc-mysql_cluster_db.pdf
new file mode 100644
index 0000000..abe7db0
Binary files /dev/null and b/00-template/mysql/automated_deploy-mysql-cluster-master/poc-mysql_cluster_db.pdf differ