🔥 Remove legacy code

- Drop Vagrant support
- Drop TravisCI
- Remove legacy tests
Emilien Mantel 2024-08-27 11:28:39 +02:00 committed by Emilien M
parent c32411c6e4
commit edbd43067e
10 changed files with 1 addition and 329 deletions


@@ -6,7 +6,6 @@ skip_list: []
 exclude_paths:
   - venv/
-  - tests/ # TODO: Remove this line when tests are migrated to molecule
   - .github/
 offline: false

.gitignore

@@ -1,4 +1,3 @@
-.vagrant*
 *.swp
 *.retry
 /.idea


@@ -1,57 +0,0 @@
---
env:
  global:
    - VAGRANT_VERSION='2.2.18'
  jobs:
    - PLATFORM='docker-buster-default-master' ANSIBLE_VERSION='>=2.11,<2.12'
    - PLATFORM='docker-buster-upstream-master' ANSIBLE_VERSION='>=2.11,<2.12'
    - PLATFORM='docker-buster-default-galera-1' ANSIBLE_VERSION='>=2.11,<2.12'
    - PLATFORM='docker-buster-upstream-galera-1' ANSIBLE_VERSION='>=2.11,<2.12'
    - PLATFORM='docker-bullseye-default-master' ANSIBLE_VERSION='>=2.11,<2.12'
    - PLATFORM='docker-bullseye-upstream-master' ANSIBLE_VERSION='>=2.11,<2.12'
    - PLATFORM='docker-bullseye-default-galera-1' ANSIBLE_VERSION='>=2.11,<2.12'
    - PLATFORM='docker-bullseye-upstream-galera-1' ANSIBLE_VERSION='>=2.11,<2.12'

os:
  - linux
dist: focal
language: python
python:
  - 3.8
services:
  - docker

before_install:
  - sudo apt-get -q update
  - sudo apt-get install -y yamllint
  - sudo wget -nv https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}_x86_64.deb
  - sudo dpkg -i vagrant_${VAGRANT_VERSION}_x86_64.deb
  - vagrant plugin install vagrant-hostmanager

install:
  - sudo pip install "ansible-core$ANSIBLE_VERSION"
  - sudo pip install ansible-lint
  - ansible-galaxy collection install community.general community.mysql community.crypto ansible.posix

script:
  - VAGRANT_DEFAULT_PROVIDER=docker vagrant up $PLATFORM
  - >
    VAGRANT_DEFAULT_PROVIDER=docker vagrant provision $PLATFORM
    | grep -q 'changed=0.*failed=0'
    && (echo 'Idempotence test: pass' && exit 0)
    || (echo 'Idempotence test: fail' && exit 1)
  - VAGRANT_DEFAULT_PROVIDER=docker vagrant status
  - >
    yamllint .
    && (echo 'YAML lint test: pass' && exit 0)
    || (echo 'YAML lint test: fail' && exit 1)
  - >
    ansible-lint -v tests/test.yml
    && (echo 'Ansible lint test: pass' && exit 0)
    || (echo 'Ansible lint test: fail' && exit 1)

notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
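
This commit drops the Travis config without a replacement; the TODO comments in the lint configs only point to a future molecule migration. For reference, a minimal sketch of a GitHub Actions job covering the same yamllint/ansible-lint checks might look like the block below. This is hypothetical and not part of this commit: the workflow path, action versions, and Python version are assumptions.

```
# Hypothetical .github/workflows/lint.yml — mirrors the lint steps of the removed .travis.yml.
name: lint
on: [push, pull_request]
jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      # Same tooling the Travis "install" phase used
      - run: pip install ansible-core ansible-lint yamllint
      - run: yamllint .
      - run: ansible-lint -v
```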


@@ -7,4 +7,3 @@ rules:
 ignore:
   - /venv
-  - /tests # TODO: Remove this line when tests are migrated to molecule


@@ -16,9 +16,7 @@ If you need to manage previous Debian versions, please use the [latest managed v
 Notes
 -----
-* Galera Cluster is experimental
-* Due to Vagrant + Docker limitation (private network), replication/galera can't be checked with Travis
-* If you need to test this role with Vagrant, you must install hostmanager plugin: `vagrant plugin install vagrant-hostmanager`
+* Galera Cluster is experimental. Feel free to test it and report issues.
 
 Requirements
 ------------

Vagrantfile

@@ -1,114 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# vi: set tabstop=2 :
# vi: set shiftwidth=2 :

Vagrant.configure("2") do |config|
  vbox_deb_buster = 'debian/buster64'
  dk_deb_buster = 'hanxhx/vagrant-ansible:debian10'
  vbox_deb_bullseye = 'debian/bullseye64'
  dk_deb_bullseye = 'hanxhx/vagrant-ansible:debian11'

  config.hostmanager.enabled = true
  config.hostmanager.manage_host = false
  config.hostmanager.manage_guest = true
  config.hostmanager.ignore_private_ip = false
  config.hostmanager.include_offline = false

  cases = [
    # ========================
    # Debian Buster (10)
    # ========================
    # Default replication
    { os_name: 'buster', vbox: vbox_deb_buster, docker: dk_deb_buster, vars: { mariadb_origin: 'default' }, groups: ['master'] },
    { os_name: 'buster', vbox: vbox_deb_buster, docker: nil, vars: { mariadb_origin: 'default' }, groups: ['slave'] },
    # Upstream replication
    { os_name: 'buster', vbox: vbox_deb_buster, docker: dk_deb_buster, vars: { mariadb_origin: 'upstream' }, groups: ['master'] },
    { os_name: 'buster', vbox: vbox_deb_buster, docker: nil, vars: { mariadb_origin: 'upstream' }, groups: ['slave'] },
    # Galera Debian
    { os_name: 'buster', vbox: vbox_deb_buster, docker: dk_deb_buster, vars: { mariadb_origin: 'default' }, groups: ['galera', '1'] },
    { os_name: 'buster', vbox: vbox_deb_buster, docker: nil, vars: { mariadb_origin: 'default' }, groups: ['galera', '2'] },
    { os_name: 'buster', vbox: vbox_deb_buster, docker: nil, vars: { mariadb_origin: 'default' }, groups: ['galera', '3'] },
    # Galera Upstream
    { os_name: 'buster', vbox: vbox_deb_buster, docker: dk_deb_buster, vars: { mariadb_origin: 'upstream' }, groups: ['galera', '1'] },
    { os_name: 'buster', vbox: vbox_deb_buster, docker: nil, vars: { mariadb_origin: 'upstream' }, groups: ['galera', '2'] },
    { os_name: 'buster', vbox: vbox_deb_buster, docker: nil, vars: { mariadb_origin: 'upstream' }, groups: ['galera', '3'] },
    # ========================
    # Debian Bullseye (11)
    # ========================
    # Default replication
    { os_name: 'bullseye', vbox: vbox_deb_bullseye, docker: dk_deb_bullseye, vars: { mariadb_origin: 'default' }, groups: ['master'] },
    { os_name: 'bullseye', vbox: vbox_deb_bullseye, docker: nil, vars: { mariadb_origin: 'default' }, groups: ['slave'] },
    # Upstream replication
    { os_name: 'bullseye', vbox: vbox_deb_bullseye, docker: dk_deb_bullseye, vars: { mariadb_origin: 'upstream' }, groups: ['master'] },
    { os_name: 'bullseye', vbox: vbox_deb_bullseye, docker: nil, vars: { mariadb_origin: 'upstream' }, groups: ['slave'] },
    # Galera Debian
    { os_name: 'bullseye', vbox: vbox_deb_bullseye, docker: dk_deb_bullseye, vars: { mariadb_origin: 'default' }, groups: ['galera', '1'] },
    { os_name: 'bullseye', vbox: vbox_deb_bullseye, docker: nil, vars: { mariadb_origin: 'default' }, groups: ['galera', '2'] },
    { os_name: 'bullseye', vbox: vbox_deb_bullseye, docker: nil, vars: { mariadb_origin: 'default' }, groups: ['galera', '3'] },
    # Galera Upstream
    { os_name: 'bullseye', vbox: vbox_deb_bullseye, docker: dk_deb_bullseye, vars: { mariadb_origin: 'upstream' }, groups: ['galera', '1'] },
    { os_name: 'bullseye', vbox: vbox_deb_bullseye, docker: nil, vars: { mariadb_origin: 'upstream' }, groups: ['galera', '2'] },
    { os_name: 'bullseye', vbox: vbox_deb_bullseye, docker: nil, vars: { mariadb_origin: 'upstream' }, groups: ['galera', '3'] },
  ]

  cases.each_with_index do |opts, index|
    name = 'docker-' + opts[:os_name] + '-' + opts[:vars].map { |k, v| "#{v}" }.join('-') + '-' + opts[:groups].join('-')
    iplsb = 10 + index
    ip = '192.168.201.' + iplsb.to_s
    next if opts[:docker].nil?
    config.vm.synced_folder ".", "/vagrant", disabled: true
    config.vm.define name do |m|
      m.vm.network "private_network", ip: ip
      m.vm.provider "docker" do |d|
        d.image = opts[:docker]
        d.remains_running = true
        d.has_ssh = true
      end
      m.vm.provision "ansible" do |ansible|
        ansible.playbook = "tests/test.yml"
        ansible.verbose = 'vv'
        ansible.become = true
        ansible.extra_vars = opts[:vars].merge({ mariadb_debug_role: true, is_docker: true })
        ansible.groups = { opts[:groups][0] => name }
      end
    end
  end

  cases.each_with_index do |opts, index|
    name = 'vbox-' + opts[:os_name] + '-' + opts[:vars].map { |k, v| "#{v}" }.join('-') + '-' + opts[:groups].join('-')
    iplsb = 10 + index
    ip = '192.168.200.' + iplsb.to_s
    config.vm.define name do |m|
      m.vm.hostname = name
      m.vm.box = opts[:vbox]
      m.vm.network "private_network", ip: ip
      m.vm.provider "virtualbox" do |v|
        v.cpus = 1
        v.memory = 512
      end
      m.vm.provision "ansible" do |ansible|
        ansible.playbook = "tests/test.yml"
        ansible.verbose = 'vv'
        ansible.become = true
        ansible.extra_vars = opts[:vars].merge({ mariadb_debug_role: true, is_docker: false })
        ansible.groups = { opts[:groups][0] => name }
      end
    end
  end
end


@@ -1,23 +0,0 @@
About tests
===========

IMPORTANT
---------

- DO NOT `vagrant up`! My Vagrantfile provides many VMs...
- Each slave communicates with its master.

Tests
-----

- vagrant up the-master
- vagrant up the-slave

Wait until the master is fully installed before running the slave.

Example:

```
vagrant up vbox-buster-default-master
vagrant up vbox-buster-default-slave
```


@@ -1,6 +0,0 @@
INSERT IGNORE INTO `user` (`id`,`email`) VALUES (51,"vulputate.eu.odio@elitdictumeu.net"),(52,"Sed.eu@erosnec.edu"),(53,"magna@interdum.co.uk"),(54,"ornare.libero.at@Proin.net"),(55,"turpis@aneque.org"),(56,"ut.eros.non@Duisrisusodio.com"),(57,"Pellentesque.ultricies.dignissim@malesuada.edu"),(58,"vel.nisl@mifringilla.net"),(59,"dui@laoreet.com"),(60,"vitae@Suspendissedui.net");
INSERT IGNORE INTO `user` (`id`,`email`) VALUES (61,"turpis@Curabituregestasnunc.co.uk"),(62,"arcu@enim.co.uk"),(63,"consectetuer.rhoncus.Nullam@dolordolortempus.co.uk"),(64,"sem@felisadipiscingfringilla.net"),(65,"aliquet.metus.urna@a.net"),(66,"Fusce.fermentum.fermentum@variusNam.com"),(67,"dolor@velnisl.ca"),(68,"et.netus@Duisrisusodio.edu"),(69,"malesuada@purus.edu"),(70,"gravida.sagittis@pulvinararcuet.ca");
INSERT IGNORE INTO `user` (`id`,`email`) VALUES (71,"et.eros.Proin@Cras.co.uk"),(72,"eleifend.nunc.risus@metuseu.edu"),(73,"pede.Nunc@Phasellusnulla.net"),(74,"vitae.sodales.at@ipsumdolor.edu"),(75,"nunc.sed.pede@aliquetlobortisnisi.co.uk"),(76,"consectetuer@nonenim.ca"),(77,"ultrices@tinciduntvehicula.co.uk"),(78,"Nullam.enim.Sed@Morbiaccumsan.com"),(79,"auctor@Phasellus.net"),(80,"enim.Etiam@interdum.com");
INSERT IGNORE INTO `user` (`id`,`email`) VALUES (81,"sapien.Cras.dolor@consectetuer.com"),(82,"malesuada.fames.ac@feugiattelluslorem.edu"),(83,"risus@vestibulum.co.uk"),(84,"Nunc@Duisgravida.ca"),(85,"ornare.egestas@sitamet.edu"),(86,"Proin.ultrices@senectus.ca"),(87,"ligula@magna.edu"),(88,"orci.tincidunt.adipiscing@sed.com"),(89,"et@venenatis.edu"),(90,"leo.Cras.vehicula@eteuismod.org");
INSERT IGNORE INTO `user` (`id`,`email`) VALUES (91,"consequat.auctor.nunc@utsemNulla.net"),(92,"nec.leo@orci.com"),(93,"Nulla@atvelit.edu"),(94,"tempor.augue.ac@eleifend.edu"),(95,"fermentum.risus.at@penatibusetmagnis.edu"),(96,"id.erat.Etiam@porttitortellus.edu"),(97,"amet.metus.Aliquam@mus.co.uk"),(98,"dolor.tempus.non@risus.org"),(99,"vulputate.posuere.vulputate@purus.ca"),(100,"inceptos@pede.edu");


@@ -1 +0,0 @@
localhost


@@ -1,122 +0,0 @@
---
- hosts: all
  gather_facts: false
  pre_tasks:
    - name: SETUP | Get facts
      ansible.builtin.setup:
      register: s
    - name: DEBUG | Show facts
      ansible.builtin.debug:
        var: s
  tasks:
    - name: APT | Install some packages
      ansible.builtin.apt:
        name: "{{ p }}"
        update_cache: true
        cache_valid_time: 3600
      vars:
        p: ['ca-certificates', 'curl', 'strace', 'rsyslog', 'vim']
    - name: SERVICE | Ensure rsyslog is started
      ansible.builtin.service:
        name: rsyslog
        state: started

- hosts: master
  roles:
    - ../../
  tasks:
    - name: COPY | Deploy first dump
      ansible.builtin.copy:
        src: import1.sql
        dest: /tmp/import1.sql
        mode: 0644
        owner: root
        group: root
      register: c
    - name: MYSQL_DB | Import first dump
      community.mysql.mysql_db:
        name: "{{ item }}"
        state: import
        target: /tmp/import1.sql
        login_unix_socket: "{{ mariadb_socket }}"
      loop: ['testrepl', 'norepl']
      when: c.changed
      tags:
        - skip_ansible_lint

- hosts: slave
  pre_tasks:
    - name: SHELL | Get master IP
      ansible.builtin.shell: set -o pipefail && getent hosts {{ ansible_hostname | replace ('slave', 'master') }} | cut -d ' ' -f 1
      args:
        executable: /bin/bash
      register: ip
      changed_when: false
    - name: SET_FACT | Apply some configuration
      ansible.builtin.set_fact:
        # MariaDB doesn't read /etc/hosts (populated by the vagrant-hostmanager plugin)
        mariadb_replication_host: "{{ ip.stdout }}"
        # Needed to use 'delegate_to' with Vagrant
        mariadb_slave_import_from: "{{ ansible_hostname | replace ('slave', 'master') }}"
  roles:
    - ../../
  tasks:
    - block:
        - name: COPY | Deploy dump
          ansible.builtin.copy:
            src: import2.sql
            dest: /tmp/import2.sql
            mode: 0644
            owner: root
            group: root
          delegate_to: "{{ mariadb_slave_import_from }}"
          register: c
        - name: MYSQL_DB | Import another dump
          community.mysql.mysql_db:
            name: "{{ item }}"
            state: import
            target: /tmp/import2.sql
            login_unix_socket: "{{ mariadb_socket }}"
          loop: ['testrepl', 'norepl']
          when: c.changed
          delegate_to: "{{ mariadb_slave_import_from }}"
        - name: MYSQL_REPLICATION | Get slave infos
          community.mysql.mysql_replication:
            mode: getslave
          register: slave
        - name: FAIL | if slave threads are not running
          ansible.builtin.fail:
            msg: "Slave issue"
          when: slave.Slave_IO_Running != 'Yes' or slave.Slave_SQL_Running != 'Yes'

- hosts: galera
  pre_tasks:
    - name: APT_REPOSITORY | Force galera-3 on Buster + MariaDB from Debian repository (prevent crashes)
      ansible.builtin.set_fact:
        mariadb_galera_package_name: 'galera-3'
      when: ansible_distribution_release == 'buster' and mariadb_origin == 'default'
    - name: SET_FACT | Apply some configuration
      ansible.builtin.set_fact:
        mariadb_galera_primary_node: '{% if is_docker %}docker-{% else %}vbox-{% endif %}{{ ansible_distribution_release }}-{{ mariadb_origin }}-galera-1'
        mariadb_wsrep_node_address: "{{ '127.0.0.1' if is_docker else ansible_eth1.ipv4.address }}"
  roles:
    - ../../
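
For clarity, the sketch below is a hypothetical debug task (not part of the removed playbook) showing what the two `set_fact` templates in the galera play resolve to; the expected values in the comments follow the machine names and private-network addresses defined in the removed Vagrantfile.

```
- name: DEBUG | Show resolved Galera facts (illustration only)
  ansible.builtin.debug:
    msg:
      # On a VirtualBox Buster node with mariadb_origin=default this renders
      # 'vbox-buster-default-galera-1', i.e. the first Vagrant-defined Galera machine.
      - "primary node: {{ mariadb_galera_primary_node }}"
      # 127.0.0.1 on Docker nodes, otherwise the eth1 private-network address
      # (192.168.200.x on the VirtualBox machines).
      - "wsrep address: {{ mariadb_wsrep_node_address }}"
```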