diff --git a/hosts.yml b/hosts.yml new file mode 100644 index 0000000..3ab3a92 --- /dev/null +++ b/hosts.yml @@ -0,0 +1,448 @@ +# -*- mode: yaml; indent-tabs-mode: nil; tab-width: 2; coding: utf-8 -*- +# use double quotes exclusively around strings and +# use single quotes exclusively with lists - for bash post-processing + +all: + + children: + + vbox_winrm_group: + + hosts: + + y_UEFI_MediCat_VHD_DW: + # /var/lib/libvirt/qemu/channel/target/domain-37-y_UEFI_MediCat_VHD_D/org.qemu.guest_agent.0 + # doesnt work: ansible_connection: "libvirt_qemu" + + BOX_SERVICE_MGR: "win11" + BOX_HOST_NAME: "y_UEFI_MediCat_VHD_DW" + + UPD_WINRM_CRT_PASSWORD: "" + UPD_WINRM_CRT_NAME: "WINRM_WIN11VBOX cert for " + UPD_WINRM_FILE_BASE: "winrm-win11vbox" + UPD_WINRM_KEY_BITS: 4096 + + UPD_WINRM_HOST_NAME: "y_UEFI_MediCat_VHD_D" + UPD_WINRM_HOST_DEV: "vboxnet0" + UPD_WINRM_ADMIN_NAME: "administrator" + UPD_WINRM_ADMIN_PASS: "" + + # NOT remote_addr: + ansible_winrm_host: "192.168.56.1" + # remote_user + ansible_winrm_user: "administrator" + BOX_DEFAULT_OUTPUT_IF: fixme + + UPD_WINRM_WINRM_ADMIN_NAME: "winrmadmin" + UPD_WINRM_WINRM_ADMIN_PASS: "winrmadmin" + + # List of winrm transports to attempt to to use (ssl, plaintext, kerberos, etc) + # python2 -c 'import winrm;print winrm.FEATURE_SUPPORTED_AUTHTYPES' + # ['basic', 'certificate', 'ntlm', 'kerberos', 'plaintext', 'ssl', 'credssp'] + # FixMe: which one works? + UPD_WINRM_WINRM_TRANSPORT: "basic" + # Lati sda Disk identifier: 0A00A495-684B-425E-823F-60257EBD6D3B + + vars: + #maybe ansible_connection: "winrm" + BOX_ANSIBLE_CONNECTIONS: ["libvirt_qemu"] + ansible_winrm_port: 5985 + ansible_winrm_scheme: http + ansible_winrm_transport: ['basic', 'plaintext', 'certificate', 'ssl'] + # NOT remote_user + # ansible_user + ansible_winrm_user: "Administrator" + #? 
ansible_password: "" + ansible_winrm_server_cert_validation: ignore + validate_certs: false + # NO proxy from environment - or ensure no_proxy + no_proxy: "localhost,127.0.0.1,192.168.56.1" + + linux_unix_group: + + children: + + linux_local_group: + + hosts: + + pentoo: + ansible_remote_addr: "/mnt/linuxPen19" + BOX_HOST_NAME: "pentoo" + BOX_SERVICE_MGR: "openrc" + BOX_USER_NAME: "vagrant" + BOX_USER_GROUP: "users" + BOX_USER_HOME: "/home/vagrant" + BOX_OS_FAMILY: Gentoo + BOX_OS_NAME: gentoo + BOX_OS_FLAVOR: "Pentoo" + BOX_USR_LIB: lib + BOX_DEFAULT_OUTPUT_IF: wlan4 + BOX_PROXY_MODE: selektor + BOX_WHONIX_PROXY_HOST: "" + BOX_GENTOO_DISTFILES_ARCHIVES: "/i/net/Http/distfiles.gentoo.org/distfiles" + BOX_PROXY_JAVA_NET_PROPERTIES: /etc/java-config-2/current-system-vm/jre/lib/net.properties + # /usr/lib/jvm/openjdk-bin-*/conf/net.properties + BOX_ALSO_USERS: + - pentoo + BOX_PORTAGE_PYTHON_MINOR: "3.11" + BOX_PYTHON2_MINOR: "2.7" + BOX_PYTHON3_MINOR: "3.11" + BOX_GENTOO_FROM_MP: "/" + + devuan: + ansible_remote_addr: "/mnt/linuxDev4" #ignored for local + BOX_HOST_NAME: "devuan" + BOX_SERVICE_MGR: "sysvinit" + BOX_USER_NAME: "devuan" + BOX_USER_GROUP: "adm" + BOX_USER_HOME: "/home/devuan" + BOX_OS_FAMILY: Debian + BOX_OS_NAME: Devuan + BOX_OS_FLAVOR: "Devuan" + BOX_USR_LIB: lib + BOX_DEFAULT_OUTPUT_IF: wlan6 + BOX_DEVUAN5_VAR_APT_ARCHIVES: "/mnt/o/Cache/Devuan/5/var/cache/apt/archives" + BOX_ALSO_USERS: [] + BOX_PORTAGE_PYTHON_MINOR: "3.11" + BOX_PYTHON2_MINOR: "2.7" + BOX_PYTHON3_MINOR: "3.11" + + BOX_JAVA_NET_PROPERTIES: /etc/java-11-openjdk/net.properties + + BOX_WHONIX_PROXY_HOST: "" + BOX_PROXY_MODE: tor + BOX_GENTOO_FROM_MP: "/mnt/linuxPen19" + + vars: + BOX_ANSIBLE_CONNECTIONS: ["local"] + BOX_REMOTE_MOUNTS: ['/mnt/h', '/mnt/j','/mnt/i', '/mnt/o', '/mnt/mnt/linuxPen19'] + BOX_BASE_FEATURES: ['insecure_sudo'] + BOX_PROXY_FEATURES: ['run_dnsmasq', 'run_privoxy'] + BOX_TOXCORE_FEATURES: [] + + # libvirt_group could also be ssh_group + linux_libvirt_group: + + hosts: + + gentoo1: + + ansible_remote_addr: "gentoo1" + ansible_host: "gentoo1" + ansible_ssh_user: "gentoo" + BOX_SERVICE_MGR: "openrc" + BOX_HOST_NAME: "gentoo1" + BOX_USER_NAME: "gentoo" + BOX_USER_GROUP: "adm" + BOX_ALSO_GROUP: "adm" + BOX_USER_HOME: "/home/gentoo" + BOX_OS_NAME: Gentoo + BOX_OS_FAMILY: Gentoo + BOX_OS_FLAVOR: "Gentoo" + BOX_USR_LIB: lib64 + BOX_DEFAULT_OUTPUT_IF: eth0 + BOX_PYTHON2_MINOR: "" + BOX_PYTHON3_MINOR: "3.11" + BASE_PORTAGE_PYTHON_MINOR: 3.11 + BOX_HOST_CONTAINER_MOUNTS: [] + BOX_GENTOO_DISTFILES_ARCHIVES: "/mnt/linuxPen19/usr/portage/distfiles" + BOX_PROXY_JAVA_NET_PROPERTIES: /etc/java-config-2/current-system-vm/jre/lib/net.properties + BOX_ALSO_USERS: + - gentoo + BOX_BASE_FEATURES: [] + BOX_TOXCORE_FEATURES: ['libvirt', 'docker'] + BOX_GENTOO_FROM_MP: "/mnt/linuxPen19" + + ubuntu18.04: + # /mnt + ansible_remote_addr: "ubuntu18.04" + # this is what the libvirt-qemu connector uses + ansible_host: "ubuntu18.04" + ansible_ssh_user: "vagrant" + BOX_SERVICE_MGR: systemd + BOX_HOST_NAME: "Ubuntu18.04" + BOX_USER_NAME: "vagrant" + BOX_USER_GROUP: "users" + BOX_USER_HOME: "/home/vagrant" + BOX_OS_FAMILY: Debian + BOX_OS_NAME: Ubuntu + BOX_OS_FLAVOR: "Ubuntu18" + BOX_USR_LIB: lib + BOX_DEFAULT_OUTPUT_IF: eth0 + BOX_UBUNTU16_VAR_APT_ARCHIVES: "/o/Cache/Apt/Ubuntu/18/var/cache/apt/archives" + ansible_python_interpreter: "/usr/bin/python3.6" + BOX_PYTHON2_MINOR: "" + BOX_PYTHON3_MINOR: "3.6" + BOX_REMOTE_MOUNTS: ['/mnt/o'] + # BOX_WHONIX_PROXY_HOST: "Whonix-Gateway" + # BOX_PROXY_MODE: ws + # FixMe + 
base_system_users: ['vagrant'] + BOX_TOXCORE_FEATURES: ['libvirt', 'docker'] + + vars: + BOX_ANSIBLE_CONNECTIONS: ["ssh", "libvirt_qemu"] + # proxy from environment + # ansible_ssh_extra_args: "-o StrictHostKeyChecking=no" + # ansible_ssh_host: "127.0.0.1" + BOX_ROOT_GROUP: root + BOX_PROXY_MODE: client + http_proxy: "http://127.0.0.1:3128" + https_proxy: "http://127.0.0.1:9128" + socks_proxy: "socks5://127.0.0.1:9050" + no_proxy: "localhost,127.0.0.1,127.0.0.1" + + linux_chroot_group : + + hosts: + + linuxGentoo: + + ansible_remote_addr: "/mnt/gentoo" + # required + ansible_host: "/mnt/gentoo" + BOX_SERVICE_MGR: "openrc" + BOX_HOST_NAME: "gentoo" + BOX_USER_NAME: "gentoo" + BOX_USER_GROUP: "adm" + BOX_USER_HOME: "/home/gentoo" + BOX_OS_FAMILY: Gentoo + BOX_OS_NAME: gentoo + BOX_OS_FLAVOR: "Gentoo" + BOX_USR_LIB: lib64 + BOX_DEFAULT_OUTPUT_IF: wlan6 + BASE_PORTAGE_PYTHON_MINOR: 3.11 + ansible_python_interpreter: "/usr/bin/python3.11" + BOX_GENTOO_DISTFILES_ARCHIVES: "/mnt/linuxPen19/usr/portage/distfiles" + BOX_PROXY_JAVA_NET_PROPERTIES: /etc/java-config-2/current-system-vm/jre/lib/net.properties + BOX_ALSO_USERS: + - gentoo + BOX_PROXY_MODE: "{{lookup('env', 'MODE'|default('tor'}}" + BOX_NBD_DEV: nbd1 + BOX_NBD_MP: /mnt/gentoo + BOX_NBD_FILES: "/i/data/Agile/tmp/Topics/GentooImgr" + BOX_NBD_BASE_QCOW: "/g/Agile/tmp/Topics/GentooImgr/gentoo.qcow2" + BOX_NBD_OVERLAY_NAME: "gentoo1" + BOX_NBD_OVERLAY_QCOW: "/o/var/lib/libvirt/images/gentoo1.qcow2" + BOX_GENTOO_FROM_MP: "/mnt/linuxPen19" + + linuxPen19: + + ansible_remote_addr: "/mnt/linuxPen19" + # required + ansible_host: "/mnt/linuxPen19" + BOX_SERVICE_MGR: "openrc" + BOX_HOST_NAME: "linuxPen19" + BOX_USER_NAME: "vagrant" + BOX_USER_GROUP: "adm" + BOX_USER_HOME: "/home/vagrant" + BOX_OS_FAMILY: Gentoo + BOX_OS_NAME: gentoo + BOX_OS_FLAVOR: "Pentoo" + BOX_USR_LIB: lib64 + BOX_DEFAULT_OUTPUT_IF: wlan6 + BASE_PORTAGE_PYTHON_MINOR: 3.11 + ansible_python_interpreter: "/usr/bin/python3.11" + BOX_GENTOO_DISTFILES_ARCHIVES: "/mnt/i/net/Http/distfiles.gentoo.org/distfiles" + BOX_PROXY_JAVA_NET_PROPERTIES: /etc/java-config-2/current-system-vm/jre/lib/net.properties + BOX_ALSO_USERS: + - gentoo + BOX_BASE_FEATURES: [] + BOX_TOXCORE_FEATURES: ['nbd', 'libvirt', 'docker'] + BOX_PROXY_MODE: "{{lookup('env', 'MODE'|default('tor'}}" + BOX_NBD_DEV: nbd1 + BOX_NBD_MP: /mnt/gentoo + BOX_NBD_OVERLAY_NAME: "gentoo1" + BOX_NBD_BASE_QCOW: "/g/Agile/tmp/Topics/GentooImgr/gentoo.qcow2" + BOX_NBD_OVERLAY_QCOW: "/o/var/lib/libvirt/images/gentoo1.qcow2" + BOX_NBD_FILES: "/i/data/Agile/tmp/Topics/GentooImgr" + + # linux_chroot_group vars + vars: + BOX_ANSIBLE_CONNECTIONS: ["local", "chroot"] + # ignored? chroot_connection/exe in ansible.cfg? + ansible_chroot_exe: "/usr/local/sbin/base_chroot.bash" + + #? ansible_ssh_common_args: "/usr/bin/env -i CHROOT=1" + # -i "PATH" + # -i "http_proxy https_proxy socks_proxy no_proxy" + #? 
-l + # for a non-root login: ansible_ssh_extra_args: "--userspec=foo:adm" + vars: # linux_unix_group + # toxcore + BOX_NBD_DEV: nbd1 + BOX_NBD_MP: /mnt/gentoo + BOX_NBD_OVERLAY_NAME: "gentoo1" + BOX_NBD_FILES: "/i/data/Agile/tmp/Topics/GentooImgr" + BOX_NBD_PORTAGE_FILE: "{{AGI_NBD_FILES}}/portage-20231223.tar.xz" + BOX_NBD_STAGE3_FILE: "{{AGI_NBD_FILES}}/stage3-amd64-openrc-20231217T170203Z.tar.xz" + BOX_NBD_KERNEL_DIR: /usr/src/linux + BOX_NBD_BASE_PROFILE: openrc + BOX_NBD_BASE_DIR: "/a/tmp/GentooImgr" + BOX_NBD_BASE_QCOW: "{{BOX_NBD_BASE_DIR}}/gentoo.qcow2" + # BOX_NBD_OVERLAY_QCOW: "/o/var/lib/libvirt/images/gentoo1.qcow2" + BOX_NBD_BASE_PUBKEY: "/root/.ssh/id_rsa-ansible.pub" + + # libvirt overlay + BOX_NBD_OVERLAY_DIR: "/a/tmp/GentooImgr/create-vm" + BOX_NBD_OVERLAY_GB: "20" + BOX_NBD_OVERLAY_CPUS: 1 + BOX_NBD_OVERLAY_RAM: 2048 + BOX_NBD_OVERLAY_BR: virbr1 + BOX_NBD_OVERLAY_NETWORK: default + # plaintext + BOX_NBD_OVERLAY_PASS: "gentoo" + + + vars: + # These come from the inventory overridden for connection = local,chroot in base_proxy.yml + http_proxy: "" + https_proxy: "" + socks_proxy: "" + ftp_proxy: "" + no_proxy: "localhost,127.0.0.1" + SSL_CERT_FILE: "/usr/local/etc/ssl/cacert-testforge.pem" + RSYNC_PROXY: "" + + BOX_OS_FAMILY: "" + BOX_OS_NAME: "" + BOX_OS_FLAVOR: "" + BOX_DEFAULT_OUTPUT_IF: "" + BOX_ALSO_GROUP: "adm" + + # only common to local and vagrant because /mnt/j is remote mounted - need a linux_group + BOX_ROOT_PIP_CACHE: "/mnt/o/Cache/Pip" + BOX_BOXUSER_PIP_CACHE: "/mnt/o/Cache/Pip" + + HOST_MOUNT_SYMLINKS: [] + HOST_MOUNT_SYMLINK_CONTENTS: {} + + LXD_TRUST_PASSWORD: sekret + + BOX_HOST_CONTAINER_MOUNTS: + - /mnt/l + - /mnt/e + - /mnt/h + - /mnt/i + - /mnt/j + - /mnt/q + - /mnt/w + - /mnt/o + + BOX_DOS_SCAN_DIRS: + - /mnt/h + - /mnt/i + - /mnt/j + - /mnt/e + - /mnt/q + - /mnt/w + - /mnt/c + + # These will fluctuate with what's been started - it's safe to open them all + # FixMe: should these go on no_proxy systematically + PRIV_TOR_LOCAL_NETS: + - "192.168.56.0/24" + + BOX_ALSO_USERS: [] + BOX_PYTHON2_MINOR: "" + BOX_PYTHON3_MINOR: "3.11" + BOX_BASH_SHELL: /bin/bash + BOX_IPV6_DISABLE: 1 + BOX_EMACS_VERSION: 27 + + BOX_ROOT_USER: root + BOX_ROOT_GROUP: root + + BOX_BYPASS_PROXY_GROUP: tor + BOX_FIREWALL_ALLOW_TRANS: false + BOX_PROXY_JAVA_NET_PROPERTIES: /etc/java-config-2/current-system-vm/jre/lib/net.properties + + BOX_BASE_FEATURES: [] + BOX_LOGG_FEATURES: [] + BOX_KEYS_FEATURES: ['tpm2'] # truecrypt + BOX_HARDEN_FEATURES: ['bubblewrap', 'sysctl', 'jabber'] # 'clamscan', firejail + # libvirt means 'qemu' + BOX_HOSTVMS_FEATURES: [] + + BOX_MISP_FEATURES: [] # 'kitchen' + BOX_W3AF_FEATURES: [] # 'kitchen' + BOX_MISP_GPG_PASS: gpg_pass_to_change_fast + + BOX_timezone: UTC + BOX_hwclock_local: false + BOX_hwclock_systohc: true + BOX_hwclock_hctosys: false + + BOX_PROXY_MODE: "" + BOX_DNS_PROXY: dnsmasq + BOX_TIME_DAEMON: ntpd + BOX_NTP_GROUP: ntp + BOX_NET_MANAGER: "networkmanager" + BOX_HTTP_PROXY: privoxy + + # toxcore + BOX_NBD_DEV: "" + BOX_NBD_MP: "" + BOX_NBD_FILES: "" + BOX_NBD_LOGLEVEL: 20 + BOX_NBD_BASE_QCOW: "" + BOX_NBD_BASE_PUBKEY: "/root/.ssh/id_rsa-ansible.pub" + + # libvirt overlay + BOX_NBD_OVERLAY_DIR: "" + BOX_NBD_OVERLAY_BR: "" + BOX_NBD_OVERLAY_GB: "20" + BOX_NBD_OVERLAY_NAME: "gentoo1" + BOX_NBD_OVERLAY_CPUS: 1 + BOX_NBD_OVERLAY_RAM: 2048 + # plaintext + BOX_NBD_OVERLAY_PASS: "" + +# Controls what compression method is used for new-style ansible modules when +# they are sent to the remote system. 
The compression types depend on having +# support compiled into both the controller's python and the client's python. +# The names should match with the python Zipfile compression types: +# * ZIP_STORED (no compression. available everywhere) +# * ZIP_DEFLATED (uses zlib, the default) +# These values may be set per host via the ansible_module_compression inventory variable. +# + ansible_module_compression: "ZIP_STORED" + ansible_python_interpreter: "/usr/local/bin/python3.sh" + + BOX_ANSIBLE_VERSION: "2.9.22" + # Cannot communicate securely with peer: no common encryption algorithm(s). + # git.kernel.org/ sslversion = tlsv1.3 + BOX_TLS_VERSION: "1.3" + BOX_SSL_GIT_SSLVERSION: "1.3" + + # unused so far - needed by src/ansible_gentooimgr/gentooimgr/ + BOX_ARCHITECTURE: amd64 + BOX_SUBTYPE: -hardened + # https://distfiles.gentoo.org/releases/amd64/autobuilds/latest-stage3-amd64-hardened-openrc.txt + GENTOO_BASE_STAGE_OPENRC_TXT_URL: "https://distfiles.gentoo.org/releases/{{BOX_ARCHITECTURE}}/autobuilds/latest-stage3-{{BOX_ARCHITECTURE}}{{BOX_SUBTYPE}}-openrc.txt" + # plus .gpgsig and .md5sum + GENTOO_BASE_PORTAGE_URL: "https://distfiles.gentoo.org/snapshots/portage-latest.tar.xz" + BOX_GENTOO_DISTFILES_ARCHIVES: "/i/net/Http/distfiles.gentoo.org/distfiles" + #? Gentoo specific? + + # unused so far + # missing HOSTVMS_LXD_TRUST_PASSWORD base_passwords_database + # /mnt/o/data/TestForge/src/ansible/roles/hostvms/tasks/vms.yml + box_passwords_database: "{{ lookup('env', 'USER')}}/Passwords.kdbx" + + BOX_WHONIX_PROXY_HOST: "" + BOX_PROXY_FEATURES: [] + BOX_GPG_SERVER: "keys.gnupg.net" + BOX_USR_LIB: lib + # if you are on a Gentoo, then / else the mp of a Gentoo if you have one, else '' + BOX_GENTOO_FROM_MP: '' + + # bc + MOUNT_GENTOO_DISTFILES_ARCHIVES: "{{BOX_GENTOO_DISTFILES_ARCHIVES}}" + +# # These are inventory overridden for connection = chroot in base_proxy.yml +# http_proxy: "{{ lookup('env', 'http_proxy')|default('http://127.0.0.1:3128') }}" +# https_proxy: "{{ lookup('env', 'https_proxy')|default('http://10.0.2.15:9128') }}" +# socks_proxy: "{{ lookup('env', 'socks_proxy')|default('socks5://10.0.2.15:9050') }}" +# no_proxy: "{{ lookup('env', 'no_proxy')|default('10.0.2.15,127.0.0.1,localhost') }}" diff --git a/lib/plugins/libvirt_qemu.py b/lib/plugins/libvirt_qemu.py new file mode 100644 index 0000000..b933b04 --- /dev/null +++ b/lib/plugins/libvirt_qemu.py @@ -0,0 +1,361 @@ +# Based on local.py (c) 2012, Michael DeHaan +# Based on chroot.py (c) 2013, Maykel Moya +# (c) 2013, Michael Scherer +# (c) 2015, Toshio Kuratomi +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +import sys +import time + +__metaclass__ = type + +DOCUMENTATION = """ + author: Jesse Pretorius + connection: community.libvirt.libvirt_qemu + short_description: Run tasks on libvirt/qemu virtual machines + description: + - Run commands or put/fetch files to libvirt/qemu virtual machines using the qemu agent API. + notes: + - Currently DOES NOT work with selinux set to enforcing in the VM. + - Requires the qemu-agent installed in the VM. + - Requires access to the qemu-ga commands guest-exec, guest-exec-status, guest-file-close, guest-file-open, guest-file-read, guest-file-write. 
+ version_added: "2.10" + options: + remote_addr: + description: Virtual machine name + default: inventory_hostname + vars: + - name: ansible_host + executable: + description: Shell to use for execution inside container + default: /bin/sh + vars: + - name: ansible_executable + virt_uri: + description: libvirt URI to connect to to access the virtual machine + default: qemu:///system + vars: + - name: ansible_libvirt_uri +""" + +import base64 +import json +import libvirt +import libvirt_qemu +import shlex +import traceback + +from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.plugins.shell.powershell import _parse_clixml +from ansible.utils.display import Display +from ansible.plugins.callback.minimal import CallbackModule +from functools import partial +from os.path import exists, getsize + +display = Display() + +iMAX_WAIT = 10 # sec. + +REQUIRED_CAPABILITIES = [ + {'enabled': True, 'name': 'guest-exec', 'success-response': True}, + {'enabled': True, 'name': 'guest-exec-status', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-close', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-open', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-read', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-write', 'success-response': True} +] + + +class Connection(ConnectionBase): + ''' Local libvirt qemu based connections ''' + + transport = 'community.libvirt.libvirt_qemu' + # TODO(odyssey4me): + # Figure out why pipelining does not work and fix it + has_pipelining = False + has_tty = False + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + self._host = self._play_context.remote_addr + + # Windows operates differently from a POSIX connection/shell plugin, + # we need to set various properties to ensure SSH on Windows continues + # to work + if getattr(self._shell, "_IS_WINDOWS", False): + self.has_native_async = True + self.always_pipeline_modules = True + self.module_implementation_preferences = ('.ps1', '.exe', '') + self.allow_executable = False + + def _connect(self): + ''' connect to the virtual machine; nothing to do here ''' + super(Connection, self)._connect() + if not self._connected: + + self._virt_uri = self.get_option('virt_uri') + + self._display.vvv(u"CONNECT TO {0}".format(self._virt_uri), host=self._host) + try: + self.conn = libvirt.open(self._virt_uri) + except libvirt.libvirtError as err: + self._display.vv(u"ERROR: libvirtError CONNECT TO {0}\n{1}".format(self._virt_uri, to_native(err)), host=self._host) + self._connected = False + raise AnsibleConnectionFailure(to_native(err)) + + self._display.vvv(u"FIND DOMAIN {0}".format(self._host), host=self._host) + try: + self.domain = self.conn.lookupByName(self._host) + except libvirt.libvirtError as err: + raise AnsibleConnectionFailure(to_native(err)) + + request_cap = json.dumps({'execute': 'guest-info'}) + response_cap = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_cap, 5, 0)) + self.capabilities = response_cap['return']['supported_commands'] + self._display.vvvvv(u"GUEST CAPABILITIES: {0}".format(self.capabilities), host=self._host) + missing_caps = [] + for cap in REQUIRED_CAPABILITIES: + if cap not in self.capabilities: + 
missing_caps.append(cap['name']) + if len(missing_caps) > 0: + self._display.vvv(u"REQUIRED CAPABILITIES MISSING: {0}".format(missing_caps), host=self._host) + raise AnsibleConnectionFailure('Domain does not have required capabilities') + + display.vvv(u"ESTABLISH {0} CONNECTION".format(self.transport), host=self._host) + self._connected = True + + def exec_command(self, cmd, in_data=None, sudoable=True): + """ execute a command on the virtual machine host """ + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + self._display.vvv(u"EXEC {0}".format(cmd), host=self._host) + + cmd_args_list = shlex.split(to_native(cmd, errors='surrogate_or_strict')) + + if getattr(self._shell, "_IS_WINDOWS", False): + # Become method 'runas' is done in the wrapper that is executed, + # need to disable sudoable so the bare_run is not waiting for a + # prompt that will not occur + sudoable = False + + # Generate powershell commands + cmd_args_list = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False) + + # TODO(odyssey4me): + # Implement buffering much like the other connection plugins + # Implement 'env' for the environment settings + # Implement 'input-data' for whatever it might be useful for + request_exec = { + 'execute': 'guest-exec', + 'arguments': { + 'path': cmd_args_list[0], + 'capture-output': True, + 'arg': cmd_args_list[1:] + } + } + request_exec_json = json.dumps(request_exec) + + display.vvv("GA send: {0}".format(request_exec_json), host=self._host) +# sys.stderr.write("GA send: {0}\n".format(request_exec_json)) + command_start = time.clock_gettime(time.CLOCK_MONOTONIC) + # TODO(odyssey4me): + # Add timeout parameter + try: + result_exec = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_exec_json, 5, 0)) + except libvirt.libvirtError as err: + self._display.vv(u"ERROR: libvirtError EXEC TO {0}\n{1}".format(self._virt_uri, to_native(err)), host=self._host) + sys.stderr.write(u"ERROR: libvirtError EXEC TO {0}\n{1}\n".format(self._virt_uri, to_native(err))) + self._connected = False + raise AnsibleConnectionFailure(to_native(err)) + + display.vvv(u"GA return: {0}".format(result_exec), host=self._host) + + request_status = { + 'execute': 'guest-exec-status', + 'arguments': { + 'pid': result_exec['return']['pid'] + } + } + request_status_json = json.dumps(request_status) + + display.vvv(u"GA send: {0}".format(request_status_json), host=self._host) + + # TODO(odyssey4me): + # Work out a better way to wait until the command has exited + max_time = iMAX_WAIT + time.clock_gettime(time.CLOCK_MONOTONIC) + result_status = { + 'return': dict(exited=False), + } + while not result_status['return']['exited']: + # Wait for 5% of the time already elapsed + sleep_time = (time.clock_gettime(time.CLOCK_MONOTONIC) - command_start) * (5 / 100) + if sleep_time < 0.0002: + sleep_time = 0.0002 + elif sleep_time > 1: + sleep_time = 1 + time.sleep(sleep_time) + result_status = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_status_json, 5, 0)) + if time.clock_gettime(time.CLOCK_MONOTONIC) > max_time: + err = 'timeout' + self._display.vv(u"ERROR: libvirtError EXEC TO {0}\n{1}".format(self._virt_uri, to_native(err)), host=self._host) + sys.stderr.write(u"ERROR: libvirtError EXEC TO {0}\n{1}\n".format(self._virt_uri, to_native(err))) + self._connected = False + raise AnsibleConnectionFailure(to_native(err)) + + display.vvv(u"GA return: {0}".format(result_status), host=self._host) + + while not result_status['return']['exited']: + 
result_status = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_status_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_status), host=self._host) + + if result_status['return'].get('out-data'): + stdout = base64.b64decode(result_status['return']['out-data']) + else: + stdout = b'' + + if result_status['return'].get('err-data'): + stderr = base64.b64decode(result_status['return']['err-data']) + else: + stderr = b'' + + # Decode xml from windows + if getattr(self._shell, "_IS_WINDOWS", False) and stdout.startswith(b"#< CLIXML"): + stdout = _parse_clixml(stdout) + + display.vvv(u"GA stdout: {0}".format(to_text(stdout)), host=self._host) + display.vvv(u"GA stderr: {0}".format(to_text(stderr)), host=self._host) + + return result_status['return']['exitcode'], stdout, stderr + + def put_file(self, in_path, out_path): + ''' transfer a file from local to domain ''' + super(Connection, self).put_file(in_path, out_path) + display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._host) + + if not exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound( + "file or module does not exist: %s" % in_path) + + request_handle = { + 'execute': 'guest-file-open', + 'arguments': { + 'path': out_path, + 'mode': 'wb+' + } + } + request_handle_json = json.dumps(request_handle) + + display.vvv(u"GA send: {0}".format(request_handle_json), host=self._host) + + result_handle = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_handle_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_handle), host=self._host) + + # TODO(odyssey4me): + # Handle exception for file/path IOError + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: + for chunk in iter(partial(in_file.read, BUFSIZE), b''): + try: + request_write = { + 'execute': 'guest-file-write', + 'arguments': { + 'handle': result_handle['return'], + 'buf-b64': base64.b64encode(chunk).decode() + } + } + request_write_json = json.dumps(request_write) + + display.vvvvv(u"GA send: {0}".format(request_write_json), host=self._host) + + result_write = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_write_json, 5, 0)) + + display.vvvvv(u"GA return: {0}".format(result_write), host=self._host) + + except Exception: + traceback.print_exc() + raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + + request_close = { + 'execute': 'guest-file-close', + 'arguments': { + 'handle': result_handle['return'] + } + } + request_close_json = json.dumps(request_close) + + display.vvv(u"GA send: {0}".format(request_close_json), host=self._host) + + result_close = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_close_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_close), host=self._host) + + def fetch_file(self, in_path, out_path): + ''' fetch a file from domain to local ''' + super(Connection, self).fetch_file(in_path, out_path) + display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._host) + + request_handle = { + 'execute': 'guest-file-open', + 'arguments': { + 'path': in_path, + 'mode': 'r' + } + } + request_handle_json = json.dumps(request_handle) + + display.vvv(u"GA send: {0}".format(request_handle_json), host=self._host) + + result_handle = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_handle_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_handle), host=self._host) + + request_read = { + 'execute': 'guest-file-read', + 'arguments': { + 'handle': result_handle['return'], + 'count': 
BUFSIZE + } + } + request_read_json = json.dumps(request_read) + + display.vvv(u"GA send: {0}".format(request_read_json), host=self._host) + + with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file: + try: + result_read = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_read_json, 5, 0)) + display.vvvvv(u"GA return: {0}".format(result_read), host=self._host) + out_file.write(base64.b64decode(result_read['return']['buf-b64'])) + while not result_read['return']['eof']: + result_read = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_read_json, 5, 0)) + display.vvvvv(u"GA return: {0}".format(result_read), host=self._host) + out_file.write(base64.b64decode(result_read['return']['buf-b64'])) + + except Exception: + traceback.print_exc() + raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + + request_close = { + 'execute': 'guest-file-close', + 'arguments': { + 'handle': result_handle['return'] + } + } + request_close_json = json.dumps(request_close) + + display.vvv(u"GA send: {0}".format(request_close_json), host=self._host) + + result_close = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_close_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_close), host=self._host) + + def close(self): + ''' terminate the connection; nothing to do here ''' + super(Connection, self).close() + self._connected = False diff --git a/lib/plugins/libvirt_qemu.py.community b/lib/plugins/libvirt_qemu.py.community new file mode 100644 index 0000000..7291f09 --- /dev/null +++ b/lib/plugins/libvirt_qemu.py.community @@ -0,0 +1,364 @@ +# Based on local.py (c) 2012, Michael DeHaan +# Based on chroot.py (c) 2013, Maykel Moya +# (c) 2013, Michael Scherer +# (c) 2015, Toshio Kuratomi +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +author: + - Jesse Pretorius (@odyssey4me) +name: libvirt_qemu +short_description: Run tasks on libvirt/qemu virtual machines +description: + - Run commands or put/fetch files to libvirt/qemu virtual machines using the qemu agent API. +notes: + - Currently DOES NOT work with selinux set to enforcing in the VM. + - Requires the qemu-agent installed in the VM. + - Requires access to the qemu-ga commands guest-exec, guest-exec-status, guest-file-close, guest-file-open, guest-file-read, guest-file-write. +extends_documentation_fragment: +version_added: "2.10.0" +options: + remote_addr: + description: Virtual machine name. + default: inventory_hostname + vars: + - name: ansible_host + - name: inventory_hostname + executable: + description: + - Shell to use for execution inside container. + - Set this to 'cmd' or 'powershell' for Windows VMs. + default: /bin/sh + vars: + - name: ansible_shell_type + virt_uri: + description: Libvirt URI to connect to to access the virtual machine. 
+ default: qemu:///system + vars: + - name: ansible_libvirt_uri +""" + +import base64 +import json +import shlex +import time +import traceback + +try: + import libvirt + import libvirt_qemu +except ImportError as imp_exc: + LIBVIRT_IMPORT_ERROR = imp_exc +else: + LIBVIRT_IMPORT_ERROR = None + +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.six import raise_from +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.plugins.shell.powershell import _parse_clixml +from ansible.utils.display import Display +from functools import partial +from os.path import exists + +display = Display() + + +REQUIRED_CAPABILITIES = [ + {'enabled': True, 'name': 'guest-exec', 'success-response': True}, + {'enabled': True, 'name': 'guest-exec-status', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-close', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-open', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-read', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-write', 'success-response': True} +] + + +class Connection(ConnectionBase): + ''' Local libvirt qemu based connections ''' + + transport = 'community.libvirt.libvirt_qemu' + # TODO(odyssey4me): + # Figure out why pipelining does not work and fix it + has_pipelining = False + has_tty = False + + def __init__(self, play_context, new_stdin, *args, **kwargs): + if LIBVIRT_IMPORT_ERROR: + raise_from( + AnsibleError('libvirt python bindings must be installed to use this plugin'), + LIBVIRT_IMPORT_ERROR) + + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + self._host = self._play_context.remote_addr + + # Windows operates differently from a POSIX connection/shell plugin, + # we need to set various properties to ensure SSH on Windows continues + # to work + # Ensure that any Windows hosts in your inventory have one of the + # following set, in order to trigger this code: + # ansible_shell_type: cmd + # ansible_shell_type: powershell + if getattr(self._shell, "_IS_WINDOWS", False): + self.has_native_async = True + self.always_pipeline_modules = True + self.module_implementation_preferences = ('.ps1', '.exe', '') + self.allow_executable = False + + def _connect(self): + ''' connect to the virtual machine; nothing to do here ''' + super(Connection, self)._connect() + if not self._connected: + + self._virt_uri = self.get_option('virt_uri') + + self._display.vvv(u"CONNECT TO {0}".format(self._virt_uri), host=self._host) + try: + self.conn = libvirt.open(self._virt_uri) + except libvirt.libvirtError as err: + raise AnsibleConnectionFailure(to_native(err)) + + self._display.vvv(u"FIND DOMAIN {0}".format(self._host), host=self._host) + try: + self.domain = self.conn.lookupByName(self._host) + except libvirt.libvirtError as err: + raise AnsibleConnectionFailure(to_native(err)) + + request_cap = json.dumps({'execute': 'guest-info'}) + response_cap = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_cap, 5, 0)) + self.capabilities = response_cap['return']['supported_commands'] + self._display.vvvvv(u"GUEST CAPABILITIES: {0}".format(self.capabilities), host=self._host) + missing_caps = [] + for cap in REQUIRED_CAPABILITIES: + if cap not in self.capabilities: + missing_caps.append(cap['name']) + if len(missing_caps) > 0: + self._display.vvv(u"REQUIRED CAPABILITIES MISSING: {0}".format(missing_caps), 
host=self._host) + raise AnsibleConnectionFailure('Domain does not have required capabilities') + + display.vvv(u"ESTABLISH {0} CONNECTION".format(self.transport), host=self._host) + self._connected = True + + def exec_command(self, cmd, in_data=None, sudoable=True): + """ execute a command on the virtual machine host """ + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + self._display.vvv(u"EXEC {0}".format(cmd), host=self._host) + + cmd_args_list = shlex.split(to_native(cmd, errors='surrogate_or_strict')) + + if getattr(self._shell, "_IS_WINDOWS", False): + # Become method 'runas' is done in the wrapper that is executed, + # need to disable sudoable so the bare_run is not waiting for a + # prompt that will not occur + sudoable = False + + # Make sure our first command is to set the console encoding to + # utf-8, this must be done via chcp to get utf-8 (65001) + cmd = ' '.join(["chcp.com", "65001", self._shell._SHELL_REDIRECT_ALLNULL, self._shell._SHELL_AND, cmd]) + + # Generate powershell commands + cmd_args_list = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False) + + # TODO(odyssey4me): + # Implement buffering much like the other connection plugins + # Implement 'env' for the environment settings + # Implement 'input-data' for whatever it might be useful for + request_exec = { + 'execute': 'guest-exec', + 'arguments': { + 'path': cmd_args_list[0], + 'capture-output': True, + 'arg': cmd_args_list[1:] + } + } + request_exec_json = json.dumps(request_exec) + + display.vvv(u"GA send: {0}".format(request_exec_json), host=self._host) + + # TODO(odyssey4me): + # Add timeout parameter + result_exec = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_exec_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_exec), host=self._host) + + command_start = time.clock_gettime(time.CLOCK_MONOTONIC) + + request_status = { + 'execute': 'guest-exec-status', + 'arguments': { + 'pid': result_exec['return']['pid'] + } + } + request_status_json = json.dumps(request_status) + + display.vvv(u"GA send: {0}".format(request_status_json), host=self._host) + + # TODO(odyssey4me): + # Work out a better way to wait until the command has exited + result_status = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_status_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_status), host=self._host) + + while not result_status['return']['exited']: + # Wait for 5% of the time already elapsed + sleep_time = (time.clock_gettime(time.CLOCK_MONOTONIC) - command_start) * (5 / 100) + if sleep_time < 0.0002: + sleep_time = 0.0002 + elif sleep_time > 1: + sleep_time = 1 + time.sleep(sleep_time) + result_status = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_status_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_status), host=self._host) + + if result_status['return'].get('out-data'): + stdout = base64.b64decode(result_status['return']['out-data']) + else: + stdout = b'' + + if result_status['return'].get('err-data'): + stderr = base64.b64decode(result_status['return']['err-data']) + else: + stderr = b'' + + # Decode xml from windows + if getattr(self._shell, "_IS_WINDOWS", False) and stdout.startswith(b"#< CLIXML"): + stdout = _parse_clixml(stdout) + + display.vvv(u"GA stdout: {0}".format(to_text(stdout)), host=self._host) + display.vvv(u"GA stderr: {0}".format(to_text(stderr)), host=self._host) + + return result_status['return']['exitcode'], stdout, stderr + + def put_file(self, in_path, out_path): 
+ ''' transfer a file from local to domain ''' + super(Connection, self).put_file(in_path, out_path) + display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._host) + + if not exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound( + "file or module does not exist: %s" % in_path) + + request_handle = { + 'execute': 'guest-file-open', + 'arguments': { + 'path': out_path, + 'mode': 'wb+' + } + } + request_handle_json = json.dumps(request_handle) + + display.vvv(u"GA send: {0}".format(request_handle_json), host=self._host) + + result_handle = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_handle_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_handle), host=self._host) + + # TODO(odyssey4me): + # Handle exception for file/path IOError + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: + for chunk in iter(partial(in_file.read, BUFSIZE), b''): + try: + request_write = { + 'execute': 'guest-file-write', + 'arguments': { + 'handle': result_handle['return'], + 'buf-b64': base64.b64encode(chunk).decode() + } + } + request_write_json = json.dumps(request_write) + + display.vvvvv(u"GA send: {0}".format(request_write_json), host=self._host) + + result_write = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_write_json, 5, 0)) + + display.vvvvv(u"GA return: {0}".format(result_write), host=self._host) + + except Exception: + traceback.print_exc() + raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + + request_close = { + 'execute': 'guest-file-close', + 'arguments': { + 'handle': result_handle['return'] + } + } + request_close_json = json.dumps(request_close) + + display.vvv(u"GA send: {0}".format(request_close_json), host=self._host) + + result_close = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_close_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_close), host=self._host) + + def fetch_file(self, in_path, out_path): + ''' fetch a file from domain to local ''' + super(Connection, self).fetch_file(in_path, out_path) + display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._host) + + request_handle = { + 'execute': 'guest-file-open', + 'arguments': { + 'path': in_path, + 'mode': 'r' + } + } + request_handle_json = json.dumps(request_handle) + + display.vvv(u"GA send: {0}".format(request_handle_json), host=self._host) + + result_handle = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_handle_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_handle), host=self._host) + + request_read = { + 'execute': 'guest-file-read', + 'arguments': { + 'handle': result_handle['return'], + 'count': BUFSIZE + } + } + request_read_json = json.dumps(request_read) + + display.vvv(u"GA send: {0}".format(request_read_json), host=self._host) + + with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file: + try: + result_read = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_read_json, 5, 0)) + display.vvvvv(u"GA return: {0}".format(result_read), host=self._host) + out_file.write(base64.b64decode(result_read['return']['buf-b64'])) + while not result_read['return']['eof']: + result_read = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_read_json, 5, 0)) + display.vvvvv(u"GA return: {0}".format(result_read), host=self._host) + out_file.write(base64.b64decode(result_read['return']['buf-b64'])) + + except Exception: + traceback.print_exc() + raise AnsibleError("failed to transfer file %s to %s" % 
(in_path, out_path)) + + request_close = { + 'execute': 'guest-file-close', + 'arguments': { + 'handle': result_handle['return'] + } + } + request_close_json = json.dumps(request_close) + + display.vvv(u"GA send: {0}".format(request_close_json), host=self._host) + + result_close = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_close_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_close), host=self._host) + + def close(self): + ''' terminate the connection; nothing to do here ''' + super(Connection, self).close() + self._connected = False diff --git a/lib/plugins/libvirt_qemu.py.dst b/lib/plugins/libvirt_qemu.py.dst new file mode 100644 index 0000000..637514d --- /dev/null +++ b/lib/plugins/libvirt_qemu.py.dst @@ -0,0 +1,329 @@ +# Based on local.py (c) 2012, Michael DeHaan +# Based on chroot.py (c) 2013, Maykel Moya +# (c) 2013, Michael Scherer +# (c) 2015, Toshio Kuratomi +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ + author: Jesse Pretorius + connection: community.libvirt.libvirt_qemu + short_description: Run tasks on libvirt/qemu virtual machines + description: + - Run commands or put/fetch files to libvirt/qemu virtual machines using the qemu agent API. + notes: + - Currently DOES NOT work with selinux set to enforcing in the VM. + - Requires the qemu-agent installed in the VM. + - Requires access to the qemu-ga commands guest-exec, guest-exec-status, guest-file-close, guest-file-open, guest-file-read, guest-file-write. + version_added: "2.10" + options: + remote_addr: + description: Virtual machine name + default: inventory_hostname + vars: + - name: ansible_host + executable: + description: Shell to use for execution inside container + default: /bin/sh + vars: + - name: ansible_executable + virt_uri: + description: libvirt URI to connect to to access the virtual machine + default: qemu:///system + vars: + - name: ansible_libvirt_uri +""" + +import base64 +import json +import libvirt +import libvirt_qemu +import shlex +import traceback + +from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound +from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.plugins.connection import ConnectionBase, BUFSIZE +from ansible.plugins.shell.powershell import _parse_clixml +from ansible.utils.display import Display +from functools import partial +from os.path import exists, getsize + +display = Display() + + +REQUIRED_CAPABILITIES = [ + {'enabled': True, 'name': 'guest-exec', 'success-response': True}, + {'enabled': True, 'name': 'guest-exec-status', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-close', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-open', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-read', 'success-response': True}, + {'enabled': True, 'name': 'guest-file-write', 'success-response': True} +] + + +class Connection(ConnectionBase): + ''' Local libvirt qemu based connections ''' + + transport = 'community.libvirt.libvirt_qemu' + # TODO(odyssey4me): + # Figure out why pipelining does not work and fix it + has_pipelining = False + has_tty = False + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + self._host = 
self._play_context.remote_addr + + # Windows operates differently from a POSIX connection/shell plugin, + # we need to set various properties to ensure SSH on Windows continues + # to work + if getattr(self._shell, "_IS_WINDOWS", False): + self.has_native_async = True + self.always_pipeline_modules = True + self.module_implementation_preferences = ('.ps1', '.exe', '') + self.allow_executable = False + + def _connect(self): + ''' connect to the virtual machine; nothing to do here ''' + super(Connection, self)._connect() + if not self._connected: + + self._virt_uri = self.get_option('virt_uri') + + self._display.vvv(u"CONNECT TO {0}".format(self._virt_uri), host=self._host) + try: + self.conn = libvirt.open(self._virt_uri) + except libvirt.libvirtError as err: + raise AnsibleConnectionFailure(to_native(err)) + + self._display.vvv(u"FIND DOMAIN {0}".format(self._host), host=self._host) + try: + self.domain = self.conn.lookupByName(self._host) + except libvirt.libvirtError as err: + raise AnsibleConnectionFailure(to_native(err)) + + request_cap = json.dumps({'execute': 'guest-info'}) + response_cap = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_cap, 5, 0)) + self.capabilities = response_cap['return']['supported_commands'] + self._display.vvvvv(u"GUEST CAPABILITIES: {0}".format(self.capabilities), host=self._host) + missing_caps = [] + for cap in REQUIRED_CAPABILITIES: + if cap not in self.capabilities: + missing_caps.append(cap['name']) + if len(missing_caps) > 0: + self._display.vvv(u"REQUIRED CAPABILITIES MISSING: {0}".format(missing_caps), host=self._host) + raise AnsibleConnectionFailure('Domain does not have required capabilities') + + display.vvv(u"ESTABLISH {0} CONNECTION".format(self.transport), host=self._host) + self._connected = True + + def exec_command(self, cmd, in_data=None, sudoable=True): + """ execute a command on the virtual machine host """ + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + self._display.vvv(u"EXEC {0}".format(cmd), host=self._host) + + cmd_args_list = shlex.split(to_native(cmd, errors='surrogate_or_strict')) + + if getattr(self._shell, "_IS_WINDOWS", False): + # Become method 'runas' is done in the wrapper that is executed, + # need to disable sudoable so the bare_run is not waiting for a + # prompt that will not occur + sudoable = False + + # Generate powershell commands + cmd_args_list = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False) + + # TODO(odyssey4me): + # Implement buffering much like the other connection plugins + # Implement 'env' for the environment settings + # Implement 'input-data' for whatever it might be useful for + request_exec = { + 'execute': 'guest-exec', + 'arguments': { + 'path': cmd_args_list[0], + 'capture-output': True, + 'arg': cmd_args_list[1:] + } + } + request_exec_json = json.dumps(request_exec) + + display.vvv(u"GA send: {0}".format(request_exec_json), host=self._host) + + # TODO(odyssey4me): + # Add timeout parameter + result_exec = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_exec_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_exec), host=self._host) + + request_status = { + 'execute': 'guest-exec-status', + 'arguments': { + 'pid': result_exec['return']['pid'] + } + } + request_status_json = json.dumps(request_status) + + display.vvv(u"GA send: {0}".format(request_status_json), host=self._host) + + # TODO(odyssey4me): + # Work out a better way to wait until the command has exited + result_status = 
json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_status_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_status), host=self._host) + + while not result_status['return']['exited']: + result_status = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_status_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_status), host=self._host) + + if result_status['return'].get('out-data'): + stdout = base64.b64decode(result_status['return']['out-data']) + else: + stdout = b'' + + if result_status['return'].get('err-data'): + stderr = base64.b64decode(result_status['return']['err-data']) + else: + stderr = b'' + + # Decode xml from windows + if getattr(self._shell, "_IS_WINDOWS", False) and stdout.startswith(b"#< CLIXML"): + stdout = _parse_clixml(stdout) + + display.vvv(u"GA stdout: {0}".format(to_text(stdout)), host=self._host) + display.vvv(u"GA stderr: {0}".format(to_text(stderr)), host=self._host) + + return result_status['return']['exitcode'], stdout, stderr + + def put_file(self, in_path, out_path): + ''' transfer a file from local to domain ''' + super(Connection, self).put_file(in_path, out_path) + display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._host) + + if not exists(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound( + "file or module does not exist: %s" % in_path) + + request_handle = { + 'execute': 'guest-file-open', + 'arguments': { + 'path': out_path, + 'mode': 'wb+' + } + } + request_handle_json = json.dumps(request_handle) + + display.vvv(u"GA send: {0}".format(request_handle_json), host=self._host) + + result_handle = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_handle_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_handle), host=self._host) + + # TODO(odyssey4me): + # Handle exception for file/path IOError + with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: + for chunk in iter(partial(in_file.read, BUFSIZE), b''): + try: + request_write = { + 'execute': 'guest-file-write', + 'arguments': { + 'handle': result_handle['return'], + 'buf-b64': base64.b64encode(chunk).decode() + } + } + request_write_json = json.dumps(request_write) + + display.vvvvv(u"GA send: {0}".format(request_write_json), host=self._host) + + result_write = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_write_json, 5, 0)) + + display.vvvvv(u"GA return: {0}".format(result_write), host=self._host) + + except Exception: + traceback.print_exc() + raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + + request_close = { + 'execute': 'guest-file-close', + 'arguments': { + 'handle': result_handle['return'] + } + } + request_close_json = json.dumps(request_close) + + display.vvv(u"GA send: {0}".format(request_close_json), host=self._host) + + result_close = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_close_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_close), host=self._host) + + def fetch_file(self, in_path, out_path): + ''' fetch a file from domain to local ''' + super(Connection, self).fetch_file(in_path, out_path) + display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._host) + + request_handle = { + 'execute': 'guest-file-open', + 'arguments': { + 'path': in_path, + 'mode': 'r' + } + } + request_handle_json = json.dumps(request_handle) + + display.vvv(u"GA send: {0}".format(request_handle_json), host=self._host) + + result_handle = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, 
request_handle_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_handle), host=self._host) + + request_read = { + 'execute': 'guest-file-read', + 'arguments': { + 'handle': result_handle['return'], + 'count': BUFSIZE + } + } + request_read_json = json.dumps(request_read) + + display.vvv(u"GA send: {0}".format(request_read_json), host=self._host) + + with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file: + try: + result_read = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_read_json, 5, 0)) + display.vvvvv(u"GA return: {0}".format(result_read), host=self._host) + out_file.write(base64.b64decode(result_read['return']['buf-b64'])) + while not result_read['return']['eof']: + result_read = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_read_json, 5, 0)) + display.vvvvv(u"GA return: {0}".format(result_read), host=self._host) + out_file.write(base64.b64decode(result_read['return']['buf-b64'])) + + except Exception: + traceback.print_exc() + raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + + request_close = { + 'execute': 'guest-file-close', + 'arguments': { + 'handle': result_handle['return'] + } + } + request_close_json = json.dumps(request_close) + + display.vvv(u"GA send: {0}".format(request_close_json), host=self._host) + + result_close = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_close_json, 5, 0)) + + display.vvv(u"GA return: {0}".format(result_close), host=self._host) + + def close(self): + ''' terminate the connection; nothing to do here ''' + super(Connection, self).close() + self._connected = False diff --git a/library/ansible-keepassxc b/library/ansible-keepassxc new file mode 120000 index 0000000..86d251e --- /dev/null +++ b/library/ansible-keepassxc @@ -0,0 +1 @@ +../src/ansible_gentooimgr/library/ansible-keepassxc.py \ No newline at end of file diff --git a/library/ansible_gentooimgr b/library/ansible_gentooimgr new file mode 120000 index 0000000..2a4820b --- /dev/null +++ b/library/ansible_gentooimgr @@ -0,0 +1 @@ +../src/ansible_gentooimgr/library/ansible_gentooimgr.py \ No newline at end of file diff --git a/roles/ansible-gentoo_install/defaults/main.yml b/roles/ansible-gentoo_install/defaults/main.yml index fd9d091..e2a6495 100644 --- a/roles/ansible-gentoo_install/defaults/main.yml +++ b/roles/ansible-gentoo_install/defaults/main.yml @@ -5,7 +5,7 @@ AGI_NBD_DISK: "/dev/{{AGI_NBD_DEV}}" AGI_install_disk: "{{AGI_NBD_DISK}}" AGI_NBD_PART: "{{AGI_NBD_DEV}}p1" AGI_NBD_MP: "{{BOX_NBD_MP|default('/mnt/gentoo')}}" -AGI_NBD_FILES: "{{BOX_NBD_FILES|default('/g/Agile/tmp/Topics/GentooImgr')}}" +AGI_NBD_FILES: "{{BOX_NBD_FILES}}" AGI_GENTOO_FROM_MP: "{{BOX_GENTOO_FROM_MP}}" AGI_PROXY_MODE: "{{PROXY_MODE|default('')}}" diff --git a/roles/ansible-gentoo_install/tasks/daemons.yml b/roles/ansible-gentoo_install/tasks/daemons.yml index 39eceda..5233ed5 100644 --- a/roles/ansible-gentoo_install/tasks/daemons.yml +++ b/roles/ansible-gentoo_install/tasks/daemons.yml @@ -13,7 +13,7 @@ shell: | MODE={{AGI_PROXY_MODE|default('')}} . 
/usr/local/bin/proxy_export.bash - emerge -v {{AGI_bootstrap_pkgs}} + box_gentoo_emerge.bash -v {{AGI_bootstrap_pkgs}} when: "'AGI_bootstrap_pkgs' != []" - name: start syslog daemon at boot diff --git a/roles/ansible-gentoo_install/tasks/local.yml b/roles/ansible-gentoo_install/tasks/local.yml index f0d3156..1b774c0 100644 --- a/roles/ansible-gentoo_install/tasks/local.yml +++ b/roles/ansible-gentoo_install/tasks/local.yml @@ -14,7 +14,7 @@ - set_fact: AGI_use_local_kernel: true when: - - ansible_distribution == 'Gentoo' or BOX_GENTOO_FROM_MP != '' + - ansible_distribution == 'Gentoo' or BOX_GENTOO_FROM_MP not in ['/', ''] - set_fact: AGI_PROXY_MODE: "{{PROXY_MODE|default('')}}" @@ -32,16 +32,28 @@ - name: check for mounted disk shell: | - grep '/dev/{{AGI_NBD_DEV}}' /proc/mounts + grep '/dev/{{AGI_NBD_DEV}}' /proc/mounts && exit 0 + ps ax | grep -v grep | \ + grep "qemu-nbd.*/dev/nbd.*{{BOX_NBD_BASE_QCOW}}" && \ + echo WARN looks like theres an active nbd mount of \ + "${BOX_NBD_BASE_QCOW}" && exit 1 + exit 2 failed_when: false changed_when: false register: check_mounted_disk check_mode: no - name: partition if disk not mounted + fail: + msg: "looks like theres an active nbd mount of {{BOX_NBD_BASE_QCOW}}" + when: + - check_mounted_disk.rc == 1 + check_mode: no + + - name: partition if disk not mounted or active include: disk.yml when: - - check_mounted_disk.rc != 0 + - check_mounted_disk.rc > 1 check_mode: no - name: mount root partition diff --git a/roles/ansible-gentoo_install/tasks/main.yml b/roles/ansible-gentoo_install/tasks/main.yml index 6a9e929..8be474d 100644 --- a/roles/ansible-gentoo_install/tasks/main.yml +++ b/roles/ansible-gentoo_install/tasks/main.yml @@ -2,16 +2,23 @@ --- - name: "DEBUG: ansible-gentoo_install nbd_disk ansible_connection" debug: - verbosity: 1 - msg: "DEBUG: ansible-gentoo_install nbd_disk={{ nbd_disk }} ansible_connection={{ ansible_connection }}" + verbosity: 0 + msg: "DEBUG: ansible-gentoo_install nbd_disk={{ nbd_disk }} AGI_NBD_DISK={{AGI_NBD_DISK}} ansible_connection={{ ansible_connection }}" check_mode: false - set_fact: AGI_target: Gentoo2 +- name: "ansible-gentoo_install" + set_fact: + ansible_check_mode: false + when: + - "'ansible-gentoo_install' in ROLES" + - ansible_check_mode is true + - name: look for nbd partitions shell: | - grep nbd /proc/partitions | head -1 + grep nbd /proc/partitions | head -1|sed -e 's/.* //' register: nbd_out failed_when: false check_mode: false @@ -28,45 +35,52 @@ shell: | echo nbd_disk={{ nbd_disk }} ansible_connection={{ ansible_connection }} echo ansible_distribution={{ansible_distribution}} BOX_GENTOO_FROM_MP={{BOX_GENTOO_FROM_MP}} - [ -d '/mnt/o/var/local/src/play_tox/src/ansible_gentooimgr' ] || exit 1 - [ -f '/mnt/o/var/local/src/play_tox/src/ansible_gentooimgr/__init__.py' ] || exit 2 - [ -d '/mnt/o/var/local/src/play_tox/src/ansible_gentooimgr/gentooimgr' ] || exit 3 - [ -f '/mnt/o/var/local/src/play_tox/src/ansible_gentooimgr/gentooimgr/__init__.py' ] || exit 4 + [ -d '{{PLAY_ANSIBLE_SRC}}/src/ansible_gentooimgr' ] || exit 1 + [ -f '{{PLAY_ANSIBLE_SRC}}/src/ansible_gentooimgr/__init__.py' ] || exit 2 + [ -d '{{PLAY_ANSIBLE_SRC}}/src/ansible_gentooimgr/gentooimgr' ] || exit 3 + [ -f '{{PLAY_ANSIBLE_SRC}}/src/ansible_gentooimgr/gentooimgr/__init__.py' ] || exit 4 {{ansible_python_interpreter}} \ - -c "import sys; sys.path.append('/mnt/o/var/local/src/play_tox/src/ansible_gentooimgr'); import gentooimgr; print(gentooimgr.__file__)" + -c "import os sys; 
sys.path.append('{{PLAY_ANSIBLE_SRC}}/src/ansible_gentooimgr'); import gentooimgr; print(os.path.dirname(gentooimgr.__file__))" register: gentooimgr_out check_mode: false ignore_errors: true - block: - - set_fact: + - name: set AGI_gentooimgr_configs + set_fact: AGI_gentooimgr_configs: "{{gentooimgr_out.stdout}}/configs" - name: ansible_gentooimgr nbd status ansible_gentooimgr: action: status - loglevel: 10 + loglevel: "{{BOX_NBD_LOGLEVEL}}" threads: 1 - config: cloud.config - profile: openrc - kernel_dir: /usr/src/linux - portage: '{{AGI_NBD_FILES}}/portage-20231223.tar.xz' - stage3: '{{AGI_NBD_FILES}}/stage3-amd64-openrc-20231217T170203Z.tar.xz' - temporary_dir: "{{AGI_NBD_FILES}}" + config: base.config + profile: "{{BOX_NBD_BASE_PROFILE}}" + kernel_dir: "{{BOX_NBD_KERNEL_DIR}}" + portage: '{{BOX_NBD_PORTAGE_FILE}}' + stage3: '{{BOX_NBD_STAGE3_FILE }}' + temporary_dir: "{{BOX_NBD_BASE_DIR}}" download_dir: "{{AGI_NBD_FILES}}" - + register: ansible_gentooimgr_out ignore_errors: true check_mode: false + - name: "DEBUG: ansible-gentoo_install nbd_disk ansible_connection" + debug: + verbosity: 0 + var: ansible_gentooimgr_out + check_mode: false when: - ansible_connection in ['chroot', 'local', 'libvirt_qemu'] - - ansible_distribution == 'Gentoo' or BOX_GENTOO_FROM_MP != '' + - ansible_distribution == 'Gentoo' or BOX_GENTOO_FROM_MP not in ['/', ''] # - nbd_disk|default('') == AGI_NBD_DISK -- include_tasks: local.yml +- name: include_tasks local.yml + include_tasks: local.yml when: - ansible_connection in ['chroot', 'local'] - - ansible_distribution == 'Gentoo' or BOX_GENTOO_FROM_MP != '' - - nbd_disk|default('') == AGI_NBD_DISK + - ansible_distribution == 'Gentoo' or BOX_GENTOO_FROM_MP not in ['/', ''] +# - nbd_disk|default('') == AGI_NBD_DISK diff --git a/roles/ansible-gentoo_install/tasks/tarball.yml b/roles/ansible-gentoo_install/tasks/tarball.yml index f6eb388..dfc1956 100644 --- a/roles/ansible-gentoo_install/tasks/tarball.yml +++ b/roles/ansible-gentoo_install/tasks/tarball.yml @@ -131,8 +131,8 @@ - name: test gpg GENTOO shell: | # E1D6ABB63BFCFB4BA02FDF1CEC590EEAC9189250 - [ -f /usr/share/openpgp-keys/gentoo-release.asc ] || \ - emerge -v sec-keys/openpgp-keys-gentoo-release || exit 1 + [ -f /usr/share/openpgp-keys/gentoo-release.asc ] || \ + box_gentoo_emerge.bash sec-keys/openpgp-keys-gentoo-release || exit 1 gpg --list-keys | grep E1D6ABB63BFCFB4BA02FDF1CEC590EEAC9189250 || \ gpg --import /usr/share/openpgp-keys/gentoo-release.asc || exit 2 when:
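
Quick sanity check for the libvirt_qemu hosts above: the connection plugin drives the qemu guest agent through guest-exec / guest-exec-status, so the agent can be exercised directly before wiring a domain into hosts.yml. The sketch below is a minimal, standalone check that only uses calls already present in the plugin (libvirt.open, lookupByName, libvirt_qemu.qemuAgentCommand) and the plugin's default virt_uri; the domain name "gentoo1" and the /bin/uname test command are placeholders to adjust for your setup.

#!/usr/bin/env python3
# Minimal guest-agent round-trip sketch (assumptions: libvirt python bindings
# installed, a running domain named "gentoo1" with qemu-guest-agent inside).
import base64
import json
import time

import libvirt
import libvirt_qemu

URI = "qemu:///system"   # same default as the plugin's virt_uri option
DOMAIN = "gentoo1"       # placeholder: substitute your libvirt domain name

conn = libvirt.open(URI)
dom = conn.lookupByName(DOMAIN)

# guest-exec: run a trivial command inside the guest, capturing its output
req = json.dumps({
    "execute": "guest-exec",
    "arguments": {"path": "/bin/uname", "arg": ["-a"], "capture-output": True},
})
pid = json.loads(libvirt_qemu.qemuAgentCommand(dom, req, 5, 0))["return"]["pid"]

# guest-exec-status: poll until the command reports exited, as the plugin does
status_req = json.dumps({"execute": "guest-exec-status", "arguments": {"pid": pid}})
status = {"return": {"exited": False}}
while not status["return"]["exited"]:
    time.sleep(0.2)
    status = json.loads(libvirt_qemu.qemuAgentCommand(dom, status_req, 5, 0))

print("exitcode:", status["return"]["exitcode"])
print(base64.b64decode(status["return"].get("out-data", b"")).decode(errors="replace"))

If this prints the guest's uname output, the matching inventory entry (e.g. gentoo1) should be reachable once ansible_connection resolves to the libvirt_qemu plugin and ansible_host matches the libvirt domain name.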